# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fcntl
import glob
import httplib
import os
import pwd
import shlex
import socket
import struct
import tempfile
import threading
import eventlet
from eventlet.green import subprocess
from eventlet import greenthread
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import loggers
from oslo_rootwrap import client
from oslo_utils import excutils
from neutron.agent.common import config
from neutron.common import constants
from neutron.common import utils
from neutron.i18n import _LE
from neutron import wsgi
LOG = logging.getLogger(__name__)
config.register_root_helper(cfg.CONF)
class RootwrapDaemonHelper(object):
__client = None
__lock = threading.Lock()
def __new__(cls):
"""There is no reason to instantiate this class"""
raise NotImplementedError()
@classmethod
def get_client(cls):
with cls.__lock:
if cls.__client is None:
cls.__client = client.Client(
shlex.split(cfg.CONF.AGENT.root_helper_daemon))
return cls.__client
def addl_env_args(addl_env):
"""Build arugments for adding additional environment vars with env"""
# NOTE (twilson) If using rootwrap, an EnvFilter should be set up for the
# command instead of a CommandFilter.
if addl_env is None:
return []
return ['env'] + ['%s=%s' % pair for pair in addl_env.items()]
def create_process(cmd, run_as_root=False, addl_env=None):
"""Create a process object for the given command.
The return value will be a tuple of the process object and the
list of command arguments used to create it.
"""
cmd = map(str, addl_env_args(addl_env) + cmd)
if run_as_root:
cmd = shlex.split(config.get_root_helper(cfg.CONF)) + cmd
LOG.debug("Running command: %s", cmd)
obj = utils.subprocess_popen(cmd, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return obj, cmd
def execute_rootwrap_daemon(cmd, process_input, addl_env):
cmd = map(str, addl_env_args(addl_env) + cmd)
# NOTE(twilson) oslo_rootwrap.daemon will raise on filter match
# errors, whereas oslo_rootwrap.cmd converts them to return codes.
# In practice, no neutron code should be trying to execute something that
# would throw those errors, and if it does it should be fixed as opposed to
# just logging the execution error.
LOG.debug("Running command (rootwrap daemon): %s", cmd)
client = RootwrapDaemonHelper.get_client()
return client.execute(cmd, process_input)
def execute(cmd, process_input=None, addl_env=None,
check_exit_code=True, return_stderr=False, log_fail_as_error=True,
extra_ok_codes=None, run_as_root=False):
try:
if run_as_root and cfg.CONF.AGENT.root_helper_daemon:
returncode, _stdout, _stderr = (
execute_rootwrap_daemon(cmd, process_input, addl_env))
else:
obj, cmd = create_process(cmd, run_as_root=run_as_root,
addl_env=addl_env)
_stdout, _stderr = obj.communicate(process_input)
returncode = obj.returncode
obj.stdin.close()
m = _("\nCommand: {cmd}\nExit code: {code}\nStdin: {stdin}\n"
"Stdout: {stdout}\nStderr: {stderr}").format(
cmd=cmd,
code=returncode,
stdin=process_input or '',
stdout=_stdout,
stderr=_stderr)
extra_ok_codes = extra_ok_codes or []
if returncode and returncode in extra_ok_codes:
returncode = None
if returncode and log_fail_as_error:
LOG.error(m)
else:
LOG.debug(m)
if returncode and check_exit_code:
raise RuntimeError(m)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
return (_stdout, _stderr) if return_stderr else _stdout
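# Example (illustrative sketch, not part of the original module): a typical
# agent call into execute(). The command and exit code below are made up for
# illustration only.
#
#     output = execute(['ip', '-o', 'link', 'show'], run_as_root=True,
#                      check_exit_code=True, extra_ok_codes=[1])
#
# With extra_ok_codes=[1], an exit code of 1 is logged at debug level instead
# of being logged as an error and raising RuntimeError.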
def get_interface_mac(interface):
MAC_START = 18
MAC_END = 24
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927,
struct.pack('256s', interface[:constants.DEVICE_NAME_MAX_LEN]))
return ''.join(['%02x:' % ord(char)
for char in info[MAC_START:MAC_END]])[:-1]
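# NOTE: 0x8927 above is the Linux SIOCGIFHWADDR ioctl; in the returned ifreq
# buffer the 16-byte interface name is followed by a sockaddr whose 2-byte
# family field precedes the hardware address, hence bytes 18..24.
# Example (illustrative, not part of the original module):
#
#     get_interface_mac('eth0')  # -> e.g. 'fa:16:3e:aa:bb:cc'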
def replace_file(file_name, data):
"""Replaces the contents of file_name with data in a safe manner.
First write to a temp file and then rename. Since POSIX renames are
atomic, the file is unlikely to be corrupted by competing writes.
We create the tempfile on the same device to ensure that it can be renamed.
"""
base_dir = os.path.dirname(os.path.abspath(file_name))
tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False)
tmp_file.write(data)
tmp_file.close()
os.chmod(tmp_file.name, 0o644)
os.rename(tmp_file.name, file_name)
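# Example (illustrative sketch, not part of the original module): atomically
# rewriting a dnsmasq-style hosts file; the path and contents are hypothetical.
#
#     replace_file('/var/lib/neutron/dhcp/<network-id>/host',
#                  'fa:16:3e:aa:bb:cc,host-10-0-0-2,10.0.0.2\n')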
def find_child_pids(pid):
"""Retrieve a list of the pids of child processes of the given pid."""
try:
raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid='],
log_fail_as_error=False)
except RuntimeError as e:
# Unexpected errors are the responsibility of the caller
with excutils.save_and_reraise_exception() as ctxt:
# Exception has already been logged by execute
no_children_found = 'Exit code: 1' in e.message
if no_children_found:
ctxt.reraise = False
return []
return [x.strip() for x in raw_pids.split('\n') if x.strip()]
def ensure_dir(dir_path):
"""Ensure a directory with 755 permissions mode."""
if not os.path.isdir(dir_path):
os.makedirs(dir_path, 0o755)
def _get_conf_base(cfg_root, uuid, ensure_conf_dir):
#TODO(mangelajo): separate responsibilities here, ensure_conf_dir
# should be a separate function
conf_dir = os.path.abspath(os.path.normpath(cfg_root))
conf_base = os.path.join(conf_dir, uuid)
if ensure_conf_dir:
ensure_dir(conf_dir)
return conf_base
def get_conf_file_name(cfg_root, uuid, cfg_file, ensure_conf_dir=False):
"""Returns the file name for a given kind of config file."""
conf_base = _get_conf_base(cfg_root, uuid, ensure_conf_dir)
return "%s.%s" % (conf_base, cfg_file)
def get_value_from_file(filename, converter=None):
try:
with open(filename, 'r') as f:
try:
return converter(f.read()) if converter else f.read()
except ValueError:
LOG.error(_LE('Unable to convert value in %s'), filename)
except IOError:
LOG.debug('Unable to access %s', filename)
def get_value_from_conf_file(cfg_root, uuid, cfg_file, converter=None):
"""A helper function to read a value from one of a config file."""
file_name = get_conf_file_name(cfg_root, uuid, cfg_file)
return get_value_from_file(file_name, converter)
def remove_conf_files(cfg_root, uuid):
conf_base = _get_conf_base(cfg_root, uuid, False)
for file_path in glob.iglob("%s.*" % conf_base):
os.unlink(file_path)
def get_root_helper_child_pid(pid, run_as_root=False):
"""
Get the lowest child pid in the process hierarchy
If root helper was used, two or more processes would be created:
- a root helper process (e.g. sudo myscript)
- possibly a rootwrap script (e.g. neutron-rootwrap)
- a child process (e.g. myscript)
Killing the root helper process will leave the child process
running, re-parented to init, so the only way to ensure that both
die is to target the child process directly.
"""
pid = str(pid)
if run_as_root:
try:
pid = find_child_pids(pid)[0]
except IndexError:
# Process is already dead
return None
while True:
try:
# We shouldn't have more than one child per process
# so keep getting the children of the first one
pid = find_child_pids(pid)[0]
except IndexError:
# Last process in the tree, return it
break
return pid
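# Example (illustrative, not part of the original module; all pids and command
# lines below are hypothetical). Given a process tree such as
#
#     sudo neutron-rootwrap /etc/neutron/rootwrap.conf dnsmasq ...   (pid 100)
#      \_ /usr/bin/python /usr/bin/neutron-rootwrap ...              (pid 101)
#          \_ dnsmasq ...                                            (pid 102)
#
# get_root_helper_child_pid(100, run_as_root=True) walks the children down to
# pid 102, which is the process that must be signalled directly.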
def remove_abs_path(cmd):
"""Remove absolute path of executable in cmd
Note: New instance of list is returned
:param cmd: parsed shlex command (e.g. ['/bin/foo', 'param1', 'param two'])
"""
if cmd and os.path.isabs(cmd[0]):
cmd = list(cmd)
cmd[0] = os.path.basename(cmd[0])
return cmd
def get_cmdline_from_pid(pid):
if pid is None or not os.path.exists('/proc/%s' % pid):
return []
with open('/proc/%s/cmdline' % pid, 'r') as f:
return f.readline().split('\0')[:-1]
def cmd_matches_expected(cmd, expected_cmd):
abs_cmd = remove_abs_path(cmd)
abs_expected_cmd = remove_abs_path(expected_cmd)
if abs_cmd != abs_expected_cmd:
# Commands executed with #! are prefixed with the script
# executable. Check for the expected cmd being a subset of the
# actual cmd to cover this possibility.
abs_cmd = remove_abs_path(abs_cmd[1:])
return abs_cmd == abs_expected_cmd
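# Example (illustrative, not part of the original module; 'my-agent' is a
# hypothetical executable). A script started via a '#!' line shows up in
# /proc/<pid>/cmdline with the interpreter prepended, so both calls match:
#
#     cmd_matches_expected(['/usr/bin/my-agent', '--debug'],
#                          ['my-agent', '--debug'])           # True
#     cmd_matches_expected(['python', '/usr/bin/my-agent', '--debug'],
#                          ['my-agent', '--debug'])           # True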
def pid_invoked_with_cmdline(pid, expected_cmd):
"""Validate process with given pid is running with provided parameters
"""
cmd = get_cmdline_from_pid(pid)
return cmd_matches_expected(cmd, expected_cmd)
def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
"""
Wait until the callable predicate evaluates to True
:param predicate: Callable deciding whether waiting should continue.
Best practice is to instantiate predicate with functools.partial()
:param timeout: Timeout in seconds: how long the function should wait.
:param sleep: Polling interval for results in seconds.
:param exception: Exception class for eventlet.Timeout.
(see doc for eventlet.Timeout for more information)
"""
with eventlet.timeout.Timeout(timeout, exception):
while not predicate():
eventlet.sleep(sleep)
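# Example (illustrative sketch, not part of the original module), following the
# functools.partial() advice in the docstring; the predicate below is made up.
#
#     import functools
#     wait_until_true(functools.partial(os.path.exists, '/tmp/ready-flag'),
#                     timeout=30, sleep=0.5)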
def ensure_directory_exists_without_file(path):
dirname = os.path.dirname(path)
if os.path.isdir(dirname):
try:
os.unlink(path)
except OSError:
with excutils.save_and_reraise_exception() as ctxt:
if not os.path.exists(path):
ctxt.reraise = False
else:
ensure_dir(dirname)
def is_effective_user(user_id_or_name):
"""Returns True if user_id_or_name is effective user (id/name)."""
euid = os.geteuid()
if str(user_id_or_name) == str(euid):
return True
effective_user_name = pwd.getpwuid(euid).pw_name
return user_id_or_name == effective_user_name
class UnixDomainHTTPConnection(httplib.HTTPConnection):
"""Connection class for HTTP over UNIX domain socket."""
def __init__(self, host, port=None, strict=None, timeout=None,
proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.socket_path = cfg.CONF.metadata_proxy_socket
def connect(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if self.timeout:
self.sock.settimeout(self.timeout)
self.sock.connect(self.socket_path)
class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol):
def __init__(self, request, client_address, server):
if client_address == '':
client_address = ('<local>', 0)
# base class is old-style, so super does not work properly
eventlet.wsgi.HttpProtocol.__init__(self, request, client_address,
server)
class UnixDomainWSGIServer(wsgi.Server):
def __init__(self, name):
self._socket = None
self._launcher = None
self._server = None
super(UnixDomainWSGIServer, self).__init__(name)
def start(self, application, file_socket, workers, backlog):
self._socket = eventlet.listen(file_socket,
family=socket.AF_UNIX,
backlog=backlog)
self._launch(application, workers=workers)
def _run(self, application, socket):
"""Start a WSGI service in a new green thread."""
logger = logging.getLogger('eventlet.wsgi.server')
eventlet.wsgi.server(socket,
application,
max_size=self.num_threads,
protocol=UnixDomainHttpProtocol,
log=loggers.WritableLogger(logger))
# Copyright (c) 2013, Nathan Dunsworth - NFXPlugins
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NFXPlugins nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NFXPLUGINS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__all__ = [
'SgProject'
]
# This module imports
import ShotgunORM
class SgProject(ShotgunORM.SgEntity):
'''
Class that represents a Project Entity.
'''
def sequence(self, sequence, sgFields=None):
'''
Returns the sequence Entity for this project.
Args:
* (str) sequence:
Name of the sequence.
* (list) sgFields:
List of fields to populate the result with.
'''
if not self.exists():
return None
return self.connection().findOne(
'Sequence',
[
[
'project',
'is',
self
],
[
'code',
'is',
sequence
]
],
sgFields
)
def sequenceNames(self):
'''
Returns a list of all Sequence names for this project.
'''
result = []
if not self.exists():
return result
seqs = self.sequences(sgFields=['code'])
for seq in seqs:
result.append(seq['code'])
result.sort()
return result
def sequences(self, sgFields=None):
'''
Returns a list of all Sequence Entities for this project.
Args:
* (list) sgFields:
List of fields to populate the results with.
'''
if not self.exists():
return []
return self.connection().find(
'Sequence',
[
[
'project',
'is',
self
]
],
sgFields
)
def shot(self, sequence, shot, sgFields=None):
'''
Returns the Shot Entity for the sequence of this project.
Args:
* (str) sequence:
Name of the sequence the shot belongs to.
* (str) shot:
Name of the shot.
* (list) sgFields:
List of fields to populate the result with.
'''
if not self.exists():
return None
return self.connection().findOne(
'Shot',
[
[
'project',
'is',
self
],
[
'sg_sequence',
'name_is',
sequence
],
[
'code',
'is',
shot
]
],
sgFields
)
def shotNames(self, sgSequences=None):
'''
Returns a dict containing all Shot names for this project.
Args:
* (list) sgSequences:
Return only the Shot names associated with the list of Sequences.
'''
result = {}
if not self.exists():
return result
seqShots = self.shots(sgSequences=sgSequences, sgFields=['code'])
for seq, shots in seqShots.items():
shotNames = []
for shot in shots:
shotNames.append(shot['code'])
result[seq] = shotNames
return result
def shots(self, sgSequences=None, sgFields=None):
'''
Returns a dict of all Shot Entities for this project.
Args:
* (list) sgSequences:
Return only the Shots associated with the list of Sequences.
* (list) sgFields:
List of fields to populate the results with.
'''
result = {}
if not self.exists():
return result
if isinstance(sgSequences, (str, ShotgunORM.SgEntity)):
sgSequences = [sgSequences]
if sgSequences is None:
sgSequences = self.sequenceNames()
if len(sgSequences) <= 0:
return result
seqNames = []
for seq in sgSequences:
if isinstance(seq, str):
seqNames.append(seq)
else:
seqNames.append(seq['code'])
qEngine = self.connection().queryEngine()
# Block the query engine so all Shot fields get pulled at once.
qEngine.block()
try:
sequences = self.connection().find(
'Sequence',
[
[
'project',
'is',
self
],
[
'code',
'in',
seqNames
]
],
['code', 'shots']
)
for seq in sequences:
seqField = seq.field('shots')
result[seq['code']] = seqField.value(sgSyncFields={'Shot': sgFields})
finally:
qEngine.unblock()
return result
# Register the custom class.
ShotgunORM.SgEntity.registerDefaultEntityClass(
sgEntityCls=SgProject,
sgEntityTypes=['Project']
)
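# Example (hedged usage sketch, not part of this module): once a ShotgunORM
# connection has been established elsewhere, Project Entities returned by it
# are instances of SgProject, so the helpers above can be used directly. The
# connection object, filter and entity names below are hypothetical.
#
#     project = connection.findOne('Project', [['name', 'is', 'MyShow']])
#     print(project.sequenceNames())           # e.g. ['seq010', 'seq020']
#     print(project.shot('seq010', 'sh0010'))  # Shot Entity or None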
# Copyright 2014 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for VMware Datastore"""
import hashlib
import httplib
import logging
import os
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import units
try:
from oslo_vmware import api
import oslo_vmware.exceptions as vexc
from oslo_vmware.objects import datacenter as oslo_datacenter
from oslo_vmware.objects import datastore as oslo_datastore
from oslo_vmware import vim_util
except ImportError:
api = None
import six
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import six.moves.urllib.parse as urlparse
import glance_store
from glance_store import capabilities
from glance_store import exceptions
from glance_store.i18n import _
from glance_store.i18n import _LE
from glance_store import location
LOG = logging.getLogger(__name__)
MAX_REDIRECTS = 5
DEFAULT_STORE_IMAGE_DIR = '/openstack_glance'
DS_URL_PREFIX = '/folder'
STORE_SCHEME = 'vsphere'
# check that datacenter/datastore combination is valid
_datastore_info_valid = False
_VMWARE_OPTS = [
cfg.StrOpt('vmware_server_host',
help=_('ESX/ESXi or vCenter Server target system. '
'The server value can be an IP address or a DNS name.')),
cfg.StrOpt('vmware_server_username',
help=_('Username for authenticating with '
'VMware ESX/VC server.')),
cfg.StrOpt('vmware_server_password',
help=_('Password for authenticating with '
'VMware ESX/VC server.'),
secret=True),
cfg.StrOpt('vmware_datacenter_path',
default='ha-datacenter',
help=_('DEPRECATED. Inventory path to a datacenter. '
'If the vmware_server_host specified is an ESX/ESXi, '
'the vmware_datacenter_path is optional. If specified, '
'it should be "ha-datacenter". This option is '
'deprecated in favor of vmware_datastores and will be '
'removed in the Liberty release.'),
deprecated_for_removal=True),
cfg.StrOpt('vmware_datastore_name',
help=_('DEPRECATED. Datastore associated with the datacenter. '
'This option is deprecated in favor of '
'vmware_datastores and will be removed in the Liberty '
'release.'),
deprecated_for_removal=True),
cfg.IntOpt('vmware_api_retry_count',
default=10,
help=_('Number of times VMware ESX/VC server API must be '
'retried upon connection related issues.')),
cfg.IntOpt('vmware_task_poll_interval',
default=5,
help=_('The interval used for polling remote tasks '
'invoked on VMware ESX/VC server.')),
cfg.StrOpt('vmware_store_image_dir',
default=DEFAULT_STORE_IMAGE_DIR,
help=_('The name of the directory where the glance images '
'will be stored in the VMware datastore.')),
cfg.BoolOpt('vmware_api_insecure',
default=False,
help=_('Allow insecure SSL requests to ESX/VC.')),
cfg.MultiStrOpt(
'vmware_datastores',
help=_(
'A list of datastores where the image can be stored. This option '
'may be specified multiple times for specifying multiple '
'datastores. Either one of vmware_datastore_name or '
'vmware_datastores is required. The datastore name should be '
'specified after its datacenter path, separated by ":". An '
'optional weight may be given after the datastore name, separated '
'again by ":". Thus, the required format becomes '
'<datacenter_path>:<datastore_name>:<optional_weight>. When '
'adding an image, the datastore with highest weight will be '
'selected, unless there is not enough free space available in '
'cases where the image size is already known. If no weight is '
'given, it is assumed to be zero and the directory will be '
'considered for selection last. If multiple datastores have the '
'same weight, then the one with the most free space available is '
'selected.'))]
def http_response_iterator(conn, response, size):
"""Return an iterator for a file-like object.
:param conn: HTTP(S) Connection
:param response: httplib.HTTPResponse object
:param size: Chunk size to iterate with
"""
try:
chunk = response.read(size)
while chunk:
yield chunk
chunk = response.read(size)
finally:
conn.close()
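# Example (illustrative sketch, not part of the original module): streaming a
# datastore file in 64 KiB chunks; conn and resp would normally come from
# _get_http_conn() and getresponse(), and process() is a hypothetical consumer.
#
#     for chunk in http_response_iterator(conn, resp, 64 * units.Ki):
#         process(chunk)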
class _Reader(object):
def __init__(self, data):
self._size = 0
self.data = data
self.checksum = hashlib.md5()
def read(self, size=None):
result = self.data.read(size)
self._size += len(result)
self.checksum.update(result)
return result
@property
def size(self):
return self._size
class _ChunkReader(_Reader):
def __init__(self, data, blocksize=8192):
self.blocksize = blocksize
self.current_chunk = ""
self.closed = False
super(_ChunkReader, self).__init__(data)
def read(self, size=None):
ret = ""
while size is None or size >= len(self.current_chunk):
ret += self.current_chunk
if size is not None:
size -= len(self.current_chunk)
if self.closed:
self.current_chunk = ""
break
self._get_chunk()
else:
ret += self.current_chunk[:size]
self.current_chunk = self.current_chunk[size:]
return ret
def _get_chunk(self):
if not self.closed:
chunk = self.data.read(self.blocksize)
chunk_len = len(chunk)
self._size += chunk_len
self.checksum.update(chunk)
if chunk:
self.current_chunk = '%x\r\n%s\r\n' % (chunk_len, chunk)
else:
self.current_chunk = '0\r\n\r\n'
self.closed = True
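# Example (illustrative, not part of the original module): _ChunkReader wraps
# each block it reads in HTTP/1.1 chunked transfer-encoding framing, i.e.
# "<hex length>\r\n<data>\r\n", and emits the terminating "0\r\n\r\n" once the
# underlying file is exhausted. For a single 5-byte payload the wire format is:
#
#     5\r\nhello\r\n0\r\n\r\n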
class StoreLocation(location.StoreLocation):
"""Class describing an VMware URI.
An VMware URI can look like any of the following:
vsphere://server_host/folder/file_path?dcPath=dc_path&dsName=ds_name
"""
def __init__(self, store_specs, conf):
super(StoreLocation, self).__init__(store_specs, conf)
self.datacenter_path = None
self.datastore_name = None
def process_specs(self):
self.scheme = self.specs.get('scheme', STORE_SCHEME)
self.server_host = self.specs.get('server_host')
self.path = os.path.join(DS_URL_PREFIX,
self.specs.get('image_dir').strip('/'),
self.specs.get('image_id'))
self.datacenter_path = self.specs.get('datacenter_path')
self.datastore_name = self.specs.get('datastore_name')
param_list = {'dsName': self.datastore_name}
if self.datacenter_path:
param_list['dcPath'] = self.datacenter_path
self.query = urlparse.urlencode(param_list)
def get_uri(self):
if netutils.is_valid_ipv6(self.server_host):
base_url = '%s://[%s]%s' % (self.scheme,
self.server_host, self.path)
else:
base_url = '%s://%s%s' % (self.scheme,
self.server_host, self.path)
return '%s?%s' % (base_url, self.query)
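# Example (illustrative, not part of the original module): with the default
# image directory, get_uri() yields a location such as the following (the
# host, datacenter path, datastore name and image id are hypothetical):
#
#     vsphere://vc.example.org/folder/openstack_glance/<image-id>
#         ?dsName=datastore1&dcPath=dc1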
# NOTE(flaper87): Commenting out for now, it's probably better to do
# it during image add/get. This validation relies on a config param
# which doesn't make sense to have in the StoreLocation instance.
# def _is_valid_path(self, path):
# sdir = self.conf.glance_store.vmware_store_image_dir.strip('/')
# return path.startswith(os.path.join(DS_URL_PREFIX, sdir))
def parse_uri(self, uri):
if not uri.startswith('%s://' % STORE_SCHEME):
reason = (_("URI %(uri)s must start with %(scheme)s://") %
{'uri': uri, 'scheme': STORE_SCHEME})
LOG.info(reason)
raise exceptions.BadStoreUri(message=reason)
(self.scheme, self.server_host,
path, params, query, fragment) = urlparse.urlparse(uri)
if not query:
path, query = path.split('?')
self.path = path
self.query = query
# NOTE(flaper87): Read comment on `_is_valid_path`
# reason = 'Badly formed VMware datastore URI %(uri)s.' % {'uri': uri}
# LOG.debug(reason)
# raise exceptions.BadStoreUri(reason)
parts = urlparse.parse_qs(self.query)
dc_path = parts.get('dcPath')
if dc_path:
self.datacenter_path = dc_path[0]
ds_name = parts.get('dsName')
if ds_name:
self.datastore_name = ds_name[0]
class Store(glance_store.Store):
"""An implementation of the VMware datastore adapter."""
_CAPABILITIES = (capabilities.BitMasks.RW_ACCESS |
capabilities.BitMasks.DRIVER_REUSABLE)
OPTIONS = _VMWARE_OPTS
WRITE_CHUNKSIZE = units.Mi
def __init__(self, conf):
super(Store, self).__init__(conf)
self.datastores = {}
def reset_session(self):
self.session = api.VMwareAPISession(
self.server_host, self.server_username, self.server_password,
self.api_retry_count, self.tpoll_interval)
return self.session
def get_schemes(self):
return (STORE_SCHEME,)
def _sanity_check(self):
if self.conf.glance_store.vmware_api_retry_count <= 0:
msg = _('vmware_api_retry_count should be greater than zero')
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
if self.conf.glance_store.vmware_task_poll_interval <= 0:
msg = _('vmware_task_poll_interval should be greater than zero')
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
if not (self.conf.glance_store.vmware_datastore_name
or self.conf.glance_store.vmware_datastores):
msg = (_("Specify at least 'vmware_datastore_name' or "
"'vmware_datastores' option"))
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
if (self.conf.glance_store.vmware_datastore_name and
self.conf.glance_store.vmware_datastores):
msg = (_("Specify either 'vmware_datastore_name' or "
"'vmware_datastores' option"))
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
def configure(self, re_raise_bsc=False):
self._sanity_check()
self.scheme = STORE_SCHEME
self.server_host = self._option_get('vmware_server_host')
self.server_username = self._option_get('vmware_server_username')
self.server_password = self._option_get('vmware_server_password')
self.api_retry_count = self.conf.glance_store.vmware_api_retry_count
self.tpoll_interval = self.conf.glance_store.vmware_task_poll_interval
self.api_insecure = self.conf.glance_store.vmware_api_insecure
if api is None:
msg = _("Missing dependencies: oslo_vmware")
raise exceptions.BadStoreConfiguration(
store_name="vmware_datastore", reason=msg)
self.session = self.reset_session()
super(Store, self).configure(re_raise_bsc=re_raise_bsc)
def _get_datacenter(self, datacenter_path):
search_index_moref = self.session.vim.service_content.searchIndex
dc_moref = self.session.invoke_api(
self.session.vim,
'FindByInventoryPath',
search_index_moref,
inventoryPath=datacenter_path)
dc_name = datacenter_path.rsplit('/', 1)[-1]
# TODO(sabari): Add datacenter_path attribute in oslo.vmware
dc_obj = oslo_datacenter.Datacenter(ref=dc_moref, name=dc_name)
dc_obj.path = datacenter_path
return dc_obj
def _get_datastore(self, datacenter_path, datastore_name):
dc_obj = self._get_datacenter(datacenter_path)
datastore_ret = self.session.invoke_api(
vim_util, 'get_object_property', self.session.vim, dc_obj.ref,
'datastore')
if datastore_ret:
datastore_refs = datastore_ret.ManagedObjectReference
for ds_ref in datastore_refs:
ds_obj = oslo_datastore.get_datastore_by_ref(self.session,
ds_ref)
if ds_obj.name == datastore_name:
ds_obj.datacenter = dc_obj
return ds_obj
def _get_freespace(self, ds_obj):
# TODO(sabari): Move this function into oslo_vmware's datastore object.
return self.session.invoke_api(
vim_util, 'get_object_property', self.session.vim, ds_obj.ref,
'summary.freeSpace')
def _parse_datastore_info_and_weight(self, datastore):
weight = 0
parts = [part.strip() for part in datastore.rsplit(":", 2)]
if len(parts) < 2:
msg = _('vmware_datastores format must be '
'datacenter_path:datastore_name:weight or '
'datacenter_path:datastore_name')
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
if len(parts) == 3 and parts[2]:
weight = parts[2]
if not weight.isdigit():
msg = (_('Invalid weight value %(weight)s in '
'vmware_datastores configuration') %
{'weight': weight})
LOG.exception(msg)
raise exceptions.BadStoreConfiguration(
store_name="vmware_datastore", reason=msg)
datacenter_path, datastore_name = parts[0], parts[1]
if not datacenter_path or not datastore_name:
msg = _('Invalid datacenter_path or datastore_name specified '
'in vmware_datastores configuration')
LOG.exception(msg)
raise exceptions.BadStoreConfiguration(
store_name="vmware_datastore", reason=msg)
return datacenter_path, datastore_name, weight
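# Example (illustrative, not part of the original module): how a
# vmware_datastores entry is split; the datacenter path and datastore name
# below are made up. Note the weight is returned as-is (a string when given,
# the integer 0 when omitted) and converted with int() by the caller.
#
#     self._parse_datastore_info_and_weight('dc-1/folder-a:datastore-1:100')
#     # -> ('dc-1/folder-a', 'datastore-1', '100')
#     self._parse_datastore_info_and_weight('dc-1:datastore-1')
#     # -> ('dc-1', 'datastore-1', 0)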
def _build_datastore_weighted_map(self, datastores):
"""Build an ordered map where the key is a weight and the value is a
Datastore object.
:param: a list of datastores in the format
datacenter_path:datastore_name:weight
:return: a map with key-value <weight>:<Datastore>
"""
ds_map = {}
for ds in datastores:
dc_path, name, weight = self._parse_datastore_info_and_weight(ds)
# Fetch the server side reference.
ds_obj = self._get_datastore(dc_path, name)
if not ds_obj:
msg = (_("Could not find datastore %(ds_name)s "
"in datacenter %(dc_path)s")
% {'ds_name': name,
'dc_path': dc_path})
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
ds_map.setdefault(int(weight), []).append(ds_obj)
return ds_map
def configure_add(self):
if self.conf.glance_store.vmware_datastores:
datastores = self.conf.glance_store.vmware_datastores
else:
# Backwards compatibility for vmware_datastore_name and
# vmware_datacenter_path.
datacenter_path = self.conf.glance_store.vmware_datacenter_path
datastore_name = self._option_get('vmware_datastore_name')
datastores = ['%s:%s:%s' % (datacenter_path, datastore_name, 0)]
self.datastores = self._build_datastore_weighted_map(datastores)
self.store_image_dir = self.conf.glance_store.vmware_store_image_dir
def select_datastore(self, image_size):
"""Select a datastore with free space larger than image size."""
for k, v in sorted(six.iteritems(self.datastores), reverse=True):
max_ds = None
max_fs = 0
for ds in v:
# Update with current freespace
ds.freespace = self._get_freespace(ds)
if ds.freespace > max_fs:
max_ds = ds
max_fs = ds.freespace
if max_ds and max_ds.freespace >= image_size:
return max_ds
msg = _LE("No datastore found with enough free space to contain an "
"image of size %d") % image_size
LOG.error(msg)
raise exceptions.StorageFull()
def _option_get(self, param):
result = getattr(self.conf.glance_store, param)
if not result:
reason = (_("Could not find %(param)s in configuration "
"options.") % {'param': param})
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=reason)
return result
def _build_vim_cookie_header(self, verify_session=False):
"""Build ESX host session cookie header."""
if verify_session and not self.session.is_current_session_active():
self.reset_session()
vim_cookies = self.session.vim.client.options.transport.cookiejar
if len(list(vim_cookies)) > 0:
cookie = list(vim_cookies)[0]
return cookie.name + '=' + cookie.value
@capabilities.check
def add(self, image_id, image_file, image_size, context=None):
"""Stores an image file with supplied identifier to the backend
storage system and returns a tuple containing information
about the stored image.
:param image_id: The opaque image identifier
:param image_file: The image data to write, as a file-like object
:param image_size: The size of the image data to write, in bytes
:retval tuple of URL in backing store, bytes written, checksum
and a dictionary with storage system specific information
:raises `glance.common.exceptions.Duplicate` if the image already
existed
`glance.common.exceptions.UnexpectedStatus` if the upload
request returned an unexpected status. The expected responses
are 201 Created and 200 OK.
"""
ds = self.select_datastore(image_size)
if image_size > 0:
headers = {'Content-Length': image_size}
image_file = _Reader(image_file)
else:
# NOTE (arnaud): use chunk encoding when the image is still being
# generated by the server (ex: stream optimized disks generated by
# Nova).
headers = {'Transfer-Encoding': 'chunked'}
image_file = _ChunkReader(image_file)
loc = StoreLocation({'scheme': self.scheme,
'server_host': self.server_host,
'image_dir': self.store_image_dir,
'datacenter_path': ds.datacenter.path,
'datastore_name': ds.name,
'image_id': image_id}, self.conf)
# NOTE(arnaud): use a decorator when the config is not tied to self
cookie = self._build_vim_cookie_header(True)
headers = dict(headers)
headers['Cookie'] = cookie
conn_class = self._get_http_conn_class()
conn = conn_class(loc.server_host)
url = urlparse.quote('%s?%s' % (loc.path, loc.query))
try:
conn.request('PUT', url, image_file, headers)
except IOError as e:
# When a session is not authenticated, the socket is closed by
# the server after sending the response. httplib has an open
# issue with https that raises Broken Pipe
# error instead of returning the response.
# See http://bugs.python.org/issue16062. Here, we log the error
# and continue to look into the response.
msg = _LE('Communication error sending http %(method)s request '
'to the url %(url)s.\n'
'Got IOError %(e)s') % {'method': 'PUT',
'url': url,
'e': e}
LOG.error(msg)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to upload content of image '
'%(image)s'), {'image': image_id})
res = conn.getresponse()
if res.status == httplib.CONFLICT:
raise exceptions.Duplicate(_("Image file %(image_id)s already "
"exists!") %
{'image_id': image_id})
if res.status not in (httplib.CREATED, httplib.OK):
msg = (_LE('Failed to upload content of image %(image)s. '
'The request returned an unexpected status: %(status)s.'
'\nThe response body:\n%(body)s') %
{'image': image_id,
'status': res.status,
'body': res.read()})
LOG.error(msg)
raise exceptions.BackendException(msg)
return (loc.get_uri(), image_file.size,
image_file.checksum.hexdigest(), {})
@capabilities.check
def get(self, location, offset=0, chunk_size=None, context=None):
"""Takes a `glance_store.location.Location` object that indicates
where to find the image file, and returns a tuple of generator
(for reading the image file) and image_size
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
"""
conn, resp, content_length = self._query(location, 'GET')
iterator = http_response_iterator(conn, resp, self.READ_CHUNKSIZE)
class ResponseIndexable(glance_store.Indexable):
def another(self):
try:
return next(self.wrapped)
except StopIteration:
return ''
return (ResponseIndexable(iterator, content_length), content_length)
def get_size(self, location, context=None):
"""Takes a `glance_store.location.Location` object that indicates
where to find the image file, and returns the size
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
"""
return self._query(location, 'HEAD')[2]
@capabilities.check
def delete(self, location, context=None):
"""Takes a `glance_store.location.Location` object that indicates
where to find the image file to delete
:location `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:raises NotFound if image does not exist
"""
file_path = '[%s] %s' % (
location.store_location.datastore_name,
location.store_location.path[len(DS_URL_PREFIX):])
dc_obj = self._get_datacenter(location.store_location.datacenter_path)
delete_task = self.session.invoke_api(
self.session.vim,
'DeleteDatastoreFile_Task',
self.session.vim.service_content.fileManager,
name=file_path,
datacenter=dc_obj.ref)
try:
self.session.wait_for_task(delete_task)
except vexc.FileNotFoundException:
msg = _('Image file %s not found') % file_path
LOG.warn(msg)
raise exceptions.NotFound(message=msg)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to delete image %(image)s '
'content.') % {'image': location.image_id})
def _query(self, location, method, depth=0):
if depth > MAX_REDIRECTS:
msg = ("The HTTP URL exceeded %(max_redirects)s maximum "
"redirects.", {'max_redirects': MAX_REDIRECTS})
LOG.debug(msg)
raise exceptions.MaxRedirectsExceeded(redirects=MAX_REDIRECTS)
loc = location.store_location
# NOTE(arnaud): use a decorator when the config is not tied to self
for i in range(self.api_retry_count + 1):
cookie = self._build_vim_cookie_header()
headers = {'Cookie': cookie}
try:
conn = self._get_http_conn(method, loc, headers)
resp = conn.getresponse()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to access image %(image)s '
'content.') % {'image':
location.image_id})
if resp.status >= 400:
if resp.status == httplib.UNAUTHORIZED:
self.reset_session()
continue
if resp.status == httplib.NOT_FOUND:
reason = _('VMware datastore could not find image at URI.')
LOG.info(reason)
raise exceptions.NotFound(message=reason)
msg = ('HTTP request returned a %(status)s status code.'
% {'status': resp.status})
LOG.debug(msg)
raise exceptions.BadStoreUri(msg)
break
location_header = resp.getheader('location')
if location_header:
if resp.status not in (301, 302):
reason = (_("The HTTP URL %(path)s attempted to redirect "
"with an invalid %(status)s status code.")
% {'path': loc.path, 'status': resp.status})
LOG.info(reason)
raise exceptions.BadStoreUri(message=reason)
location_class = glance_store.location.Location
new_loc = location_class(location.store_name,
location.store_location.__class__,
uri=location_header,
image_id=location.image_id,
store_specs=location.store_specs)
return self._query(new_loc, method, depth + 1)
content_length = int(resp.getheader('content-length', 0))
return (conn, resp, content_length)
def _get_http_conn(self, method, loc, headers, content=None):
conn_class = self._get_http_conn_class()
conn = conn_class(loc.server_host)
url = urlparse.quote('%s?%s' % (loc.path, loc.query))
conn.request(method, url, content, headers)
return conn
def _get_http_conn_class(self):
if self.api_insecure:
return httplib.HTTPConnection
return httplib.HTTPSConnection
import unittest
import os
import os.path
from robot.utils import abspath, normpath, get_link_path, WINDOWS, PY3
from robot.utils.robotpath import CASE_INSENSITIVE_FILESYSTEM
from robot.utils.asserts import assert_equal, assert_true
if PY3:
unicode = str
class TestAbspathNormpath(unittest.TestCase):
def test_abspath(self):
for inp, exp in self._get_inputs():
exp = os.path.abspath(exp)
path = abspath(inp)
assert_equal(path, exp, inp)
assert_true(isinstance(path, unicode), inp)
exp = exp.lower() if CASE_INSENSITIVE_FILESYSTEM else exp
path = abspath(inp, case_normalize=True)
assert_equal(path, exp, inp)
assert_true(isinstance(path, unicode), inp)
def test_abspath_when_cwd_is_non_ascii(self):
orig = abspath('.')
nonasc = u'\xe4'
os.mkdir(nonasc)
os.chdir(nonasc)
try:
assert_equal(abspath('.'), orig + os.sep + nonasc)
finally:
os.chdir('..')
os.rmdir(nonasc)
if WINDOWS:
unc_path = r'\\server\D$\dir\.\f1\..\\f2'
unc_exp = r'\\server\D$\dir\f2'
def test_unc_path(self):
assert_equal(abspath(self.unc_path), self.unc_exp)
def test_unc_path_when_chdir_is_root(self):
orig = abspath('.')
os.chdir('\\')
try:
assert_equal(abspath(self.unc_path), self.unc_exp)
finally:
os.chdir(orig)
def test_add_drive(self):
drive = os.path.abspath(__file__)[:2]
for path in ['.', os.path.basename(__file__), r'\abs\path']:
abs = abspath(path)
assert_equal(abs, os.path.abspath(path))
assert_true(abs.startswith(drive))
def test_normpath(self):
for inp, exp in self._get_inputs():
path = normpath(inp)
assert_equal(path, exp, inp)
assert_true(isinstance(path, unicode), inp)
exp = exp.lower() if CASE_INSENSITIVE_FILESYSTEM else exp
path = normpath(inp, case_normalize=True)
assert_equal(path, exp, inp)
assert_true(isinstance(path, unicode), inp)
def _get_inputs(self):
inputs = self._windows_inputs if WINDOWS else self._posix_inputs
for inp, exp in inputs():
yield inp, exp
if inp not in ['', os.sep]:
for ext in [os.sep, os.sep+'.', os.sep+'.'+os.sep]:
yield inp + ext, exp
if inp.endswith(os.sep):
for ext in ['.', '.'+os.sep, '.'+os.sep+'.']:
yield inp + ext, exp
yield inp + 'foo' + os.sep + '..', exp
def _posix_inputs(self):
return [('/tmp/', '/tmp'),
('/var/../opt/../tmp/.', '/tmp'),
('/non/Existing/..', '/non'),
('/', '/')] + self._generic_inputs()
def _windows_inputs(self):
inputs = [('c:\\temp', 'c:\\temp'),
('C:\\TEMP\\', 'C:\\TEMP'),
('C:\\xxx\\..\\yyy\\..\\temp\\.', 'C:\\temp'),
('c:\\Non\\Existing\\..', 'c:\\Non')]
for x in 'ABCDEFGHIJKLMNOPQRSTUVXYZ':
base = '%s:\\' % x
inputs.append((base, base))
inputs.append((base.lower(), base.lower()))
inputs.append((base[:2], base))
inputs.append((base[:2].lower(), base.lower()))
inputs.append((base+'\\foo\\..\\.\\BAR\\\\', base+'BAR'))
inputs += [(inp.replace('/', '\\'), exp) for inp, exp in inputs]
for inp, exp in self._generic_inputs():
exp = exp.replace('/', '\\')
inputs.extend([(inp, exp), (inp.replace('/', '\\'), exp)])
return inputs
def _generic_inputs(self):
return [('', '.'),
('.', '.'),
('./', '.'),
('..', '..'),
('../', '..'),
('../..', '../..'),
('foo', 'foo'),
('foo/bar', 'foo/bar'),
(u'\xe4', u'\xe4'),
(u'\xe4/\xf6', u'\xe4/\xf6'),
('./foo', 'foo'),
('foo/.', 'foo'),
('foo/..', '.'),
('foo/../bar', 'bar'),
('foo/bar/zap/..', 'foo/bar')]
class TestGetLinkPath(unittest.TestCase):
def test_basics(self):
for base, target, expected in self._get_basic_inputs():
assert_equal(get_link_path(target, base).replace('R:', 'r:'),
expected, '%s -> %s' % (target, base))
def test_base_is_existing_file(self):
assert_equal(get_link_path(os.path.dirname(__file__), __file__), '.')
assert_equal(get_link_path(__file__, __file__),
self._expected_basename(__file__))
def test_non_existing_paths(self):
assert_equal(get_link_path('/nonex/target', '/nonex/base'), '../target')
assert_equal(get_link_path('/nonex/t.ext', '/nonex/b.ext'), '../t.ext')
assert_equal(get_link_path('/nonex', __file__),
os.path.relpath('/nonex', os.path.dirname(__file__)).replace(os.sep, '/'))
def test_non_ascii_paths(self):
assert_equal(get_link_path(u'\xe4\xf6.txt', ''), '%C3%A4%C3%B6.txt')
assert_equal(get_link_path(u'\xe4/\xf6.txt', u'\xe4'), '%C3%B6.txt')
def _get_basic_inputs(self):
directory = os.path.dirname(__file__)
inputs = [(directory, __file__, self._expected_basename(__file__)),
(directory, directory, '.'),
(directory, directory + '/Non/Ex', 'Non/Ex'),
(directory, directory + '/..', '..'),
(directory, directory + '/../X', '../X'),
(directory, directory + '/./.././..', '../..'),
(directory, '.',
os.path.relpath('.', directory).replace(os.sep, '/'))]
platform_inputs = (self._posix_inputs() if os.sep == '/' else
self._windows_inputs())
return inputs + platform_inputs
def _expected_basename(self, path):
return os.path.basename(path).replace('$py.class', '%24py.class')
def _posix_inputs(self):
return [('/tmp/', '/tmp/bar.txt', 'bar.txt'),
('/tmp', '/tmp/x/bar.txt', 'x/bar.txt'),
('/tmp/', '/tmp/x/y/bar.txt', 'x/y/bar.txt'),
('/tmp/', '/tmp/x/y/z/bar.txt', 'x/y/z/bar.txt'),
('/tmp', '/x/y/z/bar.txt', '../x/y/z/bar.txt'),
('/tmp/', '/x/y/z/bar.txt', '../x/y/z/bar.txt'),
('/tmp', '/x/bar.txt', '../x/bar.txt'),
('/tmp', '/x/y/z/bar.txt', '../x/y/z/bar.txt'),
('/', '/x/bar.txt', 'x/bar.txt'),
('/path/to', '/path/to/result_in_same_dir.html',
'result_in_same_dir.html'),
('/path/to/dir', '/path/to/result_in_parent_dir.html',
'../result_in_parent_dir.html'),
('/path/to', '/path/to/dir/result_in_sub_dir.html',
'dir/result_in_sub_dir.html'),
('/commonprefix/sucks/baR', '/commonprefix/sucks/baZ.txt',
'../baZ.txt'),
('/a/very/long/path', '/no/depth/limitation',
'../../../../no/depth/limitation'),
('/etc/hosts', '/path/to/existing/file',
'../path/to/existing/file'),
('/path/to/identity', '/path/to/identity', '.')]
def _windows_inputs(self):
return [('c:\\temp\\', 'c:\\temp\\bar.txt', 'bar.txt'),
('c:\\temp', 'c:\\temp\\x\\bar.txt', 'x/bar.txt'),
('c:\\temp\\', 'c:\\temp\\x\\y\\bar.txt', 'x/y/bar.txt'),
('c:\\temp', 'c:\\temp\\x\\y\\z\\bar.txt', 'x/y/z/bar.txt'),
('c:\\temp\\', 'c:\\x\\y\\bar.txt', '../x/y/bar.txt'),
('c:\\temp', 'c:\\x\\y\\bar.txt', '../x/y/bar.txt'),
('c:\\temp', 'c:\\x\\bar.txt', '../x/bar.txt'),
('c:\\temp', 'c:\\x\\y\\z\\bar.txt', '../x/y/z/bar.txt'),
('c:\\temp\\', 'r:\\x\\y\\bar.txt', 'file:///r:/x/y/bar.txt'),
('c:\\', 'c:\\x\\bar.txt', 'x/bar.txt'),
('c:\\path\\to', 'c:\\path\\to\\result_in_same_dir.html',
'result_in_same_dir.html'),
('c:\\path\\to\\dir', 'c:\\path\\to\\result_in_parent.dir',
'../result_in_parent.dir'),
('c:\\path\\to', 'c:\\path\\to\\dir\\result_in_sub_dir.html',
'dir/result_in_sub_dir.html'),
('c:\\commonprefix\\sucks\\baR',
'c:\\commonprefix\\sucks\\baZ.txt', '../baZ.txt'),
('c:\\a\\very\\long\\path', 'c:\\no\\depth\\limitation',
'../../../../no/depth/limitation'),
('c:\\windows\\explorer.exe',
'c:\\windows\\path\\to\\existing\\file',
'path/to/existing/file'),
('c:\\path\\2\\identity', 'c:\\path\\2\\identity', '.')]
if __name__ == '__main__':
unittest.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for Shared Network functionality with Nuage VSP SDN plugin:
Public Shared Network IP Range
"""
# Import Local Modules
from nuageTestCase import nuageTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.common import list_templates
from marvin.lib.base import (Account,
Domain,
User,
VirtualMachine,
Network,
NetworkOffering)
from marvin.cloudstackAPI import (createVlanIpRange,
listVlanIpRanges,
deleteVlanIpRange,
updateTemplate)
# Import System Modules
from nose.plugins.attrib import attr
import random
import string
class TestNuageSharedNetworkUserdata(nuageTestCase):
"""Test Shared Network functionality with Nuage VSP SDN plugin:
Public Shared Network IP Range
"""
@classmethod
def setUpClass(cls):
"""
Create the following domain tree and accounts that are required for
executing Nuage VSP SDN plugin test cases for shared networks:
Under ROOT - create domain D1
Under domain D1 - Create two subdomains D11 and D12
Under each of the domains - create one admin user and a couple of
regular users.
Create shared network with the following scope:
1. Network with scope="all"
2. Network with scope="domain" with no subdomain access
3. Network with scope="domain" with subdomain access
4. Network with scope="account"
"""
super(TestNuageSharedNetworkUserdata, cls).setUpClass()
cls.sharednetworkdata = cls.test_data["acl"]
cls.nuagenetworkdata = cls.test_data["nuagevsp"]
cls.domain_1 = None
cls.domain_2 = None
try:
# backup default apikey and secretkey
cls.default_apikey = cls.api_client.connection.apiKey
cls.default_secretkey = cls.api_client.connection.securityKey
# Create domains
cls.domain_1 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain1"]
)
cls.domain_11 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain11"],
parentdomainid=cls.domain_1.id
)
cls.domain_12 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain12"],
parentdomainid=cls.domain_1.id
)
# Create 1 admin account and 2 user accounts for domain_1
cls.account_d1 = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD1"],
admin=True,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d1)
cls.user_d1_apikey = user.apikey
cls.user_d1_secretkey = user.secretkey
cls.account_d1a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD1A"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d1a)
cls.user_d1a_apikey = user.apikey
cls.user_d1a_secretkey = user.secretkey
cls.account_d1b = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD1B"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d1b)
cls.user_d1b_apikey = user.apikey
cls.user_d1b_secretkey = user.secretkey
# Create 1 admin and 2 user accounts for domain_11
cls.account_d11 = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD11"],
admin=True,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d11)
cls.user_d11_apikey = user.apikey
cls.user_d11_secretkey = user.secretkey
cls.account_d11a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD11A"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d11a)
cls.user_d11a_apikey = user.apikey
cls.user_d11a_secretkey = user.secretkey
cls.account_d11b = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD11B"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d11b)
cls.user_d11b_apikey = user.apikey
cls.user_d11b_secretkey = user.secretkey
# Create 2 user accounts for domain_12
cls.account_d12a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD12A"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d12a)
cls.user_d12a_apikey = user.apikey
cls.user_d12a_secretkey = user.secretkey
cls.account_d12b = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD12B"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d12b)
cls.user_d12b_apikey = user.apikey
cls.user_d12b_secretkey = user.secretkey
# Create 1 user account and admin account in "ROOT" domain
cls.account_roota = Account.create(
cls.api_client,
cls.sharednetworkdata["accountROOTA"],
admin=False,
)
user = cls.generateKeysForUser(cls.api_client, cls.account_roota)
cls.user_roota_apikey = user.apikey
cls.user_roota_secretkey = user.secretkey
cls.account_root = Account.create(
cls.api_client,
cls.sharednetworkdata["accountROOTA"],
admin=True,
)
user = cls.generateKeysForUser(cls.api_client, cls.account_root)
cls.user_root_apikey = user.apikey
cls.user_root_secretkey = user.secretkey
# service offering is already created in Nuagetestcase
cls.sharednetworkdata['mode'] = cls.zone.networktype
# As admin user, create shared networks with scope "all", "domain"
# with subdomain access, "domain" without subdomain access and
# "account"
cls.api_client.connection.apiKey = cls.default_apikey
cls.api_client.connection.securityKey = cls.default_secretkey
cls.nuagenetworkdata["shared_nuage_public_network_offering"][
"serviceProviderList"].update({"UserData": 'VirtualRouter'})
cls.nuagenetworkdata["shared_nuage_public_network_offering"][
"supportedservices"] = 'Dhcp,Connectivity,UserData'
for key, value in cls.test_data["nuagevsp"][
"shared_nuage_public_network_offering"]["serviceProviderList"]\
.iteritems():
cls.debug("elements are %s and value is %s" % (key, value))
cls.shared_network_offering = NetworkOffering.create(
cls.api_client,
cls.nuagenetworkdata["shared_nuage_public_network_offering"],
conservemode=False
)
# Enable Network offering
cls.shared_network_offering.update(cls.api_client, state='Enabled')
cls.shared_network_offering_id = cls.shared_network_offering.id
cls.shared_network_all = Network.create(
cls.api_client,
cls.nuagenetworkdata["network_all"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id
)
cls.shared_network_domain_d11 = Network.create(
cls.api_client,
cls.nuagenetworkdata["network_all"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id,
domainid=cls.domain_11.id,
subdomainaccess=False
)
cls.shared_network_domain_with_subdomain_d11 = Network.create(
cls.api_client,
cls.nuagenetworkdata["network_all"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id,
domainid=cls.domain_11.id,
subdomainaccess=True
)
cls.shared_network_account_d111a = Network.create(
cls.api_client,
cls.nuagenetworkdata["network_all"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id,
domainid=cls.domain_11.id,
accountid=cls.account_d11a.user[0].username
)
cls._cleanup = [
cls.account_root,
cls.account_roota,
cls.shared_network_all,
cls.shared_network_offering,
cls.service_offering,
]
user_data = ''.join(random.choice(
string.ascii_uppercase + string.digits) for x in range(2500))
cls.test_data["virtual_machine"]["userdata"] = user_data
except Exception as e:
cls.domain_1.delete(cls.api_client, cleanup="true")
cleanup_resources(cls.api_client, cls._cleanup)
raise Exception("Failed to create the setup required to execute "
"the test cases: %s" % e)
return
@classmethod
def tearDownClass(cls):
cls.api_client.connection.apiKey = cls.default_apikey
cls.api_client.connection.securityKey = cls.default_secretkey
cleanup_resources(cls.api_client, cls._cleanup)
return
def setUp(self):
self.api_client = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
# restore back default apikey and secretkey
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.debug("Cleaning up the resources")
for obj in reversed(self.cleanup):
try:
if isinstance(obj, VirtualMachine):
obj.delete(self.api_client, expunge=True)
else:
obj.delete(self.api_client)
except Exception as e:
self.error("Failed to cleanup %s, got %s" % (obj, e))
# cleanup_resources(self.api_client, self.cleanup)
self.cleanup = []
self.debug("Cleanup complete!")
self.updateTemplate(False)
return
def add_subnet_verify(self, network, services):
"""verify required nic is present in the VM"""
self.debug("Going to add new ip range in shared network %s" %
network.name)
cmd = createVlanIpRange.createVlanIpRangeCmd()
cmd.networkid = network.id
cmd.gateway = services["gateway"]
cmd.netmask = services["netmask"]
cmd.startip = services["startip"]
cmd.endip = services["endip"]
cmd.forVirtualNetwork = services["forvirtualnetwork"]
addedsubnet = self.api_client.createVlanIpRange(cmd)
self.debug("verify above iprange is successfully added in shared "
"network %s or not" % network.name)
cmd1 = listVlanIpRanges.listVlanIpRangesCmd()
cmd1.networkid = network.id
cmd1.id = addedsubnet.vlan.id
allsubnets = self.api_client.listVlanIpRanges(cmd1)
self.assertEqual(
allsubnets[0].id,
addedsubnet.vlan.id,
"Check New subnet is successfully added to the shared Network"
)
return addedsubnet
def delete_subnet_verify(self, network, subnet):
"""verify required nic is present in the VM"""
self.debug("Going to delete ip range in shared network %s" %
network.name)
cmd = deleteVlanIpRange.deleteVlanIpRangeCmd()
cmd.id = subnet.vlan.id
self.api_client.deleteVlanIpRange(cmd)
self.debug("verify above iprange is successfully deleted from shared "
"network %s or not" % network.name)
cmd1 = listVlanIpRanges.listVlanIpRangesCmd()
cmd1.networkid = network.id
cmd1.id = subnet.vlan.id
try:
allsubnets = self.api_client.listVlanIpRanges(cmd1)
self.assertEqual(
allsubnets[0].id,
subnet.vlan.id,
"Check Subnet is not present to the shared Network"
)
self.fail("iprange is not successfully deleted from shared "
"network %s" % network.name)
except Exception as e:
self.debug("iprange is successfully deleted from shared "
"network %s" % network.name)
self.debug("exception msg is %s" % e)
def shared_subnet_not_present(self, network, subnetid):
shared_resources = self.vsd.get_shared_network_resource(
filter=self.get_externalID_filter(subnetid))
try:
self.assertEqual(shared_resources.description, network.name,
"VSD shared resources description should match "
"network name in CloudStack"
)
self.fail("still shared resource are present on VSD")
except Exception as e:
self.debug("sharedNetwork resources is successfully deleted from "
"VSD")
self.debug("exception msg is %s" % e)
# updateTemplate - Updates value of the guest VM template's password
# enabled setting
def updateTemplate(self, value):
self.debug("Updating value of guest VM template's password enabled "
"setting")
cmd = updateTemplate.updateTemplateCmd()
cmd.id = self.template.id
cmd.passwordenabled = value
self.api_client.updateTemplate(cmd)
list_template_response = list_templates(self.api_client,
templatefilter="all",
id=self.template.id)
self.template = list_template_response[0]
self.debug("Updated guest VM template")
# Test cases relating to VR IP check on Shared Network
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_01_verify_deployvm_fail_startip_sharednetwork_scope_all(self):
"""Validate that deploy vm fails if user specify the first ip of subnet
because that is reserved for VR shared network with scope=all
"""
# Add vm as start ip of subnet
self.debug("Adding VM as start IP of Subnet")
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["network_all"]["startip"]
try:
self.create_VM(self.shared_network_all, account=self.account_d11a)
self.fail("VM with subnet start IP is deployed successfully")
except Exception as e:
self.debug("Deploy vm fails as expected with exception %s" % e)
self.debug("Going to verify the exception message")
exceptionmsg = "Unable to start a VM due to insufficient capacity"
if exceptionmsg in str(e):
self.debug("correct exception is raised")
else:
self.fail("correct exception is not raised")
# Test cases relating to add/delete Shared Network IP ranges
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_02_add_delete_Subnet_restart_public_sharednetwork_scope_all(self):
"""Validate that subnet of same gateway can be added to shared network
with scope=all and restart network with clean up works
"""
self.debug("Deploy VM to shared Network scope as all")
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["network_all"]["endip"]
vm_1 = self.create_VM(
self.shared_network_all, account=self.account_d11a)
# Verify shared Network and VM in VSD
self.verify_vsd_shared_network(
self.account_d11a.domainid, self.shared_network_all,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_all.id,
self.nuagenetworkdata["network_all"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11a.domainid, self.shared_network_all, vm_1,
sharedsubnetid=subnet_id)
# verify VR
vr = self.get_Router(self.shared_network_all)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
# Add subnet with same cidr
self.debug("Adding subnet of same cidr to shared Network scope as all")
subnet1 = self.add_subnet_verify(
self.shared_network_all, self.nuagenetworkdata["publiciprange3"])
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["publiciprange3"]["startip"]
vm_2 = self.create_VM(
self.shared_network_all, account=self.account_d11a)
# verify on VSD
self.verify_vsd_shared_network(
self.account_d11a.domainid, self.shared_network_all,
gateway=self.nuagenetworkdata["publiciprange3"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_all.id,
self.nuagenetworkdata["publiciprange3"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11a.domainid, self.shared_network_all, vm_2,
sharedsubnetid=subnet_id)
# Restart network with cleanup
self.debug("Restarting shared Network with cleanup")
self.shared_network_all.restart(self.api_client, cleanup=True)
self.debug("validating SharedNetwork on VSD")
self.verify_vsd_shared_network(
self.account_d11a.domainid, self.shared_network_all,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
# verify VR
vr = self.get_Router(self.shared_network_all)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
# put ping here
self.delete_VM(vm_1)
self.delete_VM(vm_2)
self.delete_subnet_verify(self.shared_network_all, subnet1)
# Test cases relating to add/delete Shared Network IP ranges
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_03_add_delete_Subnet_restart_sharednetwork_scope_domain(self):
"""Validate that subnet of same gateway can be added to shared network
with scope=all and restart network with clean up works
"""
self.debug("Deploy VM to shared Network scope domain as all")
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["network_all"]["endip"]
vm_1 = self.create_VM(
self.shared_network_domain_with_subdomain_d11,
account=self.account_d11a)
# Verify shared Network and VM in VSD
self.verify_vsd_shared_network(
self.account_d11a.domainid,
self.shared_network_domain_with_subdomain_d11,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_domain_with_subdomain_d11.id,
self.nuagenetworkdata["network_all"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11a.domainid,
self.shared_network_domain_with_subdomain_d11, vm_1,
sharedsubnetid=subnet_id)
# verify VR
vr = self.get_Router(self.shared_network_domain_with_subdomain_d11)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
# Add subnet with same cidr
self.debug("Adding subnet of same cidr to shared Network scope as all")
subnet1 = self.add_subnet_verify(
self.shared_network_domain_with_subdomain_d11,
self.nuagenetworkdata["publiciprange3"])
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["publiciprange3"]["startip"]
vm_2 = self.create_VM(
self.shared_network_domain_with_subdomain_d11,
account=self.account_d11a)
# VSD check points
self.verify_vsd_shared_network(
self.account_d11a.domainid,
self.shared_network_domain_with_subdomain_d11,
gateway=self.nuagenetworkdata["publiciprange3"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_domain_with_subdomain_d11.id,
self.nuagenetworkdata["publiciprange3"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11a.domainid,
self.shared_network_domain_with_subdomain_d11, vm_2,
sharedsubnetid=subnet_id)
# Restart network with cleanup
self.debug("Restarting shared Network with cleanup")
self.shared_network_domain_with_subdomain_d11.restart(self.api_client,
cleanup=True)
self.debug("validating SharedNetwork on VSD")
self.verify_vsd_shared_network(
self.account_d11a.domainid,
self.shared_network_domain_with_subdomain_d11,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
# verify VR
vr = self.get_Router(self.shared_network_domain_with_subdomain_d11)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
# put ping here
self.delete_VM(vm_1)
self.delete_VM(vm_2)
self.delete_subnet_verify(
self.shared_network_domain_with_subdomain_d11, subnet1)
# Test cases relating to add/delete Shared Network IP ranges
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_04_add_delete_Subnet_restart_scope_domain_nosubdomain(self):
"""Validate that subnet of same gateway can be added to shared network
with scope domain nosubdomain and restart network with clean up works
"""
self.debug("Deploy VM to shared Network scope domain no subdomain")
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["network_all"]["endip"]
vm_1 = self.create_VM(
self.shared_network_domain_d11, account=self.account_d11a)
# Verify shared Network and VM in VSD
self.verify_vsd_shared_network(
self.account_d11a.domainid, self.shared_network_domain_d11,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_domain_d11.id,
self.nuagenetworkdata["network_all"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11a.domainid, self.shared_network_domain_d11, vm_1,
sharedsubnetid=subnet_id)
# verify VR
vr = self.get_Router(self.shared_network_domain_d11)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
# Add subnet with same cidr
self.debug("Adding subnet of same cidr to shared Network scope as all")
subnet1 = self.add_subnet_verify(
self.shared_network_domain_d11,
self.nuagenetworkdata["publiciprange3"])
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["publiciprange3"]["startip"]
vm_2 = self.create_VM(
self.shared_network_domain_d11, account=self.account_d11a)
self.verify_vsd_shared_network(
self.account_d11a.domainid, self.shared_network_domain_d11,
gateway=self.nuagenetworkdata["publiciprange3"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_domain_d11.id,
self.nuagenetworkdata["publiciprange3"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11a.domainid, self.shared_network_domain_d11, vm_2,
sharedsubnetid=subnet_id)
# Restart network with cleanup
self.debug("Restarting shared Network with cleanup")
self.shared_network_domain_d11.restart(self.api_client, cleanup=True)
self.debug("validating SharedNetwork on VSD")
self.verify_vsd_shared_network(
self.account_d11a.domainid, self.shared_network_domain_d11,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
# verify VR
vr = self.get_Router(self.shared_network_domain_d11)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
# put ping here
self.delete_VM(vm_1)
self.delete_VM(vm_2)
self.delete_subnet_verify(self.shared_network_domain_d11, subnet1)
# Test cases relating to add/delete Shared Network IP ranges
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_05_add_delete_Subnet_restart_scope_account(self):
"""Validate that subnet of same gateway can be added to shared network
with scope as account and restart network with clean up works
"""
self.debug("Deploy VM to shared Network scope as account")
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["network_all"]["endip"]
vm_1 = self.create_VM(
self.shared_network_account_d111a, account=self.account_d11a)
# Verify shared Network and VM in VSD
self.verify_vsd_shared_network(
self.account_d11a.domainid, self.shared_network_account_d111a,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_account_d111a.id,
self.nuagenetworkdata["network_all"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11a.domainid, self.shared_network_account_d111a,
vm_1, sharedsubnetid=subnet_id)
# verify VR
vr = self.get_Router(self.shared_network_account_d111a)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
# Add subnet with same cidr
self.debug("Add subnet of same cidr shared Network scope as account")
subnet1 = self.add_subnet_verify(
self.shared_network_account_d111a,
self.nuagenetworkdata["publiciprange3"])
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["publiciprange3"]["startip"]
vm_2 = self.create_VM(
self.shared_network_account_d111a, account=self.account_d11a)
self.verify_vsd_shared_network(
self.account_d11a.domainid, self.shared_network_account_d111a,
gateway=self.nuagenetworkdata["publiciprange3"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_account_d111a.id,
self.nuagenetworkdata["publiciprange3"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11a.domainid, self.shared_network_account_d111a,
vm_2, sharedsubnetid=subnet_id)
# Restart network with cleanup
self.debug("Restarting shared Network with cleanup")
self.shared_network_account_d111a.restart(self.api_client,
cleanup=True)
self.debug("validating SharedNetwork on VSD")
self.verify_vsd_shared_network(
self.account_d11a.domainid, self.shared_network_account_d111a,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
# verify VR
vr = self.get_Router(self.shared_network_account_d111a)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
# put ping here
self.delete_VM(vm_1)
self.delete_VM(vm_2)
self.delete_subnet_verify(self.shared_network_account_d111a, subnet1)
# Test cases relating to VR IP check on Shared Network
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_06_verify_different_gateway_subnet_fails_sharednetwork_all(self):
"""Validate that Different gateway subnet fail as it is not supported
for userdata service shared network with scope=all
"""
# Add subnet of different gateway
self.debug("Adding subnet of different gateway")
try:
subnet2 = self.add_subnet_verify(
self.shared_network_all,
self.nuagenetworkdata["publiciprange2"])
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["network_all"]["endip"]
vm_1 = self.create_VM(
self.shared_network_all, account=self.account_d11a)
self.delete_VM(vm_1)
self.delete_subnet_verify(self.shared_network_all, subnet2)
self.fail("VM is successfully added which is not expected")
except Exception as e:
self.debug("different gateway subnet "
"fails as expected with exception %s" % e)
self.debug("Going to verify the exception message")
self.delete_subnet_verify(self.shared_network_all, subnet2)
exceptionmsg = "Unable to start VM instance"
if exceptionmsg in str(e):
self.debug("correct exception is raised")
else:
self.fail("correct exception is not raised")
# Test cases relating to different gateway subnet check on Shared Network
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_07_different_gateway_subnet_fails_sharednetwork_domain(self):
"""Validate that Different gateway subnet fail as it is not supported
for userdata service shared network with scope domain
"""
# Add subnet of different gateway
self.debug("Adding subnet of different gateway")
try:
subnet2 = self.add_subnet_verify(
self.shared_network_domain_with_subdomain_d11,
self.nuagenetworkdata["publiciprange2"])
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["network_all"]["endip"]
vm_1 = self.create_VM(
self.shared_network_domain_with_subdomain_d11,
account=self.account_d11a)
self.delete_VM(vm_1)
self.delete_subnet_verify(
self.shared_network_domain_with_subdomain_d11, subnet2)
self.fail("VM is successfully added which is not expected")
except Exception as e:
self.debug("different gateway subnet "
"fails as expected with exception %s" % e)
self.debug("Going to verify the exception message")
self.delete_subnet_verify(
self.shared_network_domain_with_subdomain_d11, subnet2)
exceptionmsg = "Unable to start VM instance"
if exceptionmsg in str(e):
self.debug("correct exception is raised")
else:
self.fail("correct exception is not raised")
# Test cases relating to different gateway subnet check on Shared Network
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_08_different_gateway_subnet_fails_sharednetwork_nosubdomain(self):
"""Validate that Different gateway subnet fail as it is not supported
for userdata service shared network with scope nosubdomain
"""
# Add subnet of different gateway
self.debug("Adding subnet of different gateway")
try:
subnet2 = self.add_subnet_verify(
self.shared_network_domain_d11,
self.nuagenetworkdata["publiciprange2"])
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["network_all"]["endip"]
vm_1 = self.create_VM(
self.shared_network_domain_d11, account=self.account_d11a)
self.delete_VM(vm_1)
self.delete_subnet_verify(
self.shared_network_domain_d11, subnet2)
self.fail("VM is successfully added which is not expected")
except Exception as e:
self.debug("different gateway subnet"
" fails as expected with exception %s" % e)
self.debug("Going to verify the exception message")
self.delete_subnet_verify(
self.shared_network_domain_d11, subnet2)
exceptionmsg = "Unable to start VM instance"
if exceptionmsg in str(e):
self.debug("correct exception is raised")
else:
self.fail("correct exception is not raised")
# Test cases relating to different gateway subnet check on Shared Network
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_09_different_gateway_subnet_fails_sharednetwork_account(self):
"""Validate that Different gateway subnet fail as it is not supported
for userdata service shared network with scope account
"""
# Add subnet of different gateway
self.debug("Adding subnet of different gateway")
try:
subnet2 = self.add_subnet_verify(
self.shared_network_account_d111a,
self.nuagenetworkdata["publiciprange2"])
self.test_data["virtual_machine"]["ipaddress"] = \
self.nuagenetworkdata["network_all"]["endip"]
vm_1 = self.create_VM(
self.shared_network_account_d111a, account=self.account_d11a)
self.delete_VM(vm_1)
self.delete_subnet_verify(
self.shared_network_account_d111a, subnet2)
self.fail("VM is successfully added which is not expected")
except Exception as e:
self.debug("different gateway subnet"
" fails as expected with exception %s" % e)
self.debug("Going to verify the exception message")
self.delete_subnet_verify(
self.shared_network_account_d111a, subnet2)
exceptionmsg = "Unable to start VM instance"
if exceptionmsg in str(e):
self.debug("correct exception is raised")
else:
self.fail("correct exception is not raised")
# Test cases relating to reset password in Shared Network
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_10_password_reset_public_sharednetwork_scope_all(self):
"""Validate that reset password works fine in shared network
with scope=all
"""
self.updateTemplate(True)
self.debug("Deploy VM to shared Network scope as all")
self.test_data["virtual_machine"]["ipaddress"] = None
vm_1 = self.create_VM(
self.shared_network_all, account=self.account_d11a)
# verify VR
vr = self.get_Router(self.shared_network_all)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
self.debug("Stopping VM: %s" % vm_1.name)
vm_1.stop(self.api_client)
self.debug("Resetting VM password for VM: %s" % vm_1.name)
password = vm_1.resetPassword(self.api_client)
self.debug("Password reset to: %s" % password)
vm_1.start(self.api_client)
# put login to vm here
self.delete_VM(vm_1)
# Test cases relating to reset password in Shared Network
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_11_password_reset_public_sharednetwork_scope_domain(self):
"""Validate that reset password works fine in shared network
with scope as domain with subdomain access
"""
self.updateTemplate(True)
self.debug("Deploy VM to shared Network scope as all")
self.test_data["virtual_machine"]["ipaddress"] = None
vm_1 = self.create_VM(
self.shared_network_domain_with_subdomain_d11,
account=self.account_d11a)
# verify VR
vr = self.get_Router(self.shared_network_domain_with_subdomain_d11)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
self.debug("Stopping VM: %s" % vm_1.name)
vm_1.stop(self.api_client)
self.debug("Resetting VM password for VM: %s" % vm_1.name)
password = vm_1.resetPassword(self.api_client)
self.debug("Password reset to: %s" % password)
vm_1.start(self.api_client)
# put login to vm here
self.delete_VM(vm_1)
# Test cases relating to reset password in Shared Network
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_12_password_reset_public_sharednetwork_scope_account(self):
"""Validate that reset password works fine in shared network
with scope as Account
"""
self.updateTemplate(True)
self.debug("Deploy VM to shared Network scope as all")
self.test_data["virtual_machine"]["ipaddress"] = None
vm_1 = self.create_VM(
self.shared_network_account_d111a, account=self.account_d11a)
# verify VR
vr = self.get_Router(self.shared_network_account_d111a)
self.check_Router_state(vr, state="Running")
self.verify_vsd_router(vr)
self.debug("Stopping VM: %s" % vm_1.name)
vm_1.stop(self.api_client)
self.debug("Resetting VM password for VM: %s" % vm_1.name)
password = vm_1.resetPassword(self.api_client)
self.debug("Password reset to: %s" % password)
vm_1.start(self.api_client)
# put login to vm here
self.delete_VM(vm_1)
def test_13_public_sharednetwork_domain_cleanup(self):
"""Validate that sharedNetwork Parent domain is cleaned up properly
"""
try:
vm_1 = self.create_VM(
self.shared_network_domain_with_subdomain_d11,
account=self.account_d11a)
self.verify_vsd_shared_network(
self.account_d11a.domainid,
self.shared_network_domain_with_subdomain_d11,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
subnet_id_subdomain = self.get_subnet_id(
self.shared_network_domain_with_subdomain_d11.id,
self.nuagenetworkdata["network_all"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11a.domainid,
self.shared_network_domain_with_subdomain_d11,
vm_1, sharedsubnetid=subnet_id_subdomain)
subnet_id_subdomain1 = self.get_subnet_id(
self.shared_network_domain_with_subdomain_d11.id,
self.nuagenetworkdata["publiciprange2"]["gateway"])
self.domain_1.delete(self.api_client, cleanup="true")
except Exception as e:
self.debug("test case Fail")
self.debug("exception msg is %s" % e)
self.domain_1.delete(self.api_client, cleanup="true")
self.fail("Fail to delete the Parent domain")
self.shared_subnet_not_present(
self.shared_network_domain_with_subdomain_d11,
subnet_id_subdomain)
self.shared_subnet_not_present(
self.shared_network_domain_with_subdomain_d11,
subnet_id_subdomain1)
@staticmethod
def generateKeysForUser(api_client, account):
user = User.list(
api_client,
account=account.name,
domainid=account.domainid)[0]
return (User.registerUserKeys(
api_client,
user.id))
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Base handler class for all mapreduce handlers."""
import httplib
import logging
import google
import simplejson
try:
from google.appengine.ext.mapreduce import pipeline_base
except ImportError:
pipeline_base = None
try:
from google.appengine._internal import cloudstorage
if hasattr(cloudstorage, "_STUB"):
cloudstorage = None
except ImportError:
cloudstorage = None
from google.appengine.api.namespace_manager import namespace_manager
from google.appengine.ext import webapp
from google.appengine.ext.mapreduce import errors
from google.appengine.ext.mapreduce import json_util
from google.appengine.ext.mapreduce import model
from google.appengine.ext.mapreduce import parameters
class Error(Exception):
"""Base-class for exceptions in this module."""
class BadRequestPathError(Error):
"""The request path for the handler is invalid."""
class TaskQueueHandler(webapp.RequestHandler):
"""Base class for handlers intended to be run only from the task queue.
Sub-classes should implement
  1. the 'handle' method for all POST requests.
  2. the '_preprocess' method for decoding or validation before handle.
  3. the '_drop_gracefully' method if the task has failed too many times and
     has to be dropped.
In Python27 runtime, webapp2 will automatically replace webapp.
"""
_DEFAULT_USER_AGENT = "AppEngine-Python-MR"
def __init__(self, *args, **kwargs):
self._preprocess_success = False
super(TaskQueueHandler, self).__init__(*args, **kwargs)
if cloudstorage:
cloudstorage.set_default_retry_params(
cloudstorage.RetryParams(
min_retries=5,
max_retries=10,
urlfetch_timeout=parameters._GCS_URLFETCH_TIMEOUT_SEC,
save_access_token=parameters.config.PERSIST_GCS_ACCESS_TOKEN,
memcache_access_token=parameters.config.PERSIST_GCS_ACCESS_TOKEN,
_user_agent=self._DEFAULT_USER_AGENT))
def initialize(self, request, response):
"""Initialize.
1. call webapp init.
2. check request is indeed from taskqueue.
3. check the task has not been retried too many times.
4. run handler specific processing logic.
    5. run error handling logic if preprocessing failed.
Args:
request: a webapp.Request instance.
response: a webapp.Response instance.
"""
super(TaskQueueHandler, self).initialize(request, response)
if "X-AppEngine-QueueName" not in self.request.headers:
logging.error(self.request.headers)
logging.error("Task queue handler received non-task queue request")
self.response.set_status(
403, message="Task queue handler received non-task queue request")
return
if self.task_retry_count() + 1 > parameters.config.TASK_MAX_ATTEMPTS:
logging.error(
"Task %s has been attempted %s times. Dropping it permanently.",
self.request.headers["X-AppEngine-TaskName"],
self.task_retry_count() + 1)
self._drop_gracefully()
return
self._preprocess()
self._preprocess_success = True
def post(self):
if self._preprocess_success:
self.handle()
def handle(self):
"""To be implemented by subclasses."""
raise NotImplementedError()
def _preprocess(self):
"""Preprocess.
This method is called after webapp initialization code has been run
successfully. It can thus access self.request, self.response and so on.
Failures will be retried by taskqueue.
"""
pass
def _drop_gracefully(self):
"""Drop task gracefully.
    When a task has failed too many times, this method is called before it is
    dropped.
"""
pass
def task_retry_count(self):
"""Number of times this task has been retried."""
return int(self.request.headers.get("X-AppEngine-TaskExecutionCount", 0))
def retry_task(self):
"""Ask taskqueue to retry this task.
Even though raising an exception can cause a task retry, it
    will flood logs with highly visible ERROR logs. Handlers should use
    this method to perform controlled task retries. Only raise exceptions
    for cases that deserve ERROR log entries.
"""
self.response.set_status(httplib.SERVICE_UNAVAILABLE, "Retry task")
self.response.clear()
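# Illustrative sketch (assumption: not part of the original module and not
# registered with any URL mapping) showing the subclass contract described in
# TaskQueueHandler's docstring. The handler name and the "item" payload key
# are hypothetical.
class _ExampleCountHandler(TaskQueueHandler):
  """Logs a hypothetical payload value taken from the task request."""
  def _preprocess(self):
    # Runs before handle(); failures raised here are retried by taskqueue.
    self._item = self.request.get("item", "")
  def handle(self):
    # Called for POST requests once _preprocess() has succeeded.
    logging.info("processing item %r", self._item)
  def _drop_gracefully(self):
    # Called instead of handle() once the task exceeds TASK_MAX_ATTEMPTS;
    # _preprocess() has not necessarily run at this point.
    logging.warning("dropping task %s permanently",
                    self.request.headers.get("X-AppEngine-TaskName"))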
class JsonHandler(webapp.RequestHandler):
"""Base class for JSON handlers for user interface.
Sub-classes should implement the 'handle' method. They should put their
response data in the 'self.json_response' dictionary. Any exceptions raised
by the sub-class implementation will be sent in a JSON response with the
name of the error_class and the error_message.
"""
def __init__(self, *args):
"""Initializer."""
super(JsonHandler, self).__init__(*args)
self.json_response = {}
def base_path(self):
"""Base path for all mapreduce-related urls.
JSON handlers are mapped to /base_path/command/command_name thus they
require special treatment.
Raises:
BadRequestPathError: if the path does not end with "/command".
Returns:
The base path.
"""
path = self.request.path
base_path = path[:path.rfind("/")]
if not base_path.endswith("/command"):
raise BadRequestPathError(
"Json handlers should have /command path prefix")
return base_path[:base_path.rfind("/")]
def _handle_wrapper(self):
"""The helper method for handling JSON Post and Get requests."""
if self.request.headers.get("X-Requested-With") != "XMLHttpRequest":
logging.error("Got JSON request with no X-Requested-With header")
self.response.set_status(
403, message="Got JSON request with no X-Requested-With header")
return
self.json_response.clear()
try:
self.handle()
except errors.MissingYamlError:
logging.debug("Could not find 'mapreduce.yaml' file.")
self.json_response.clear()
self.json_response["error_class"] = "Notice"
self.json_response["error_message"] = "Could not find 'mapreduce.yaml'"
except Exception, e:
logging.exception("Error in JsonHandler, returning exception.")
self.json_response.clear()
self.json_response["error_class"] = e.__class__.__name__
self.json_response["error_message"] = str(e)
self.response.headers["Content-Type"] = "text/javascript"
try:
output = simplejson.dumps(self.json_response, cls=json_util.JsonEncoder)
except Exception, e:
logging.exception("Could not serialize to JSON")
self.response.set_status(500, message="Could not serialize to JSON")
return
else:
self.response.out.write(output)
def handle(self):
"""To be implemented by sub-classes."""
raise NotImplementedError()
class PostJsonHandler(JsonHandler):
"""JSON handler that accepts POST requests."""
def post(self):
self._handle_wrapper()
class GetJsonHandler(JsonHandler):
"""JSON handler that accepts GET posts."""
def get(self):
namespace_manager.set_namespace(
self.request.get("namespace", default_value=None))
self._handle_wrapper()
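# Illustrative sketch (hypothetical handler, not registered with any URL
# mapping): a concrete JSON handler only needs to fill self.json_response in
# handle(); _handle_wrapper() takes care of serialization and error reporting.
#   class StatusHandler(GetJsonHandler):
#     def handle(self):
#       self.json_response["status"] = "ok"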
class HugeTaskHandler(TaskQueueHandler):
"""Base handler for processing HugeTasks."""
class _RequestWrapper(object):
"""Container of a request and associated parameters."""
def __init__(self, request):
self._request = request
self._params = model.HugeTask.decode_payload(request)
def get(self, name, default=""):
return self._params.get(name, default)
def set(self, name, value):
self._params[name] = value
def __getattr__(self, name):
return getattr(self._request, name)
def __init__(self, *args, **kwargs):
super(HugeTaskHandler, self).__init__(*args, **kwargs)
def _preprocess(self):
self.request = self._RequestWrapper(self.request)
if pipeline_base:
PipelineBase = pipeline_base.PipelineBase
else:
PipelineBase = None
|
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# From http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
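    # Illustrative sketch (not from the original recipe): after od['a'] = 1 and
    # od['b'] = 2 the layout is, conceptually,
    #     __root <-> link_a <-> link_b <-> __root        (circular)
    #     __map == {'a': link_a, 'b': link_b}
    # where each link is the list [PREV, NEXT, KEY] and the inherited dict maps
    # 'a' -> 1 and 'b' -> 2.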
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
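# Minimal usage sketch (illustrative only, not part of the recipe); the
# function is defined here but never called.
def _ordereddict_example():
    od = OrderedDict()
    od['b'] = 1
    od['a'] = 2
    od['c'] = 3
    assert list(od) == ['b', 'a', 'c']          # iteration follows insertion order
    assert od.popitem() == ('c', 3)             # LIFO by default
    assert od.popitem(last=False) == ('b', 1)   # FIFO when last=False
    return od.items()                           # [('a', 2)]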
|
|
from django.db import models
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from datetime import datetime, date, timedelta
from pendulum import utils
try:
"""
if the app is installed from the start of a site, the sites table does
not exist and it causes problems. This exception block seems to fix
the problem.
"""
CURRENT_SITE = Site.objects.get_current()
except:
CURRENT_SITE = Site.objects.all()
class PendulumConfiguration(models.Model):
"""
This will hold a single record that maintains the configuration of the
application. In the admin interface, if the configuration is marked as
"Is Monthly", that will take precedence over the "Install date" option (even
if the install_date and period_length fields have values). If you wish to
use the fixed-length (install_date + period_length) setup, uncheck the
is_monthly field.
"""
# tie the configuration to one site in particular
site = models.OneToOneField(Site, help_text="""Please choose the site that these settings will apply to.""")
"""
this represents whether the application will look for all entries in a
month-long period
"""
is_monthly = models.BooleanField(default=True, help_text="""If you check this box, you will be forced to use the monthly mode. Uncheck it to use fixed-length period""")
"""
this is used in conjunction with the monthly setup; end date is assumed
to be month_start - 1. For example, if the periods begin on the 16th of
each month, the end date would be assumed to be the 15th of each month
"""
month_start = models.PositiveIntegerField(default=1, blank=True, null=True,
help_text="""Enter 1 for periods that begin on the 1st day of each month and end on the last day of each month. Alternatively, enter any other number (between 2 and 31) for the day of the month that periods start. For example, enter 16 for periods that begin on the 16th of each month and end on the 15th of the following month.""")
"""
install_date represents the date the software was installed and began
being used. period_length represents the number of days in a period. Week-
long periods would have a period_length of 7. Two week-long periods would
be 14 days. You get the idea. These should be able to handle _most_
situations (maybe not all).
"""
install_date = models.DateField(blank=True, null=True, help_text="""The date that Pendulum was installed. Does not necessarily have to be the date, just a date to be used as a reference point for adding the number of days from period length below. For example, if you have periods with a fixed length of 2 weeks, enter 14 days for period length and choose any Sunday to be the install date.""")
period_length = models.PositiveIntegerField(blank=True, null=True, help_text="""The number of days in the fixed-length period. For example, enter 7 days for 1-week periods or 28 for 4-week long periods.""")
def __unicode__(self):
return u'Pendulum Configuration for %s' % self.site
def __current_mode(self):
if self.is_monthly:
return u'Month-long'
else:
return u'Fixed-length'
current_mode = property(__current_mode)
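# Illustrative configurations (hypothetical values, shown as comments only):
#   month-long periods running from the 16th of one month to the 15th of the
#   next:
#       PendulumConfiguration(site=site, is_monthly=True, month_start=16)
#   fixed two-week periods anchored to an arbitrary Sunday:
#       PendulumConfiguration(site=site, is_monthly=False,
#                             install_date=date(2008, 1, 6), period_length=14)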
class ProjectManager(models.Manager):
"""
Return all active projects.
"""
def get_query_set(self):
return super(ProjectManager, self).get_query_set().filter(sites__exact=CURRENT_SITE)
def active(self):
return self.get_query_set().filter(is_active=True)
class Project(models.Model):
"""
This class will keep track of different projects that one may clock into
"""
name = models.CharField(max_length=100, unique=True,
help_text="""Please enter a name for this project.""")
description = models.TextField(blank=True, null=True,
help_text="""If necessary, enter something to describe the project.""")
is_active = models.BooleanField(default=True,
help_text="""Should this project be available for users to clock into?""")
sites = models.ManyToManyField(Site, related_name='pendulum_projects',
help_text="""Choose the site(s) that will display this project.""")
date_added = models.DateTimeField(auto_now_add=True)
date_updated = models.DateTimeField(auto_now=True)
objects = ProjectManager()
def __unicode__(self):
"""
The string representation of an instance of this class
"""
return self.name
def log_count(self):
"""
Determine the number of entries associated with this project
"""
return self.entries.all().count()
def __total_hours(self):
"""
        Determine the number of hours spent working on this project
"""
times = [e.total_hours for e in self.entries.all()]
return '%.02f' % sum(times)
total_hours = property(__total_hours)
class Meta:
ordering = ['name', 'date_added']
class ActivityManager(models.Manager):
"""
Return all active activities.
"""
def get_query_set(self):
return super(ActivityManager, self).get_query_set().filter(sites__exact=CURRENT_SITE)
class Activity(models.Model):
"""
Represents different types of activity: debugging, developing,
brainstorming, QA, etc...
"""
code = models.CharField(max_length=5, unique=True,
help_text="""Enter a short code to describe the type of activity that took place.""")
name = models.CharField(max_length=50,
help_text="""Now enter a more meaningful name for the activity.""")
sites = models.ManyToManyField(Site, related_name='pendulum_activities',
help_text="""Choose the site(s) that will display this activity.""")
objects = ActivityManager()
def __unicode__(self):
"""
The string representation of an instance of this class
"""
return self.name
def __log_count(self):
"""
Determine the number of entries associated with this activity
"""
return self.entries.all().count()
log_count = property(__log_count)
def __total_hours(self):
"""
        Determine the number of hours spent doing this type of activity
"""
times = [e.total_hours for e in self.entries.all()]
return '%.02f' % sum(times)
total_hours = property(__total_hours)
class Meta:
ordering = ['name']
verbose_name_plural = 'activities'
class EntryManager(models.Manager):
#def get_query_set(self):
# return super(EntryManager, self).get_query_set().filter(site__exact=CURRENT_SITE)
def current(self, user=None):
"""
This will pull back any log entries for the current period.
"""
try:
set = self.in_period(utils.determine_period())
except PendulumConfiguration.DoesNotExist:
raise Exception, "Please configure Pendulum!"
else:
if user:
return set.filter(user=user)
return set
def previous(self, delta, user=None):
set = self.in_period(utils.determine_period(delta=delta))
if user:
return set.filter(user=user)
return set
def in_period(self, period, user=None):
if not isinstance(period, tuple) or len(period) != 2:
raise Exception('Invalid period specified')
set = self.get_query_set().filter(start_time__range=period)
if user:
return set.filter(user=user)
return set
class Entry(models.Model):
"""
This class is where all of the time logs are taken care of
"""
user = models.ForeignKey(User, related_name='pendulum_entries')
project = models.ForeignKey(Project,
limit_choices_to={'is_active': True,
'sites': CURRENT_SITE},
related_name='entries')
activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
start_time = models.DateTimeField()
end_time = models.DateTimeField(blank=True, null=True)
seconds_paused = models.PositiveIntegerField(default=0)
pause_time = models.DateTimeField(blank=True, null=True)
comments = models.TextField(blank=True, null=True)
date_updated = models.DateTimeField(auto_now=True)
site = models.ForeignKey(Site, related_name='pendulum_entries')
objects = EntryManager()
def get_seconds(self):
"""
Determines the difference between the starting and ending time. The
result is returned as an integer of seconds.
"""
if self.start_time and self.end_time:
# only calculate when the start and end are defined
delta = self.end_time - self.start_time
seconds = delta.seconds - self.seconds_paused
else:
seconds = 0
delta = timedelta(days=0)
return seconds + (delta.days * 86400)
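    # Worked example (hypothetical values): start 09:00 and end 17:30 on the
    # same day give delta.seconds == 30600; with seconds_paused == 1800 the
    # method returns 28800 seconds, i.e. 8.0 total_hours.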
def __total_hours(self):
"""
        Determine the total number of hours worked in this entry
"""
return self.get_seconds() / 3600.0
total_hours = property(__total_hours)
def __total_time(self):
"""
        Determines the amount of time spent and returns it as a string
        formatted as HH:MM:SS
"""
return utils.get_total_time(self.get_seconds())
total_time = property(__total_time)
def __paused_time(self):
"""
Returns the total time paused for this entry in HH:MM:SS format
"""
return utils.get_total_time(self.seconds_paused)
paused_time = property(__paused_time)
def __hours(self):
"""
Print the hours in a nice, rounded format
"""
return "%.02f" % self.total_hours
hours = property(__hours)
def __is_paused(self):
"""
Determine whether or not this entry is paused
"""
return self.pause_time != None
is_paused = property(__is_paused)
def pause(self):
"""
If this entry is not paused, pause it.
"""
if not self.is_paused:
self.pause_time = datetime.now()
def unpause(self):
"""
If this entry is paused, unpause it
"""
if self.is_paused:
delta = datetime.now() - self.pause_time
self.seconds_paused += delta.seconds
self.pause_time = None
def toggle_paused(self):
"""
Toggle the paused state of this entry. If the entry is already paused,
it will be unpaused; if it is not paused, it will be paused.
"""
if self.is_paused:
self.unpause()
else:
self.pause()
def __is_closed(self):
"""
Determine whether this entry has been closed or not
"""
return self.end_time != None
is_closed = property(__is_closed)
def clock_in(self, user, project):
"""
Set this entry up for saving the first time, as an open entry.
"""
if not self.is_closed:
self.user = user
self.project = project
self.site = CURRENT_SITE
self.start_time = datetime.now()
def clock_out(self, activity, comments):
"""
Save some vital pieces of information about this entry upon closing
"""
if self.is_paused:
self.unpause()
if not self.is_closed:
self.end_time = datetime.now()
self.activity = activity
self.comments = comments
def __delete_key(self):
"""
Make it a little more interesting for deleting logs
"""
salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
try:
import hashlib
except ImportError:
import sha
key = sha.new(salt).hexdigest()
else:
key = hashlib.sha1(salt).hexdigest()
return key
delete_key = property(__delete_key)
def __unicode__(self):
"""
The string representation of an instance of this class
"""
return '%s on %s' % (self.user, self.project)
class Meta:
ordering = ['-start_time']
verbose_name_plural = 'entries'
permissions = (
('can_clock_in', 'Can use Pendulum to clock in'),
('can_pause', 'Can pause and unpause log entries'),
('can_clock_out', 'Can use Pendulum to clock out'),
)
# Add a utility method to the User class that will tell whether or not a
# particular user has any unclosed entries
User.clocked_in = property(lambda user: user.pendulum_entries.filter(end_time__isnull=True).count() > 0)
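# Illustrative clock-in/clock-out workflow (hypothetical objects, comments
# only):
#   entry = Entry()
#   entry.clock_in(user, project)       # opens the entry at datetime.now()
#   entry.toggle_paused()               # pause ...
#   entry.toggle_paused()               # ... and resume, growing seconds_paused
#   entry.clock_out(activity, 'worked on the foo feature')
#   entry.save()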
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A GUnicorn + Flask Debug Frontend for Transformer models."""
import json
from flask import Flask
from flask import jsonify
from flask import request
from flask import send_from_directory
from flask.json import JSONEncoder
from gunicorn.app.base import BaseApplication
from gunicorn.six import iteritems
import numpy as np
from tensor2tensor.insights import transformer_model
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("configuration", "",
"A JSON InsightConfiguration message that configures which "
"models to run in the insight frontend.")
flags.DEFINE_string("static_path", "",
"Path to static javascript and html files to serve.")
_NUMPY_INT_DTYPES = [
np.int8, np.int16, np.int32, np.int64
]
_NUMPY_FP_DTYPES = [
np.float16, np.float32, np.float64
]
class NumpySerializationFix(JSONEncoder):
"""json module cannot serialize numpy datatypes, reinterpret them first"""
def default(self, obj):
obj_type = type(obj)
if obj_type in _NUMPY_INT_DTYPES:
return int(obj)
if obj_type in _NUMPY_FP_DTYPES:
return float(obj)
return json.JSONEncoder.default(self, obj)
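# Illustrative use (sketch): with this encoder numpy scalars no longer break
# serialization, e.g.
#   json.dumps({"score": np.float32(0.5)}, cls=NumpySerializationFix)
# returns '{"score": 0.5}'.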
class DebugFrontendApplication(BaseApplication):
"""A local custom application for GUnicorns.
This custom application enables us to run with a custom main that parses
  tensorflow ops and does some internal setup prior to processing queries. The
  underlying app registered with an instance of this class will be run in
  forked GUnicorn worker processes.
"""
def __init__(self, app, options=None):
"""Creates the GUnicorn application.
Args:
app: A Flask application that will process requests.
options: A dict of GUnicorn options.
"""
self.options = options or {}
self.application = app
super(DebugFrontendApplication, self).__init__()
def load_config(self):
"""Loads the configuration."""
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
"""Loads the application.
Returns:
The Flask application.
"""
return self.application
def main(_):
# Create the models we support:
with open(FLAGS.configuration) as configuration_file:
configuration = json.load(configuration_file)
# Read in the set of query processors.
processors = {}
for processor_configuration in configuration["configuration"]:
key = (processor_configuration["source_language"],
processor_configuration["target_language"],
processor_configuration["label"])
processors[key] = transformer_model.TransformerModel(
processor_configuration)
# Read in the list of supported languages.
languages = {}
for language in configuration["language"]:
languages[language["code"]] = {
"code": language["code"],
"name": language["name"],
}
  # Create the Flask app to serve all paths starting with '/polymer' from the
  # static path. This is to serve non-vulcanized components.
app = Flask(
__name__.split(".")[0],
static_url_path="/polymer",
static_folder=FLAGS.static_path)
app.json_encoder = NumpySerializationFix
# Disable static file caching.
app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 0
@app.route("/api/language_list/")
def language_list(): # pylint: disable=unused-variable
"""Responds to /api/language_list with the supported languages.
Returns:
JSON for the languages.
"""
return jsonify({
"language": list(languages.values())
})
@app.route("/api/list_models/")
def list_models(): # pylint: disable=unused-variable
"""Responds to /api/list_models with the supported modes.
Returns:
JSON for the supported models.
"""
configuration_list = []
for source_code, target_code, label in processors:
configuration_list.append({
"id": label,
"source_language": languages[source_code],
"target_language": languages[target_code],
})
return jsonify({
"configuration": configuration_list
})
@app.route("/debug", methods=["GET"])
def query(): # pylint: disable=unused-variable
"""Responds to /debug with processing results.
Returns:
JSON for the query's result.
"""
query = request.args.get("source")
source_language = request.args.get("sl")
target_language = request.args.get("tl")
model_name = request.args.get("id")
processor = processors[(source_language, target_language, model_name)]
return jsonify(processor.process(query))
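  # Illustrative request (hypothetical values):
  #   GET /debug?source=hello%20world&sl=en&tl=de&id=transformer
  # looks up processors[("en", "de", "transformer")] and returns its process()
  # output as JSON.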
# Catchall for all other paths. Any other path should get the basic index
# page, the polymer side will determine what view to show and what REST calls
# to make for data.
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def root(path): # pylint: disable=unused-variable
"""Responds to all other non-static paths with index.html.
Args:
path: Unused path.
Returns:
The landing page html text.
"""
if (path == "index.js" or
path == "webcomponentsjs/custom-elements-es5-adapter.js" or
path == "webcomponentsjs/webcomponents-lite.js"):
# Some vulcanizing methods bundle the javascript into a index.js file
# paired with index.html but leave two important webcomponents js files
# outside of the bundle. If requesting those special files, fetch them
# directly rather than from a /static sub-directory.
return send_from_directory(FLAGS.static_path, path)
# Everything else should redirect to the main landing page. Since we
# use a single page app, any initial url requests may include random
# paths (that don't start with /api or /static) which all should be
# served by the main landing page.
return send_from_directory(FLAGS.static_path, "index.html")
# Run the server.
tf.logging.info("############# READY ##################")
options = {
"bind": ":8010",
"timeout": 600,
"workers": 4,
"reload": True,
"spew": True,
"worker_class": "gevent",
}
DebugFrontendApplication(app, options).run()
if __name__ == "__main__":
tf.app.run()
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import string_types
from six.moves import zip_longest
import re
from types import GeneratorType
from collections import Counter, defaultdict, Hashable
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio import Sequence
class SequenceSubclass(Sequence):
"""Used for testing purposes."""
pass
class TestSequence(TestCase):
def setUp(self):
self.sequence_kinds = frozenset([
str, Sequence, lambda s: np.fromstring(s, dtype='|S1'),
lambda s: np.fromstring(s, dtype=np.uint8)])
def empty_generator():
raise StopIteration()
yield
self.getitem_empty_indices = [
[],
(),
{},
empty_generator(),
# ndarray of implicit float dtype
np.array([]),
np.array([], dtype=int)]
def test_init_default_parameters(self):
seq = Sequence('.ABC123xyz-')
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual(seq.id, "")
self.assertEqual(seq.description, "")
self.assertIsNone(seq.quality)
def test_init_nondefault_parameters(self):
seq = Sequence('.ABC123xyz-', id='foo', description='bar baz',
quality=range(11))
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual(seq.id, 'foo')
self.assertEqual(seq.description, 'bar baz')
npt.assert_equal(seq.quality, np.array(range(11), dtype='int'))
def test_init_empty_sequence(self):
# Test constructing an empty sequence using each supported input type.
for s in (b'', # bytes
u'', # unicode
np.array('', dtype='c'), # char vector
np.fromstring('', dtype=np.uint8), # byte vec
Sequence('')): # another Sequence object
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (0,))
npt.assert_equal(seq.values, np.array('', dtype='c'))
def test_init_single_character_sequence(self):
for s in (b'A',
u'A',
np.array('A', dtype='c'),
np.fromstring('A', dtype=np.uint8),
Sequence('A')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (1,))
npt.assert_equal(seq.values, np.array('A', dtype='c'))
def test_init_multiple_character_sequence(self):
for s in (b'.ABC\t123 xyz-',
u'.ABC\t123 xyz-',
np.array('.ABC\t123 xyz-', dtype='c'),
np.fromstring('.ABC\t123 xyz-', dtype=np.uint8),
Sequence('.ABC\t123 xyz-')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (14,))
npt.assert_equal(seq.values,
np.array('.ABC\t123 xyz-', dtype='c'))
def test_init_from_sequence_object(self):
# We're testing this in its simplest form in other tests. This test
# exercises more complicated cases of building a sequence from another
# sequence.
# just the sequence, no other metadata
seq = Sequence('ACGT')
self.assertEqual(Sequence(seq), seq)
# sequence with metadata should have everything propagated
seq = Sequence('ACGT', id='foo', description='bar baz',
quality=range(4))
self.assertEqual(Sequence(seq), seq)
# should be able to override metadata
self.assertEqual(
Sequence(seq, id='abc', description='123', quality=[42] * 4),
Sequence('ACGT', id='abc', description='123', quality=[42] * 4))
# subclasses work too
seq = SequenceSubclass('ACGT', id='foo', description='bar baz',
quality=range(4))
self.assertEqual(
Sequence(seq),
Sequence('ACGT', id='foo', description='bar baz',
quality=range(4)))
def test_init_from_contiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[:3]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('A*B'))
# we shouldn't own the memory because no copy should have been made
self.assertFalse(seq._owns_bytes)
# can't mutate view because it isn't writeable anymore
with self.assertRaises(ValueError):
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('A*B'))
# mutate bytes (*not* the view)
bytes[0] = 99
# Sequence changed because we are only able to make the view read-only,
# not its source (bytes). This is somewhat inconsistent behavior that
# is (to the best of our knowledge) outside our control.
self.assertEqual(seq, Sequence('c*B'))
def test_init_from_noncontiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[::2]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('ABA'))
# we should own the memory because a copy should have been made
self.assertTrue(seq._owns_bytes)
# mutate bytes and its view
bytes[0] = 99
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('ABA'))
def test_init_no_copy_of_sequence(self):
bytes = np.array([65, 66, 65], dtype=np.uint8)
seq = Sequence(bytes)
# should share the same memory
self.assertIs(seq._bytes, bytes)
# shouldn't be able to mutate the Sequence object's internals by
# mutating the shared memory
with self.assertRaises(ValueError):
bytes[1] = 42
def test_init_empty_id(self):
seq = Sequence('', id='')
self.assertIsInstance(seq.id, string_types)
self.assertEqual(seq.id, '')
def test_init_single_character_id(self):
seq = Sequence('', id='z')
self.assertIsInstance(seq.id, string_types)
self.assertEqual(seq.id, 'z')
def test_init_multiple_character_id(self):
seq = Sequence('', id='\nabc\tdef G123')
self.assertIsInstance(seq.id, string_types)
self.assertEqual(seq.id, '\nabc\tdef G123')
def test_init_empty_description(self):
seq = Sequence('', description='')
self.assertIsInstance(seq.description, string_types)
self.assertEqual(seq.description, '')
def test_init_single_character_description(self):
seq = Sequence('', description='z')
self.assertIsInstance(seq.description, string_types)
self.assertEqual(seq.description, 'z')
def test_init_multiple_character_description(self):
seq = Sequence('', description='\nabc\tdef G123')
self.assertIsInstance(seq.description, string_types)
self.assertEqual(seq.description, '\nabc\tdef G123')
def test_init_empty_quality(self):
for q in ([], (), np.array([])):
seq = Sequence('', quality=q)
self.assertIsInstance(seq.quality, np.ndarray)
self.assertEqual(seq.quality.dtype, np.int)
self.assertEqual(seq.quality.shape, (0,))
npt.assert_equal(seq.quality, np.array([]))
def test_init_single_quality_score(self):
for q in (2, [2], (2,), np.array([2])):
seq = Sequence('G', quality=q)
self.assertIsInstance(seq.quality, np.ndarray)
self.assertEqual(seq.quality.dtype, np.int)
self.assertEqual(seq.quality.shape, (1,))
npt.assert_equal(seq.quality, np.array([2]))
def test_init_multiple_quality_scores(self):
for q in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
(0, 42, 42, 1, 0, 8, 100, 0, 0),
np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
seq = Sequence('G' * 9, quality=q)
self.assertIsInstance(seq.quality, np.ndarray)
self.assertEqual(seq.quality.dtype, np.int)
self.assertEqual(seq.quality.shape, (9,))
npt.assert_equal(seq.quality,
np.array([0, 42, 42, 1, 0, 8, 100, 0, 0]))
def test_init_no_copy_of_quality(self):
qual = np.array([22, 22, 1])
seq = Sequence('ACA', quality=qual)
self.assertIs(seq.quality, qual)
with self.assertRaises(ValueError):
qual[1] = 42
def test_init_invalid_sequence(self):
# invalid dtype (numpy.ndarray input)
with self.assertRaises(TypeError):
# int64
Sequence(np.array([1, 2, 3]))
with self.assertRaises(TypeError):
# |S21
Sequence(np.array([1, "23", 3]))
with self.assertRaises(TypeError):
# object
Sequence(np.array([1, {}, ()]))
# invalid input type (non-numpy.ndarray input)
with self.assertRaisesRegexp(TypeError, 'tuple'):
Sequence(('a', 'b', 'c'))
with self.assertRaisesRegexp(TypeError, 'list'):
Sequence(['a', 'b', 'c'])
with self.assertRaisesRegexp(TypeError, 'set'):
Sequence({'a', 'b', 'c'})
with self.assertRaisesRegexp(TypeError, 'dict'):
Sequence({'a': 42, 'b': 43, 'c': 44})
with self.assertRaisesRegexp(TypeError, 'int'):
Sequence(42)
with self.assertRaisesRegexp(TypeError, 'float'):
Sequence(4.2)
with self.assertRaisesRegexp(TypeError, 'int64'):
Sequence(np.int_(50))
with self.assertRaisesRegexp(TypeError, 'float64'):
Sequence(np.float_(50))
with self.assertRaisesRegexp(TypeError, 'Foo'):
class Foo(object):
pass
Sequence(Foo())
# out of ASCII range
with self.assertRaises(UnicodeEncodeError):
Sequence(u'abc\u1F30')
def test_init_invalid_id(self):
with self.assertRaises(TypeError):
Sequence('abc', id=('f', 'o', 'o'))
def test_init_invalid_description(self):
with self.assertRaises(TypeError):
Sequence('abc', description=('f', 'o', 'o'))
def test_init_invalid_quality(self):
# invalid dtype
with self.assertRaises(TypeError):
Sequence('ACGT', quality=[2, 3, 4.1, 5])
with self.assertRaises(TypeError):
Sequence('ACGT', quality=[2, np.nan, 4, 5])
# wrong number of dimensions
with self.assertRaisesRegexp(ValueError, '2.*1-D'):
Sequence('ACGT', quality=[[2, 3], [4, 5]])
# wrong number of elements
with self.assertRaisesRegexp(ValueError, '\(3\).*\(4\)'):
Sequence('ACGT', quality=[2, 3, 4])
# negatives
with self.assertRaisesRegexp(ValueError,
'Quality scores.*greater than.*zero'):
Sequence('ACGT', quality=[2, 3, -1, 4])
def test_value_property(self):
# Property tests are only concerned with testing the interface
# provided by the property: that it can be accessed, can't be
# reassigned or mutated in place, and that the correct type is
# returned. More extensive testing of border cases (e.g., different
        # sequence lengths or input types, odd characters, etc.) is performed
# in Sequence.__init__ tests.
seq = Sequence('ACGT')
# should get back a numpy.ndarray of '|S1' dtype
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
npt.assert_equal(seq.values, np.array('ACGT', dtype='c'))
# test that we can't mutate the property
with self.assertRaises(ValueError):
seq.values[1] = 'A'
# test that we can't set the property
with self.assertRaises(AttributeError):
seq.values = np.array("GGGG", dtype='c')
def test_id_property(self):
seq = Sequence('', id='foo')
self.assertIsInstance(seq.id, string_types)
self.assertEqual(seq.id, 'foo')
with self.assertRaises(TypeError):
seq.id[1] = 42
with self.assertRaises(AttributeError):
seq.id = 'bar'
def test_description_property(self):
seq = Sequence('', description='foo')
self.assertIsInstance(seq.description, string_types)
self.assertEqual(seq.description, 'foo')
with self.assertRaises(TypeError):
seq.description[1] = 42
with self.assertRaises(AttributeError):
seq.description = 'bar'
def test_quality_property(self):
seq = Sequence('ACA', quality=[22, 22, 0])
self.assertIsInstance(seq.quality, np.ndarray)
self.assertEqual(seq.quality.dtype, np.int)
npt.assert_equal(seq.quality, np.array([22, 22, 0]))
with self.assertRaises(ValueError):
seq.quality[1] = 42
with self.assertRaises(AttributeError):
seq.quality = [22, 22, 42]
def test_has_quality(self):
seq = Sequence('')
self.assertFalse(seq._has_quality())
seq = Sequence('', quality=[])
self.assertTrue(seq._has_quality())
seq = Sequence('ACA', quality=(5, 4, 67))
self.assertTrue(seq._has_quality())
seq = Sequence('ACA')
self.assertFalse(seq._has_quality())
def test_eq_and_ne(self):
seq_a = Sequence("A")
seq_b = Sequence("B")
self.assertTrue(seq_a == seq_a)
self.assertTrue(Sequence("a") == Sequence("a"))
self.assertTrue(Sequence("a", id='b') == Sequence("a", id='b'))
self.assertTrue(Sequence("a", id='b', description='c') ==
Sequence("a", id='b', description='c'))
self.assertTrue(Sequence("a", id='b', description='c', quality=[1]) ==
Sequence("a", id='b', description='c', quality=[1]))
self.assertTrue(seq_a != seq_b)
self.assertTrue(SequenceSubclass("a") != Sequence("a"))
self.assertTrue(Sequence("a") != Sequence("b"))
self.assertTrue(Sequence("a") != Sequence("a", id='b'))
self.assertTrue(Sequence("a", id='c') !=
Sequence("a", id='c', description='t'))
self.assertTrue(Sequence("a", quality=[1]) != Sequence("a"))
self.assertTrue(Sequence("a", quality=[2]) !=
Sequence("a", quality=[1]))
self.assertTrue(Sequence("c", quality=[3]) !=
Sequence("b", quality=[3]))
self.assertTrue(Sequence("a", id='b') != Sequence("c", id='b'))
def test_getitem_gives_new_sequence(self):
seq = Sequence("Sequence string !1@2#3?.,")
self.assertFalse(seq is seq[:])
def test_getitem_with_int_has_qual(self):
s = "Sequence string !1@2#3?.,"
length = len(s)
seq = Sequence(s, id='id', description='dsc',
quality=np.arange(length))
eseq = Sequence("S", id='id', description='dsc', quality=np.array([0]))
self.assertEqual(seq[0], eseq)
eseq = Sequence(",", id='id', description='dsc',
quality=np.array([len(seq) - 1]))
self.assertEqual(seq[len(seq) - 1], eseq)
eseq = Sequence("t", id='id', description='dsc',
quality=[10])
self.assertEqual(seq[10], eseq)
def test_getitem_with_int_no_qual(self):
seq = Sequence("Sequence string !1@2#3?.,", id='id2',
description='no_qual')
eseq = Sequence("t", id='id2', description='no_qual')
self.assertEqual(seq[10], eseq)
def test_getitem_with_slice_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id3', description="dsc3",
quality=np.arange(length))
eseq = Sequence("012", id='id3', description="dsc3",
quality=np.arange(3))
self.assertEquals(seq[0:3], eseq)
self.assertEquals(seq[:3], eseq)
self.assertEquals(seq[:3:1], eseq)
eseq = Sequence("def", id='id3', description="dsc3",
quality=[13, 14, 15])
self.assertEquals(seq[-3:], eseq)
self.assertEquals(seq[-3::1], eseq)
eseq = Sequence("02468ace", id='id3', description='dsc3',
quality=[0, 2, 4, 6, 8, 10, 12, 14])
self.assertEquals(seq[0:length:2], eseq)
self.assertEquals(seq[::2], eseq)
eseq = Sequence(s[::-1], id='id3', description='dsc3',
quality=np.arange(length)[::-1])
self.assertEquals(seq[length::-1], eseq)
self.assertEquals(seq[::-1], eseq)
eseq = Sequence('fdb97531', id='id3', description='dsc3',
quality=[15, 13, 11, 9, 7, 5, 3, 1])
self.assertEquals(seq[length::-2], eseq)
self.assertEquals(seq[::-2], eseq)
self.assertEquals(seq[0:500:], seq)
eseq = Sequence('', id='id3', description='dsc3',
quality=[])
self.assertEquals(seq[length:0], eseq)
self.assertEquals(seq[-length:0], eseq)
self.assertEquals(seq[1:0], eseq)
eseq = Sequence("0", id='id3', description='dsc3',
quality=[0])
self.assertEquals(seq[0:1], eseq)
self.assertEquals(seq[0:1:1], eseq)
self.assertEquals(seq[-length::-1], eseq)
def test_getitem_with_slice_no_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id4', description="no_qual4")
eseq = Sequence("02468ace", id='id4', description='no_qual4')
self.assertEquals(seq[0:length:2], eseq)
self.assertEquals(seq[::2], eseq)
def test_getitem_with_tuple_of_mixed_with_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id5', description="dsc5",
quality=np.arange(length))
eseq = Sequence("00000", id='id5', description='dsc5',
quality=[0, 0, 0, 0, 0])
self.assertEquals(seq[0, 0, 0, 0, 0], eseq)
self.assertEquals(seq[0, 0:1, 0, -length::-1, 0, 1:0], eseq)
self.assertEquals(seq[0:1, 0:1, 0:1, 0:1, 0:1], eseq)
self.assertEquals(seq[0:1, 0, 0, 0, 0], eseq)
eseq = Sequence("0123fed9", id='id5', description='dsc5',
quality=[0, 1, 2, 3, 15, 14, 13, 9])
self.assertEquals(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
self.assertEquals(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
self.assertEquals(seq[0:4, :-4:-1, 9, 1:0], eseq)
self.assertEquals(seq[0:4, :-4:-1, 9:10], eseq)
def test_getitem_with_tuple_of_mixed_no_qual(self):
seq = Sequence("0123456789abcdef", id='id6', description="no_qual6")
eseq = Sequence("0123fed9", id='id6', description='no_qual6')
self.assertEquals(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
self.assertEquals(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
self.assertEquals(seq[0:4, :-4:-1, 9], eseq)
self.assertEquals(seq[0:4, :-4:-1, 9:10], eseq)
def test_getitem_with_iterable_of_mixed_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id7', description="dsc7",
quality=np.arange(length))
def generator():
yield slice(0, 4)
yield slice(200, 400)
yield -1
yield slice(-2, -4, -1)
yield 9
eseq = Sequence("0123fed9", id='id7', description='dsc7',
quality=[0, 1, 2, 3, 15, 14, 13, 9])
self.assertEquals(seq[[0, 1, 2, 3, 15, 14, 13, 9]], eseq)
self.assertEquals(seq[generator()], eseq)
self.assertEquals(seq[[slice(0, 4), slice(None, -4, -1), 9]], eseq)
self.assertEquals(seq[
[slice(0, 4), slice(None, -4, -1), slice(9, 10)]], eseq)
def test_getitem_with_iterable_of_mixed_no_qual(self):
s = "0123456789abcdef"
seq = Sequence(s, id='id7', description="dsc7")
def generator():
yield slice(0, 4)
yield slice(200, 400)
yield slice(None, -4, -1)
yield 9
eseq = Sequence("0123fed9", id='id7', description='dsc7')
self.assertEquals(seq[[0, 1, 2, 3, 15, 14, 13, 9]], eseq)
self.assertEquals(seq[generator()], eseq)
self.assertEquals(seq[[slice(0, 4), slice(None, -4, -1), 9]], eseq)
self.assertEquals(seq[
[slice(0, 4), slice(None, -4, -1), slice(9, 10)]], eseq)
def test_getitem_with_numpy_index_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id9', description="dsc9",
quality=np.arange(length))
eseq = Sequence("0123fed9", id='id9', description='dsc9',
quality=[0, 1, 2, 3, 15, 14, 13, 9])
self.assertEquals(seq[np.array([0, 1, 2, 3, 15, 14, 13, 9])], eseq)
def test_getitem_with_numpy_index_no_qual(self):
s = "0123456789abcdef"
seq = Sequence(s, id='id10', description="dsc10")
eseq = Sequence("0123fed9", id='id10', description='dsc10')
self.assertEquals(seq[np.array([0, 1, 2, 3, 15, 14, 13, 9])], eseq)
def test_getitem_with_empty_indices_empty_seq_has_qual(self):
s = ""
length = len(s)
seq = Sequence(s, id='id9', description="dsc9",
quality=np.arange(length))
eseq = Sequence('', id='id9', description='dsc9', quality=[])
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_empty_indices_empty_seq_no_qual(self):
s = ""
seq = Sequence(s, id='id10', description="dsc10")
eseq = Sequence('', id='id10', description='dsc10')
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_empty_indices_non_empty_seq_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id9', description="dsc9",
quality=np.arange(length))
eseq = Sequence('', id='id9', description='dsc9', quality=[])
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_empty_indices_non_empty_seq_no_qual(self):
s = "0123456789abcdef"
seq = Sequence(s, id='id10', description="dsc10")
eseq = Sequence('', id='id10', description='dsc10')
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_boolean_vector_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id11', description="dsc11",
quality=np.arange(length))
eseq = Sequence("13579bdf", id='id11', description="dsc11",
quality=[1, 3, 5, 7, 9, 11, 13, 15])
self.assertEqual(seq[np.array([False, True] * 8)], eseq)
self.assertEqual(seq[[False, True] * 8], eseq)
def test_getitem_with_boolean_vector_no_qual(self):
s = "0123456789abcdef"
seq = Sequence(s, id='id11', description="dsc11")
eseq = Sequence("13579bdf", id='id11', description="dsc11")
self.assertEqual(seq[np.array([False, True] * 8)], eseq)
def test_getitem_with_invalid(self):
seq = Sequence("123456", id='idm', description='description',
quality=[1, 2, 3, 4, 5, 6])
with self.assertRaises(IndexError):
seq['not an index']
with self.assertRaises(IndexError):
seq[['1', '2']]
with self.assertRaises(IndexError):
seq[[1, slice(1, 2), 'a']]
with self.assertRaises(IndexError):
seq[[1, slice(1, 2), True]]
with self.assertRaises(IndexError):
seq[True]
with self.assertRaises(IndexError):
seq[np.array([True, False])]
with self.assertRaises(IndexError):
seq[99999999999999999]
with self.assertRaises(IndexError):
seq[0, 0, 99999999999999999]
# numpy 1.8.1 and 1.9.2 raise different error types
# (ValueError, IndexError).
with self.assertRaises(Exception):
seq[100 * [True, False, True]]
def test_len(self):
self.assertEqual(len(Sequence("")), 0)
self.assertEqual(len(Sequence("a")), 1)
self.assertEqual(len(Sequence("abcdef")), 6)
def test_contains(self):
seq = Sequence("#@ACGT,24.13**02")
tested = 0
for c in self.sequence_kinds:
tested += 1
self.assertTrue(c(',24') in seq)
self.assertTrue(c('*') in seq)
self.assertTrue(c('') in seq)
self.assertFalse(c("$") in seq)
self.assertFalse(c("AGT") in seq)
self.assertEqual(tested, 4)
def test_contains_sequence_subclass(self):
with self.assertRaises(TypeError):
SequenceSubclass("A") in Sequence("AAA")
self.assertTrue(SequenceSubclass("A").values in Sequence("AAA"))
def test_hash(self):
with self.assertRaises(TypeError):
hash(Sequence("ABCDEFG"))
self.assertNotIsInstance(Sequence("ABCDEFG"), Hashable)
def test_iter_has_quality(self):
tested = False
seq = Sequence("0123456789", id="a", description="b",
quality=np.arange(10))
for i, s in enumerate(seq):
tested = True
self.assertEqual(s, Sequence(str(i), id='a', description='b',
quality=[i]))
self.assertTrue(tested)
def test_iter_no_quality(self):
tested = False
seq = Sequence("0123456789", id="a", description="b")
for i, s in enumerate(seq):
tested = True
self.assertEqual(s, Sequence(str(i), id='a', description='b'))
self.assertTrue(tested)
def test_reversed_has_quality(self):
tested = False
seq = Sequence("0123456789", id="a", description="b",
quality=np.arange(10))
for i, s in enumerate(reversed(seq)):
tested = True
self.assertEqual(s, Sequence(str(9 - i), id='a', description='b',
quality=[9 - i]))
self.assertTrue(tested)
def test_reversed_no_quality(self):
tested = False
seq = Sequence("0123456789", id="a", description="b")
for i, s in enumerate(reversed(seq)):
tested = True
self.assertEqual(s, Sequence(str(9 - i), id='a', description='b'))
self.assertTrue(tested)
def test_repr(self):
seq_simple = Sequence("ACGT")
seq_med = Sequence("ACGT", id="id", description="desc",
quality=[1, 2, 3, 4])
seq_complex = Sequence(("ASDKJHDJHFGUGF*&@KFHKHSDGKASDHGKDUYGKFHJ#&*YJ"
"FE&I@#JH@#ASJDHGF*&@#IG#*&IGUJKSADHAKSDJHI#*Y"
"LFUFLIU#RHL*Y#HHFLI#*FHL@#(*HJ"),
id="This is a long id", description="desc",
quality=([1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2] *
10))
self.assertEqual(repr(seq_simple), "Sequence('ACGT', length=4)")
self.assertEqual(repr(seq_med),
("Sequence('ACGT', length=4, id='id',"
" description='desc', quality=[1, 2, 3, 4])"))
self.assertEqual(repr(seq_complex),
("Sequence('ASDKJH ... @#(*HJ', length=120, id='This"
" is a long id', \n description='desc', "
"quality=[1, 2, 3, 4, 5, 6, ..., 7, 8, 9, 0, 1, 2])")
)
def test_str(self):
self.assertEqual(str(Sequence("GATTACA")), "GATTACA")
self.assertEqual(str(Sequence("ACCGGTACC")), "ACCGGTACC")
self.assertEqual(str(Sequence("GREG")), "GREG")
self.assertEqual(str(Sequence("ABC", quality=[1, 2, 3])), "ABC")
self.assertIs(type(str(Sequence("A"))), str)
def test_to_default_behavior(self):
# minimal sequence, sequence with all optional attributes present, and
# a subclass of Sequence
for seq in (Sequence('ACGT'),
Sequence('ACGT', id='foo', description='bar',
quality=range(4)),
SequenceSubclass('ACGU', id='rna seq')):
to = seq._to()
self.assertTrue(seq.equals(to))
self.assertFalse(seq is to)
def test_to_update_single_attribute(self):
seq = Sequence('HE..--..LLO', id='hello',
description='gapped hello',
quality=range(11))
to = seq._to(id='new id')
self.assertFalse(seq is to)
# they don't compare equal when we compare all attributes...
self.assertFalse(seq.equals(to))
# ...but they *do* compare equal when we ignore id, as that was the
# only attribute that changed
self.assertTrue(seq.equals(to, ignore=['id']))
# id should be what we specified in the _to call...
self.assertEqual(to.id, 'new id')
# ...and shouldn't have changed on the original sequence
self.assertEqual(seq.id, 'hello')
def test_to_update_multiple_attributes(self):
seq = Sequence('HE..--..LLO', id='hello',
description='gapped hello',
quality=range(11))
to = seq._to(id='new id', quality=range(20, 25),
sequence='ACGTA', description='new desc')
self.assertFalse(seq is to)
self.assertFalse(seq.equals(to))
# attributes should be what we specified in the _to call...
self.assertEqual(to.id, 'new id')
npt.assert_array_equal(to.quality, np.array([20, 21, 22, 23, 24]))
npt.assert_array_equal(to.values, np.array('ACGTA', dtype='c'))
self.assertEqual(to.description, 'new desc')
# ...and shouldn't have changed on the original sequence
self.assertEqual(seq.id, 'hello')
npt.assert_array_equal(seq.quality, range(11))
npt.assert_array_equal(seq.values, np.array('HE..--..LLO',
dtype='c'))
self.assertEqual(seq.description, 'gapped hello')
def test_to_invalid_kwargs(self):
seq = Sequence('ACCGGTACC', id="test-seq",
description="A test sequence")
with self.assertRaises(TypeError):
seq._to(id='bar', unrecognized_kwarg='baz')
def test_to_extra_non_attribute_kwargs(self):
# test that we can pass through additional kwargs to the constructor
# that aren't related to biological sequence attributes (i.e., they
# aren't state that has to be copied)
class SequenceSubclassWithNewSignature(Sequence):
def __init__(self, sequence, id='', description='', quality=None,
foo=False):
super(SequenceSubclassWithNewSignature, self).__init__(
sequence, id=id, description=description, quality=quality)
self.foo = foo
seq = SequenceSubclassWithNewSignature('ACTG', description='foo')
# _to() without specifying `foo`
to = seq._to()
self.assertTrue(seq.equals(to))
self.assertFalse(seq is to)
self.assertFalse(seq.foo)
# `foo` should default to False
self.assertFalse(to.foo)
# _to() with `foo` specified
to = seq._to(foo=True)
self.assertTrue(seq.equals(to))
self.assertFalse(seq is to)
self.assertFalse(seq.foo)
# `foo` should now be True
self.assertTrue(to.foo)
def test_equals_sequences_without_metadata_compare_equal(self):
self.assertTrue(Sequence('').equals(Sequence('')))
self.assertTrue(Sequence('z').equals(Sequence('z')))
self.assertTrue(
Sequence('ACGT').equals(Sequence('ACGT')))
def test_equals_sequences_with_metadata_compare_equal(self):
seq1 = Sequence('ACGT', id='foo', description='abc',
quality=[1, 2, 3, 4])
seq2 = Sequence('ACGT', id='foo', description='abc',
quality=[1, 2, 3, 4])
self.assertTrue(seq1.equals(seq2))
# order shouldn't matter
self.assertTrue(seq2.equals(seq1))
def test_equals_sequences_from_different_sources_compare_equal(self):
# sequences that have the same data but are constructed from different
# types of data should compare equal
seq1 = Sequence('ACGT', id='foo', description='abc',
quality=(1, 2, 3, 4))
seq2 = Sequence(np.array([65, 67, 71, 84], dtype=np.uint8),
id='foo', description='abc',
quality=np.array([1, 2, 3, 4]))
self.assertTrue(seq1.equals(seq2))
def test_equals_ignore_type(self):
seq1 = Sequence('ACGT')
seq2 = SequenceSubclass('ACGT')
self.assertTrue(seq1.equals(seq2, ignore=['type']))
def test_equals_ignore_id(self):
seq1 = Sequence('ACGT', id='foo')
seq2 = Sequence('ACGT', id='bar')
self.assertTrue(seq1.equals(seq2, ignore=['id']))
def test_equals_ignore_description(self):
seq1 = Sequence('ACGT', description='foo')
seq2 = Sequence('ACGT', description='bar')
self.assertTrue(seq1.equals(seq2, ignore=['description']))
def test_equals_ignore_quality(self):
seq1 = Sequence('ACGT', quality=[1, 2, 3, 4])
seq2 = Sequence('ACGT', quality=[5, 6, 7, 8])
self.assertTrue(seq1.equals(seq2, ignore=['quality']))
def test_equals_ignore_sequence(self):
seq1 = Sequence('ACGA')
seq2 = Sequence('ACGT')
self.assertTrue(seq1.equals(seq2, ignore=['sequence']))
def test_equals_ignore_everything(self):
seq1 = Sequence('ACGA', id='foo', description='abc',
quality=[1, 2, 3, 4])
seq2 = SequenceSubclass('ACGT', id='bar', description='def',
quality=[5, 6, 7, 8])
self.assertTrue(seq1.equals(seq2,
ignore=['quality', 'description', 'id',
'sequence', 'type']))
def test_equals_type_mismatch(self):
seq1 = Sequence('ACGT', id='foo', description='abc',
quality=[1, 2, 3, 4])
seq2 = SequenceSubclass('ACGT', id='bar', description='def',
quality=[5, 6, 7, 8])
self.assertFalse(seq1.equals(seq2,
ignore=['quality', 'description', 'id']))
def test_equals_id_mismatch(self):
seq1 = Sequence('ACGT', id='foo')
seq2 = Sequence('ACGT', id='bar')
self.assertFalse(seq1.equals(seq2))
def test_equals_description_mismatch(self):
seq1 = Sequence('ACGT', description='foo')
seq2 = Sequence('ACGT', description='bar')
self.assertFalse(seq1.equals(seq2))
def test_equals_quality_mismatch(self):
# both provided
seq1 = Sequence('ACGT', quality=[1, 2, 3, 4])
seq2 = Sequence('ACGT', quality=[1, 2, 3, 5])
self.assertFalse(seq1.equals(seq2))
# one provided
seq1 = Sequence('ACGT', quality=[1, 2, 3, 4])
seq2 = Sequence('ACGT')
self.assertFalse(seq1.equals(seq2))
def test_equals_sequence_mismatch(self):
seq1 = Sequence('ACGT')
seq2 = Sequence('TGCA')
self.assertFalse(seq1.equals(seq2))
def test_count(self):
def construct_char_array(s):
return np.fromstring(s, dtype='|S1')
def construct_uint8_array(s):
return np.fromstring(s, dtype=np.uint8)
seq = Sequence("1234567899876555")
tested = 0
for c in self.sequence_kinds:
tested += 1
self.assertEqual(seq.count(c('4')), 1)
self.assertEqual(seq.count(c('8')), 2)
self.assertEqual(seq.count(c('5')), 4)
self.assertEqual(seq.count(c('555')), 1)
self.assertEqual(seq.count(c('555'), 0, 4), 0)
self.assertEqual(seq.count(c('555'), start=0, end=4), 0)
self.assertEqual(seq.count(c('5'), start=10), 3)
self.assertEqual(seq.count(c('5'), end=10), 1)
with self.assertRaises(ValueError):
seq.count(c(''))
self.assertEquals(tested, 4)
def test_count_on_subclass(self):
with self.assertRaises(TypeError) as cm:
Sequence("abcd").count(SequenceSubclass("a"))
self.assertIn("Sequence", str(cm.exception))
self.assertIn("SequenceSubclass", str(cm.exception))
def test_distance(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("abcdef")
seq2 = constructor("12bcef")
self.assertIsInstance(seq1.distance(seq1), float)
self.assertEqual(seq1.distance(seq2), 2.0/3.0)
self.assertEqual(tested, 4)
def test_distance_arbitrary_function(self):
def metric(x, y):
return len(x) ** 2 + len(y) ** 2
seq1 = Sequence("12345678")
seq2 = Sequence("1234")
result = seq1.distance(seq2, metric=metric)
self.assertIsInstance(result, float)
self.assertEqual(result, 80.0)
def test_distance_default_metric(self):
seq1 = Sequence("abcdef")
seq2 = Sequence("12bcef")
seq_wrong = Sequence("abcdefghijklmnop")
self.assertIsInstance(seq1.distance(seq1), float)
self.assertEqual(seq1.distance(seq1), 0.0)
self.assertEqual(seq1.distance(seq2), 2.0/3.0)
with self.assertRaises(ValueError):
seq1.distance(seq_wrong)
with self.assertRaises(ValueError):
seq_wrong.distance(seq1)
def test_distance_on_subclass(self):
seq1 = Sequence("abcdef")
seq2 = SequenceSubclass("12bcef")
with self.assertRaises(TypeError):
seq1.distance(seq2)
def test_matches(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("AACCEEGG")
seq2 = constructor("ABCDEFGH")
expected = np.array([True, False] * 4)
npt.assert_equal(seq1.matches(seq2), expected)
self.assertEqual(tested, 4)
def test_matches_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.matches(seq2)
def test_matches_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.matches(seq2)
def test_mismatches(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("AACCEEGG")
seq2 = constructor("ABCDEFGH")
expected = np.array([False, True] * 4)
npt.assert_equal(seq1.mismatches(seq2), expected)
self.assertEqual(tested, 4)
def test_mismatches_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.mismatches(seq2)
def test_mismatches_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.mismatches(seq2)
def test_mismatch_frequency(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.mismatch_frequency(seq1)), int)
self.assertEqual(seq1.mismatch_frequency(seq1), 0)
self.assertEqual(seq1.mismatch_frequency(seq2), 4)
self.assertEqual(seq1.mismatch_frequency(seq3), 8)
def test_mismatch_frequency_relative(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.mismatch_frequency(seq1, relative=True)),
float)
self.assertEqual(seq1.mismatch_frequency(seq1, relative=True), 0.0)
self.assertEqual(seq1.mismatch_frequency(seq2, relative=True), 0.5)
self.assertEqual(seq1.mismatch_frequency(seq3, relative=True), 1.0)
def test_mismatch_frequency_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.mismatch_frequency(seq2)
    def test_mismatch_frequency_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.mismatch_frequency(seq2)
def test_match_frequency(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.match_frequency(seq1)), int)
self.assertEqual(seq1.match_frequency(seq1), 8)
self.assertEqual(seq1.match_frequency(seq2), 4)
self.assertEqual(seq1.match_frequency(seq3), 0)
def test_match_frequency_relative(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.match_frequency(seq1, relative=True)),
float)
self.assertEqual(seq1.match_frequency(seq1, relative=True), 1.0)
self.assertEqual(seq1.match_frequency(seq2, relative=True), 0.5)
self.assertEqual(seq1.match_frequency(seq3, relative=True), 0.0)
def test_match_frequency_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.match_frequency(seq2)
def test_match_frequency_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.match_frequency(seq2)
def test_index(self):
tested = 0
for c in self.sequence_kinds:
tested += 1
seq = Sequence("ABCDEFG@@ABCDFOO")
self.assertEqual(seq.index(c("A")), 0)
self.assertEqual(seq.index(c("@")), 7)
self.assertEqual(seq.index(c("@@")), 7)
with self.assertRaises(ValueError):
seq.index("A", start=1, end=5)
self.assertEqual(tested, 4)
def test_index_on_subclass(self):
with self.assertRaises(TypeError):
Sequence("ABCDEFG").index(SequenceSubclass("A"))
self.assertEqual(
SequenceSubclass("ABCDEFG").index(SequenceSubclass("A")), 0)
def _compare_kmers_results(self, observed, expected):
for obs, exp in zip_longest(observed, expected, fillvalue=None):
self.assertEqual(obs, exp)
def test_iter_kmers(self):
seq = Sequence('GATTACA', quality=range(7))
expected = [
Sequence('G', quality=[0]),
Sequence('A', quality=[1]),
Sequence('T', quality=[2]),
Sequence('T', quality=[3]),
Sequence('A', quality=[4]),
Sequence('C', quality=[5]),
Sequence('A', quality=[6])
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=False), expected)
expected = [
Sequence('GA', quality=[0, 1]),
Sequence('TT', quality=[2, 3]),
Sequence('AC', quality=[4, 5])
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=False), expected)
expected = [
Sequence('GAT', quality=[0, 1, 2]),
Sequence('TAC', quality=[3, 4, 5])
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=False), expected)
expected = [
Sequence('GATTACA', quality=[0, 1, 2, 3, 4, 5, 6])
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=False), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=False), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_with_overlap(self):
seq = Sequence('GATTACA', quality=range(7))
expected = [
Sequence('G', quality=[0]),
Sequence('A', quality=[1]),
Sequence('T', quality=[2]),
Sequence('T', quality=[3]),
Sequence('A', quality=[4]),
Sequence('C', quality=[5]),
Sequence('A', quality=[6])
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=True), expected)
expected = [
Sequence('GA', quality=[0, 1]),
Sequence('AT', quality=[1, 2]),
Sequence('TT', quality=[2, 3]),
Sequence('TA', quality=[3, 4]),
Sequence('AC', quality=[4, 5]),
Sequence('CA', quality=[5, 6])
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=True), expected)
expected = [
Sequence('GAT', quality=[0, 1, 2]),
Sequence('ATT', quality=[1, 2, 3]),
Sequence('TTA', quality=[2, 3, 4]),
Sequence('TAC', quality=[3, 4, 5]),
Sequence('ACA', quality=[4, 5, 6])
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=True), expected)
expected = [
Sequence('GATTACA', quality=[0, 1, 2, 3, 4, 5, 6])
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=True), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=True), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_invalid_k(self):
seq = Sequence('GATTACA', quality=range(7))
with self.assertRaises(ValueError):
list(seq.iter_kmers(0))
with self.assertRaises(ValueError):
list(seq.iter_kmers(-42))
def test_iter_kmers_different_sequences(self):
seq = Sequence('HE..--..LLO', id='hello', description='gapped hello',
quality=range(11))
expected = [
Sequence('HE.', quality=[0, 1, 2], id='hello',
description='gapped hello'),
Sequence('.--', quality=[3, 4, 5], id='hello',
description='gapped hello'),
Sequence('..L', quality=[6, 7, 8], id='hello',
description='gapped hello')
]
self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
def test_kmer_frequencies(self):
seq = Sequence('GATTACA', quality=range(7))
# overlap = True
expected = Counter('GATTACA')
self.assertEqual(seq.kmer_frequencies(1, overlap=True), expected)
expected = Counter(['GAT', 'ATT', 'TTA', 'TAC', 'ACA'])
self.assertEqual(seq.kmer_frequencies(3, overlap=True), expected)
expected = Counter([])
self.assertEqual(seq.kmer_frequencies(8, overlap=True), expected)
# overlap = False
expected = Counter(['GAT', 'TAC'])
self.assertEqual(seq.kmer_frequencies(3, overlap=False), expected)
expected = Counter(['GATTACA'])
self.assertEqual(seq.kmer_frequencies(7, overlap=False), expected)
expected = Counter([])
self.assertEqual(seq.kmer_frequencies(8, overlap=False), expected)
def test_kmer_frequencies_relative(self):
seq = Sequence('GATTACA', quality=range(7))
# overlap = True
expected = defaultdict(float)
expected['A'] = 3/7.
expected['C'] = 1/7.
expected['G'] = 1/7.
expected['T'] = 2/7.
self.assertEqual(seq.kmer_frequencies(1, overlap=True, relative=True),
expected)
expected = defaultdict(float)
expected['GAT'] = 1/5.
expected['ATT'] = 1/5.
expected['TTA'] = 1/5.
expected['TAC'] = 1/5.
expected['ACA'] = 1/5.
self.assertEqual(seq.kmer_frequencies(3, overlap=True, relative=True),
expected)
expected = defaultdict(float)
self.assertEqual(seq.kmer_frequencies(8, overlap=True, relative=True),
expected)
# overlap = False
expected = defaultdict(float)
expected['GAT'] = 1/2.
expected['TAC'] = 1/2.
self.assertEqual(seq.kmer_frequencies(3, overlap=False, relative=True),
expected)
expected = defaultdict(float)
expected['GATTACA'] = 1.0
self.assertEqual(seq.kmer_frequencies(7, overlap=False, relative=True),
expected)
expected = defaultdict(float)
self.assertEqual(seq.kmer_frequencies(8, overlap=False, relative=True),
expected)
def test_kmer_frequencies_floating_point_precision(self):
# Test that a sequence having no variation in k-words yields a
# frequency of exactly 1.0. Note that it is important to use
# self.assertEqual here instead of self.assertAlmostEqual because we
# want to test for exactly 1.0. A previous implementation of
# Sequence.kmer_frequencies(relative=True) added (1 / num_words) for
# each occurrence of a k-word to compute the frequencies (see
# https://github.com/biocore/scikit-bio/issues/801). In certain cases,
# this yielded a frequency slightly less than 1.0 due to roundoff
# error. The test case here uses a sequence with 10 characters that are
# all identical and computes k-word frequencies with k=1. This test
# case exposes the roundoff error present in the previous
# implementation because there are 10 k-words (which are all
# identical), so 1/10 added 10 times yields a number slightly less than
# 1.0. This occurs because 1/10 cannot be represented exactly as a
# floating point number.
seq = Sequence('AAAAAAAAAA')
self.assertEqual(seq.kmer_frequencies(1, relative=True),
defaultdict(float, {'A': 1.0}))
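    # A quick numeric illustration of the roundoff issue described above,
    # assuming standard IEEE-754 double precision (which both Python floats
    # and numpy use by default):
    #
    #     >>> sum([1 / 10.0] * 10) == 1.0   # repeated addition of 1/10
    #     False
    #     >>> (10 / 10.0) == 1.0            # count once, divide once
    #     True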
def test_find_with_regex(self):
seq = Sequence('GATTACA', quality=range(7))
pat = re.compile('(T+A)(CA)')
obs = list(seq.find_with_regex(pat))
exp = [slice(2, 5), slice(5, 7)]
self.assertEqual(obs, exp)
self.assertIs(type(seq.find_with_regex(pat)), GeneratorType)
def test_find_with_regex_string_as_input(self):
seq = Sequence('GATTACA', quality=range(7))
pat = '(T+A)(CA)'
obs = list(seq.find_with_regex(pat))
exp = [slice(2, 5), slice(5, 7)]
self.assertEqual(obs, exp)
self.assertIs(type(seq.find_with_regex(pat)), GeneratorType)
def test_find_with_regex_no_groups(self):
seq = Sequence('GATTACA', quality=range(7))
pat = re.compile('(FOO)')
self.assertEqual(list(seq.find_with_regex(pat)), [])
def test_find_with_regex_ignore_no_difference(self):
seq = Sequence('..ABCDEFG..')
pat = "([A-Z]+)"
exp = [slice(2, 9)]
self.assertEqual(list(seq.find_with_regex(pat)), exp)
obs = seq.find_with_regex(
pat, ignore=np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
dtype=bool))
self.assertEqual(list(obs), exp)
def test_find_with_regex_ignore(self):
obs = Sequence('A..A..BBAAB.A..AB..A.').find_with_regex(
"(A+)", ignore=np.array([0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1,
1, 0, 0, 1, 1, 0, 1], dtype=bool))
self.assertEqual(list(obs), [slice(0, 4), slice(8, 10), slice(12, 16),
slice(19, 20)])
def test_find_with_regex_ignore_index_array(self):
obs = Sequence('A..A..BBAAB.A..AB..A.').find_with_regex(
"(A+)", ignore=np.array([1, 2, 4, 5, 11, 13, 14, 17, 18, 20]))
self.assertEqual(list(obs), [slice(0, 4), slice(8, 10), slice(12, 16),
slice(19, 20)])
def test_iter_contiguous_index_array(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c([0, 1, 2, 3, 8, 9, 10, 11]))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_boolean_vector(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c(([True] * 4 + [False] * 4) * 2))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_iterable_slices(self):
def spaced_out():
yield slice(0, 4)
yield slice(8, 12)
def contiguous():
yield slice(0, 4)
yield slice(4, 8)
yield slice(12, 16)
s = Sequence("0123456789abcdef")
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c(spaced_out()))
self.assertEqual(list(obs), exp)
exp = [Sequence("01234567"), Sequence("cdef")]
obs = s.iter_contiguous(c(contiguous()))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_with_max_length(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("234"), Sequence("678"), Sequence("abc")]
obs = s.iter_contiguous(c([True, False, True, True] * 4),
min_length=3)
self.assertEqual(list(obs), exp)
exp = [Sequence("0"), Sequence("234"), Sequence("678"),
Sequence("abc"), Sequence("ef")]
obs1 = list(s.iter_contiguous(c([True, False, True, True] * 4),
min_length=1))
obs2 = list(s.iter_contiguous(c([True, False, True, True] * 4)))
self.assertEqual(obs1, obs2)
self.assertEqual(obs1, exp)
def test_iter_contiguous_with_invert(self):
def spaced_out():
yield slice(0, 4)
yield slice(8, 12)
def contiguous():
yield slice(0, 4)
yield slice(4, 8)
yield slice(12, 16)
s = Sequence("0123456789abcdef")
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = [Sequence("4567"), Sequence("cdef")]
obs = s.iter_contiguous(c(spaced_out()), invert=True)
self.assertEqual(list(obs), exp)
exp = [Sequence("89ab")]
obs = s.iter_contiguous(c(contiguous()), invert=True)
self.assertEqual(list(obs), exp)
def test_munge_to_index_array_valid_index_array(self):
s = Sequence('123456')
for c in list, tuple, np.array, pd.Series:
exp = np.array([1, 2, 3], dtype=int)
obs = s._munge_to_index_array(c([1, 2, 3]))
npt.assert_equal(obs, exp)
exp = np.array([1, 3, 5], dtype=int)
obs = s._munge_to_index_array(c([1, 3, 5]))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_index_array(self):
s = Sequence("12345678")
for c in list, tuple, np.array, pd.Series:
with self.assertRaises(ValueError):
s._munge_to_index_array(c([3, 2, 1]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([5, 6, 7, 2]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([0, 1, 2, 1]))
def test_munge_to_index_array_valid_bool_array(self):
s = Sequence('123456')
for c in list, tuple, np.array, pd.Series:
exp = np.array([2, 3, 5], dtype=int)
obs = s._munge_to_index_array(
c([False, False, True, True, False, True]))
npt.assert_equal(obs, exp)
exp = np.array([], dtype=int)
obs = s._munge_to_index_array(
c([False] * 6))
npt.assert_equal(obs, exp)
exp = np.arange(6)
obs = s._munge_to_index_array(
c([True] * 6))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_bool_array(self):
s = Sequence('123456')
for c in (list, tuple, lambda x: np.array(x, dtype=bool),
lambda x: pd.Series(x, dtype=bool)):
with self.assertRaises(ValueError):
s._munge_to_index_array(c([]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([True]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([True] * 10))
def test_munge_to_index_array_valid_iterable(self):
s = Sequence('')
def slices_only():
return (slice(i, i+1) for i in range(0, 10, 2))
def mixed():
return (slice(i, i+1) if i % 2 == 0 else i for i in range(10))
def unthinkable():
for i in range(10):
if i % 3 == 0:
yield slice(i, i+1)
elif i % 3 == 1:
yield i
else:
yield np.array([i], dtype=int)
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = np.arange(10, dtype=int)
obs = s._munge_to_index_array(c(mixed()))
npt.assert_equal(obs, exp)
exp = np.arange(10, dtype=int)
obs = s._munge_to_index_array(c(unthinkable()))
npt.assert_equal(obs, exp)
exp = np.arange(10, step=2, dtype=int)
obs = s._munge_to_index_array(c(slices_only()))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_iterable(self):
s = Sequence('')
def bad1():
yield "r"
yield [1, 2, 3]
def bad2():
yield 1
yield 'str'
def bad3():
yield False
yield True
yield 2
def bad4():
yield np.array([False, True])
yield slice(2, 5)
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
with self.assertRaises(TypeError):
s._munge_to_index_array(bad1())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad2())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad3())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad4())
if __name__ == "__main__":
main()
|
|
from django import forms
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
import pytz
from rest_framework import fields, permissions, serializers
from rest_framework.authentication import SessionAuthentication, CSRFCheck
from rest_framework.exceptions import APIException, AuthenticationFailed
from rest_framework.filters import BaseFilterBackend
from tower import ugettext as _
from kitsune.sumo.utils import uselocale
from kitsune.sumo.urlresolvers import get_best_language
from kitsune.users.models import Profile
class GenericAPIException(APIException):
"""Generic Exception, since DRF doesn't provide one.
DRF allows views to throw subclasses of APIException to cause non-200
status codes to be sent back to API consumers. These subclasses are
expected to have a ``status_code`` and ``detail`` property.
DRF doesn't give a generic way to make an object with these properties.
Instead you are expected to make many specific subclasses and make
instances of those. That seemed lame, so this class creates instances
instead of lots of subclasses.
"""
def __init__(self, status_code, detail, **kwargs):
self.status_code = status_code
self.detail = detail
for key, val in kwargs.items():
setattr(self, key, val)
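# A minimal usage sketch (the view method and Document model below are
# hypothetical): raising this from a DRF view sends the given status code
# and detail message back to the API consumer.
#
#     def retrieve(self, request, pk=None):
#         try:
#             doc = Document.objects.get(pk=pk)
#         except Document.DoesNotExist:
#             raise GenericAPIException(404, 'No such document.')
#         ...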
class LocaleNegotiationMixin(object):
"""A mixin for CBV to select a locale based on Accept-Language headers."""
def get_locale(self):
accept_language = self.request.META.get('HTTP_ACCEPT_LANGUAGE', '')
lang = get_best_language(accept_language)
return lang or settings.WIKI_DEFAULT_LANGUAGE
def get_serializer_context(self):
context = super(LocaleNegotiationMixin, self).get_serializer_context()
context['locale'] = self.get_locale()
return context
class LocalizedCharField(fields.CharField):
"""
This is a field for DRF that localizes itself based on the current locale.
There should be a locale field on the serialization context. If the view
that uses this serializer subclasses LocaleNegotiationMixin, the context
will get a locale field automatically.
A serializer can use this field like this:
class FooSerializer(serializers.ModelSerializer):
title = LocalizedCharField(source='title',
l10n_context='DB: bar.Foo.title')
class Meta:
model = Foo
fields = ('id', 'title')
:args l10n_context: Set the localization context, mainly for fields that
come from the DB.
"""
type_name = 'LocalizedCharField'
type_label = 'string'
form_field_class = forms.CharField
read_only = True
def __init__(self, l10n_context=None, **kwargs):
self.l10n_context = l10n_context
super(LocalizedCharField, self).__init__(**kwargs)
def to_native(self, value):
value = super(LocalizedCharField, self).from_native(value)
locale = self.context.get('locale')
if locale is None:
return value
with uselocale(locale):
return _(value, self.l10n_context)
class SplitSourceField(fields.Field):
"""
This allows reading from one field and writing to another under the same
name in the serialized/deserialized data.
A serializer can use this field like this:
class FooSerializer(serializers.ModelSerializer):
content = SplitSourceField(read_source='content_parsed', write_source='content')
class Meta:
model = Foo
fields = ('id', 'content')
The normal field parameter ``source`` is no longer allowed. Instead use
``read_source`` and ``write_source``.
:args read_source: The field to read from for serialization.
:args write_source: The field to write to for deserialization.
"""
type_name = 'SplitSourceField'
read_only = False
def __init__(self, write_source=None, read_source=None, source=None, **kwargs):
if source is not None:
raise ValueError("Use read_source and write_source with SplitSourceField.")
self.read_source = read_source
self.write_source = write_source
super(SplitSourceField, self).__init__(**kwargs)
def get_value(self, dictionary):
"""
Given the *incoming* primitive data, return the value for this field
that should be validated and transformed to a native value.
"""
# NB: This doesn't support reading from HTML input, unlike normal fields.
return dictionary.get(self.write_source, fields.empty)
def get_attribute(self, instance):
"""
Given the *outgoing* object instance, return the primitive value
that should be used for this field.
"""
# NB: This is a lot less robust than the DRF original, but it
# should be fine for our purposes.
return getattr(instance, self.read_source)
def to_representation(self, obj):
return obj
def to_internal_value(self, data):
return data
class DateTimeUTCField(fields.DateTimeField):
"""
This is like DateTimeField, except it always outputs in UTC.
"""
def to_representation(self, value):
if value.tzinfo is None:
default_tzinfo = pytz.timezone(settings.TIME_ZONE)
value = default_tzinfo.localize(value)
value = value.astimezone(pytz.utc)
return super(DateTimeUTCField, self).to_representation(value)
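# Behaviour sketch for the conversion above, assuming settings.TIME_ZONE is
# 'US/Pacific': a naive datetime is first localized to the Django default
# timezone and then converted to UTC before the normal DateTimeField
# rendering runs.
#
#     >>> import pytz
#     >>> from datetime import datetime
#     >>> pacific = pytz.timezone('US/Pacific')
#     >>> pacific.localize(datetime(2015, 1, 1, 12, 0)).astimezone(pytz.utc)
#     datetime.datetime(2015, 1, 1, 20, 0, tzinfo=<UTC>)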
class _IDSerializer(serializers.Serializer):
id = fields.Field(source='pk')
class Meta:
fields = ('id', )
class GenericRelatedField(fields.ReadOnlyField):
"""
Serializes GenericForeignKey relations using specified type of serializer.
"""
def __init__(self, serializer_type='fk', **kwargs):
self.serializer_type = serializer_type
super(GenericRelatedField, self).__init__(**kwargs)
def to_representation(self, value):
content_type = ContentType.objects.get_for_model(value)
data = {'type': content_type.model}
if isinstance(value, User):
value = Profile.objects.get(user=value)
if hasattr(value, 'get_serializer'):
SerializerClass = value.get_serializer(self.serializer_type)
else:
SerializerClass = _IDSerializer
data.update(SerializerClass(instance=value).data)
return data
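# Rough shape of the serialized output (model and field names are
# hypothetical): for a relation pointing at an object whose model provides
# get_serializer(), the nested serializer data is merged with the content
# type, e.g.
#
#     {'type': 'question', 'id': 42, 'title': '...'}
#
# For models without get_serializer(), only the content type and primary key
# are emitted via _IDSerializer, e.g. {'type': 'answer', 'id': 7}.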
class InequalityFilterBackend(BaseFilterBackend):
"""A filter backend that allows for field__gt style filtering."""
def filter_queryset(self, request, queryset, view):
filter_fields = getattr(view, 'filter_fields', [])
for key, value in request.QUERY_PARAMS.items():
splits = key.split('__')
if len(splits) != 2:
continue
field, opname = splits
if field not in filter_fields:
continue
op = getattr(self, 'op_' + opname, None)
if op:
queryset = op(queryset, field, value)
return queryset
def op_gt(self, queryset, key, value):
arg = {key + '__gt': value}
return queryset.filter(**arg)
def op_lt(self, queryset, key, value):
arg = {key + '__lt': value}
return queryset.filter(**arg)
def op_gte(self, queryset, key, value):
arg = {key + '__gte': value}
return queryset.filter(**arg)
def op_lte(self, queryset, key, value):
arg = {key + '__lte': value}
return queryset.filter(**arg)
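# A sketch of the filtering above (the field name and URL are illustrative):
# for a view declaring filter_fields = ['created'], a request such as
#
#     GET /api/2/question/?created__gt=2015-01-01
#
# is turned by op_gt() into queryset.filter(created__gt='2015-01-01');
# unknown fields or operator suffixes are silently ignored.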
class GenericDjangoPermission(permissions.BasePermission):
@property
def permissions(self):
        raise NotImplementedError()
def has_permission(self, request, view):
u = request.user
not_inactive = u.is_anonymous() or u.is_active
return not_inactive and all(u.has_perm(p) for p in self.permissions)
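# Typical subclass shape (the permission string is hypothetical): concrete
# classes only need to supply the ``permissions`` property, which lists
# Django permission codenames.
#
#     class CanModerate(GenericDjangoPermission):
#         @property
#         def permissions(self):
#             return ['flagit.can_moderate']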
class OnlyCreatorEdits(permissions.BasePermission):
"""
Only allow objects to be edited and deleted by their creators.
TODO: This should be tied to user and object permissions better, but
for now this is a bandaid.
"""
def has_object_permission(self, request, view, obj):
# SAFE_METHODS is a list containing all the read-only methods.
if request.method in permissions.SAFE_METHODS:
return True
# If flow gets here, the method will modify something.
user = getattr(request, 'user', None)
creator = getattr(obj, 'creator', None)
# Only the creator can modify things.
return user == creator
PermissionListSerializer = None
def PermissionMod(field, permissions):
"""
    Takes a DRF field class and returns a subclass whose value is hidden
    unless all of the given permission checks pass.
"""
class Modded(field):
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = field()
return PermissionMod(serializers.ListSerializer, permissions)(*args, **kwargs)
def get_attribute(self, instance):
if self.check_permissions(instance):
return super(Modded, self).get_attribute(instance)
else:
raise fields.SkipField()
def check_permissions(self, obj):
request = self.context.get('request')
for Perm in permissions:
perm = Perm()
if not perm.has_permission(request, self):
return False
if not perm.has_object_permission(request, self, obj):
return False
return True
return Modded
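# Minimal usage sketch (the serializer and permission class names are
# hypothetical): wrap a field class, then instantiate the result like any
# other DRF field. When the checks fail, get_attribute() raises SkipField
# and the key is simply omitted from the serialized output.
#
#     class ProfileSerializer(serializers.ModelSerializer):
#         email = PermissionMod(fields.EmailField,
#                               permissions=[CanSeeEmail])()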
class InactiveSessionAuthentication(SessionAuthentication):
"""
Use Django's session framework for authentication.
Allows inactive users.
"""
def authenticate(self, request):
"""
Returns a `User` if the request session currently has a logged in user.
Otherwise returns `None`.
"""
# Get the underlying HttpRequest object
request = request._request
user = getattr(request, 'user', None)
# Unauthenticated, CSRF validation not required
if not user or user.is_anonymous():
return None
self.enforce_csrf(request)
# CSRF passed with authenticated user
return (user, None)
def enforce_csrf(self, request):
"""
Enforce CSRF validation for session based authentication.
"""
reason = CSRFCheck().process_view(request, None, (), {})
if reason:
# CSRF failed, bail with explicit error message
raise AuthenticationFailed('CSRF Failed: %s' % reason)
class ImageUrlField(fields.ImageField):
"""An image field that serializes to a url instead of a file name.
Additionally, if there is no file associated with this image, this
returns ``None`` instead of erroring.
"""
def to_native(self, value):
try:
return value.url
except ValueError:
return None
|
|
from __future__ import absolute_import, division, print_function
import atexit
import os
import logging
import socket
import select
import signal
import platform
import requests
from subprocess import Popen, PIPE, call
import struct
import time
import weakref
from .conf import get_config, DEFAULT_KNIT_HOME
from .env import CondaCreator
from .exceptions import KnitException, YARNException
from .yarn_api import YARNAPI
from .utils import triple_slash
from py4j.protocol import Py4JError
from py4j.java_gateway import JavaGateway, GatewayClient
from py4j.java_collections import MapConverter, ListConverter
logger = logging.getLogger(__name__)
on_windows = platform.system() == "Windows"
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
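# Small sanity check for read_int(): the helper reads a 4-byte big-endian
# signed integer from a file-like object.
#
#     >>> import io, struct
#     >>> read_int(io.BytesIO(struct.pack("!i", 1234)))
#     1234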
class Knit(object):
"""
Connection to HDFS/YARN. Launches a single "application" master with a
number of worker containers.
Parameter definition (nn, nn_port, rm, rm_port): those parameters given
to __init__ take priority. If autodetect=True, Knit will attempt to fill
out the others from system configuration files; fallback values are provided
if this fails.
Parameters
----------
nn: str
Namenode hostname/ip
nn_port: int
Namenode Port (default: 9000)
rm: str
Resource Manager hostname
rm_port: int
Resource Manager port (default: 8088)
lang: str
Environment variable language setting, required for ``click`` to
successfully read from the shell. (default: 'C.UTF-8')
user: str ('root')
        The user name from the point of view of HDFS. This is only used when
checking for the existence of knit files on HDFS, since they are stored
in the user's home directory.
hdfs_home: str
Explicit location of a writable directory in HDFS to store files.
Defaults to the user 'home': hdfs://user/<username>/
replication_factor: int (3)
        replication factor for files uploaded to HDFS (default: 3)
autodetect: bool
Autodetect configuration
upload_always: bool(=False)
        If True, always upload the conda environment zip; otherwise check
        whether the file already exists in HDFS (using the hdfs3 library, if
        present) and skip the upload when the HDFS copy matches the local
        file in size and is newer.
knit_home: str
Location of knit's jar
hdfs: HDFileSystem instance or None
Used for checking files in HDFS.
Note: for now, only one Knit instance can live in a single process because
of how py4j interfaces with the JVM.
Examples
--------
>>> k = Knit()
>>> app_id = k.start('sleep 100', num_containers=5, memory=1024)
"""
JAR_FILE = "knit-1.0-SNAPSHOT.jar"
JAVA_APP = "io.continuum.knit.Client"
_instances = weakref.WeakSet()
def __init__(self, autodetect=True, upload_always=False, hdfs_home=None,
knit_home=DEFAULT_KNIT_HOME, hdfs=None, pars=None,
**kwargs):
self.conf = get_config(autodetect=autodetect, pars=pars, **kwargs)
gateway_path = self.conf.get('gateway_path', '')
kerb = self.conf.get(
'hadoop.http.authentication.type', '') == 'kerberos'
if not kerb and self.conf.get('hadoop.http.authentication.simple.'
'anonymous.allowed', '') == 'false':
if 'password' not in self.conf:
                raise KnitException('Simple auth required: please supply '
                                    '`password=`.')
pw = self.conf['password']
else:
pw = None
if self.conf.get('yarn.http.policy', '').upper() == "HTTPS_ONLY":
self.yarn_api = YARNAPI(self.conf['rm'], self.conf['rm_port_https'],
scheme='https', gateway_path=gateway_path,
kerberos=kerb, username=self.conf['user'],
password=pw)
else:
self.yarn_api = YARNAPI(self.conf['rm'], self.conf['rm_port'],
gateway_path=gateway_path,
kerberos=kerb, username=self.conf['user'],
password=pw)
self.KNIT_HOME = knit_home
self.upload_always = upload_always
self.lang = self.conf.get('lang', 'C.UTF-8')
self.hdfs_home = hdfs_home or self.conf.get(
'dfs.user.home.base.dir', '/user/' + self.conf['user'])
self.client_gateway = None
# must set KNIT_HOME ENV for YARN App
os.environ['KNIT_HOME'] = self.KNIT_HOME
os.environ['REPLICATION_FACTOR'] = str(self.conf['replication_factor'])
os.environ['HDFS_KNIT_DIR'] = self.hdfs_home
self.client = None
self.master = None
self.app_id = None
self.proc = None
self.hdfs = hdfs
self._instances.add(self)
def __repr__(self):
return "Knit<RM={0}:{1}>".format(self.conf['rm'], self.conf['rm_port'])
@property
def JAR_FILE_PATH(self):
return os.path.join(self.KNIT_HOME, self.JAR_FILE)
def _pre_flight_checks(self, num_containers, virtual_cores, memory,
files, queue):
"""Some checks to see if app is possible to schedule
This depends on YARN's allocations reporting, which do not necessarily
reflect the true amount of resources on the cluster. Other failure
modes, such as full disc, are not likely to be caught here.
"""
try:
# check response from RM
met = self.yarn_api.cluster_metrics()
except YARNException:
raise
except requests.RequestException as e:
if isinstance(e, requests.Timeout):
m = 'Connection timeout'
else:
m = 'Connection error'
raise YARNException(m + ' when talking to the '
'YARN REST server at {}. This can mean that '
'the server/port values are wrong, that you '
'are using the wrong protocol (http/https) or '
'that you need to route through a proxy.'
''.format(self.yarn_api.url))
if met['activeNodes'] < 1:
            raise KnitException('No node managers active')
# What if we simply don't have the full yarn-site.xml available?
mmin = int(self.conf.get('yarn.scheduler.minimum-allocation-mb', 1024))
# 300MB default allocation for AM in client.scala
mem = (max(300, mmin) + num_containers * max(memory, mmin))
if met['availableMB'] < mem:
raise KnitException('Memory estimate for app (%iMB) exceeds cluster'
' capacity (%iMB)' % (mem, met['availableMB']))
c = 1 + num_containers * virtual_cores
if met['availableVirtualCores'] < c:
raise KnitException('vCPU request for app (%i) exceeds cluster capa'
'city (%i)' % (c, met['availableVirtualCores']))
nodes = self.yarn_api.nodes()
if all((max(mmin, memory) > n['availMemoryMB']) and
(virtual_cores > n['availableVirtualCores'])
for n in nodes):
# cannot test without multiple nodemanagers
raise KnitException('No NodeManager can fit any single container')
if self.hdfs:
df = self.hdfs.df()
cap = (df['capacity'] - df['used']) // 2**20
fs = [self.JAR_FILE_PATH] + [f for f in files
if not f.startswith('hdfs://')]
need = sum(os.stat(f).st_size for f in fs) // 2**20
# NB: if replication > 1 this might not be enough
if cap < need:
                raise KnitException('HDFS space requirement (%iMB) exceeds '
                                    'capacity (%iMB)' % (need, cap))
def start(self, cmd, num_containers=1, virtual_cores=1, memory=128,
files=None, envvars=None, app_name="knit", queue="default",
checks=True):
"""
Method to start a yarn app with a distributed shell
Parameters
----------
cmd: str
command to run in each yarn container
num_containers: int
Number of containers YARN should request (default: 1)
            A container should be requested with the number of cores it can
            saturate, i.e., the average number of threads it expects to have
            runnable at a time.
virtual_cores: int
Number of virtual cores per container (default: 1)
            A node's capacity should be configured with virtual cores equal
            to its number of physical cores.
memory: int
Memory per container (default: 128)
            The unit for memory is megabytes.
files: list
            list of files to be included in each container. If starting with
`hdfs://`, assume these already exist in HDFS and don't need
uploading. Otherwise, if hdfs3 is installed, existence of the
file on HDFS will be checked to see if upload is needed.
Files ending with `.zip` will be decompressed in the
container before launch as a directory with the same name as the
file: if myarc.zip contains files inside a directory stuff/, to
the container they will appear at ./myarc.zip/stuff/* .
envvars: dict
Environment variables to pass to AM *and* workers. Both keys
and values must be strings only.
app_name: String
Application name shown in YARN (default: "knit")
queue: String
RM Queue to use while scheduling (default: "default")
checks: bool=True
Whether to run pre-flight checks before submitting app to YARN
Returns
-------
applicationId: str
A yarn application ID string
"""
files = files or []
envvars = envvars or {'KNIT_LANG': self.lang}
for k, v in envvars.items():
if not isinstance(k, str) or not isinstance(v, str):
raise ValueError('Environment must contain only strings (%s)'
% ((k, v),))
if self.app_id:
raise ValueError('Already started')
if not isinstance(memory, int):
raise KnitException("Memory argument must be an integer")
if files:
if not isinstance(files, list):
raise KnitException("File argument must be a list of strings")
if checks:
self._pre_flight_checks(num_containers, virtual_cores, memory,
files, queue)
# From https://github.com/apache/spark/blob/d83c2f9f0b08d6d5d369d9fae04cdb15448e7f0d/python/pyspark/java_gateway.py
# thank you spark
## Socket for PythonGatewayServer to communicate its port to us
callback_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
callback_socket.bind(('127.0.0.1', 0))
callback_socket.listen(1)
callback_host, callback_port = callback_socket.getsockname()
if not os.path.exists(self.JAR_FILE_PATH):
            raise KnitException('JAR file %s does not exist - please build'
' with maven' % self.JAR_FILE_PATH)
args = ["hadoop", "jar", self.JAR_FILE_PATH, self.JAVA_APP,
"--callbackHost", str(callback_host), "--callbackPort",
str(callback_port)]
## Launch the Java gateway.
# We open a pipe to stdin so that the Java gateway can die when the pipe is broken
if not on_windows:
# Don't send ctrl-c / SIGINT to the Java gateway:
def preexec_func():
signal.signal(signal.SIGINT, signal.SIG_IGN)
proc = Popen(args, stdin=PIPE, preexec_fn=preexec_func)
else:
# preexec_fn not supported on Windows
proc = Popen(args, stdin=PIPE)
self.proc = proc
gateway_port = None
# We use select() here in order to avoid blocking indefinitely if the
# subprocess dies before connecting
long_timeout = 60
while gateway_port is None and proc.poll() is None and long_timeout > 0:
timeout = 1 # (seconds)
readable, _, _ = select.select([callback_socket], [], [], timeout)
if callback_socket in readable:
gateway_connection = callback_socket.accept()[0]
# Determine which ephemeral port the server started on:
gateway_port = read_int(gateway_connection.makefile(mode="rb"))
gateway_connection.close()
callback_socket.close()
long_timeout -= 1
if gateway_port is None:
raise Exception("The JVM Knit client failed to launch successfully."
" Check that java is installed and the Knit JAR"
" file exists.")
gateway = JavaGateway(GatewayClient(port=gateway_port),
auto_convert=True)
self.client = gateway.entry_point
self.client_gateway = gateway
logger.debug("Files submitted: %s" % files)
upfiles = [f for f in files if (not f.startswith('hdfs://')
and self.check_needs_upload(f))]
logger.debug("Files to upload: %s" % upfiles)
jfiles = ListConverter().convert(upfiles, gateway._gateway_client)
jenv = MapConverter().convert(envvars, gateway._gateway_client)
self.app_id = self.client.start(jfiles, jenv, app_name, queue)
## Wait for AM to appear
long_timeout = 100
master_rpcport = -1
while master_rpcport == -1:
master_rpcport = self.client.masterRPCPort()
time.sleep(0.2)
long_timeout -= 0.2
if long_timeout < 0:
break
if master_rpcport in [-1, 'N/A']:
raise Exception(
"""The application master JVM process failed to report back. This can mean:
    - that the YARN cluster cannot schedule adequate resources - check
k.yarn_api.cluster_metrics() and other diagnostic methods;
- that the ApplicationMaster crashed - check the application logs, k.logs();
- that the cluster is otherwise unhealthy - check the RM and NN logs
        (use k.yarn_api.system_logs() to find these on a one-node system)
""")
master_rpchost = self.client.masterRPCHost()
gateway = JavaGateway(GatewayClient(
address=master_rpchost, port=master_rpcport), auto_convert=True)
self.master = gateway.entry_point
rfiles = [triple_slash(f) if f.startswith('hdfs://') else
'/'.join(['hdfs://', self.hdfs_home, '.knitDeps',
os.path.basename(f)])
for f in files]
logger.debug("Resource files: %s" % rfiles)
jfiles = ListConverter().convert(rfiles, gateway._gateway_client)
jenv = MapConverter().convert(envvars, gateway._gateway_client)
self.master.init(jfiles, jenv, cmd, num_containers,
virtual_cores, memory)
return self.app_id
def add_containers(self, num_containers=1, virtual_cores=1, memory=128):
"""
Method to add containers to an already running yarn app
num_containers: int
Number of containers YARN should request (default: 1)
* A container should be requested with the number of cores it can
saturate, i.e.
* the average number of threads it expects to have runnable at a
time.
virtual_cores: int
Number of virtual cores per container (default: 1)
* A node's capacity should be configured with virtual cores equal to
* its number of physical cores.
memory: int
Memory per container (default: 128)
* The unit for memory is megabytes.
"""
self.master.addContainers(num_containers, virtual_cores, memory)
def get_containers(self):
"""
Method to return active containers
Returns
-------
container_list: List
List of dicts with each container's details
"""
if self.app_id:
return self.yarn_api.app_containers(self.app_id)
else:
raise KnitException('Cannot get containers, app has not started')
def get_container_statuses(self):
"""Get status info for each container
Returns dict where the values are the raw text output.
"""
return {c['id']: c['state'] for c in self.get_containers()}
def remove_containers(self, container_id):
"""
Method to remove containers from a running yarn app
Calls removeContainers in ApplicationMaster.scala
Be careful removing the ...0001 container. This is where the
applicationMaster is running
Parameters
----------
container_id: str
Returns
-------
None
"""
if container_id not in self.get_container_statuses():
            raise KnitException('Attempt to remove container not owned by '
                                'this app: ' + container_id)
self.master.removeContainer(str(container_id))
@staticmethod
def create_env(env_name, packages=None, remove=False,
channels=None, conda_pars=None):
"""
Create zipped directory of a conda environment
Parameters
----------
env_name : str
packages : list
        conda_root: str
            Location of the conda installation; not a direct argument here,
            but may be passed to CondaCreator via conda_pars. If None,
            miniconda will be downloaded to produce an isolated environment.
remove : bool
remove possible conda environment before creating
channels : list of str
conda channels to use (defaults to your conda setup)
conda_pars: dict
Further pars to pass to CondaCreator
Returns
-------
path: str
path to zipped conda environment
Examples
--------
>>> k = Knit()
>>> pkg_path = k.create_env(env_name='dev',
... packages=['distributed', 'dask', 'pandas'])
"""
channels = channels or []
c = CondaCreator(channels=channels, **(conda_pars or {}))
return c.create_env(env_name, packages=packages, remove=remove)
def logs(self, shell=False):
"""
Collect logs from RM (if running)
With shell=True, collect logs from HDFS after job completion
Parameters
----------
shell: bool
Shell out to yarn CLI (default False)
Returns
-------
log: dictionary
logs from each container (when possible)
"""
if self.app_id:
return self.yarn_api.logs(self.app_id, shell=shell)
else:
raise KnitException('Cannot get logs, app not started')
def print_logs(self, shell=False):
"""print out a more console-friendly version of logs()"""
for l, v in self.logs(shell).items():
print('\n### Container ', l, ', id ', v.get('id', 'None'), ' ###\n')
for part in ['stdout', 'stderr']:
print('##', part, '##')
print(v[part])
def wait_for_completion(self, timeout=10):
"""
Wait for completion of the yarn application
Returns
-------
bool:
            True if the app reached a terminal state before the timeout,
            False otherwise
"""
cur_status = self.runtime_status()
while cur_status not in ['FAILED', 'KILLED', 'FINISHED']:
time.sleep(0.2)
timeout -= 0.2
cur_status = self.runtime_status()
if timeout < 0:
break
return timeout > 0
def kill(self):
"""
Method to kill a yarn application
Returns
-------
bool:
True if successful, False otherwise.
"""
if self.client is None:
# never started, can't stop - should be warning or exception?
return False
try:
self.client.kill()
except Py4JError:
logger.debug("Error while attempting to kill", exc_info=1)
# fallback
self.yarn_api.kill(self.app_id)
if self.proc is not None:
self.client_gateway.shutdown()
if on_windows:
call(["cmd", "/c", "taskkill", "/f", "/t", "/pid",
str(self.proc.pid)])
self.proc.terminate()
self.proc.communicate()
self.proc = None
self.client = None
out = self.runtime_status() == 'KILLED'
return out
def __del__(self):
if self.app_id is not None:
try:
self.kill()
except:
pass
self.app_id = None
def status(self):
""" Get status of an application
Returns
-------
log: dictionary
status of application
"""
if self.app_id:
return self.yarn_api.apps_info(self.app_id)
else:
raise KnitException("Cannot get status, app not started")
def runtime_status(self):
""" Get runtime status of an application
Returns
-------
str:
status of application
"""
try:
return self.yarn_api.state(self.app_id)
except:
return "NONE"
def list_envs(self):
"""List knit conda environments already in HDFS
Looks in staging directory for zip-files
Returns: list of dict
Details for each zip-file."""
if self.hdfs:
files = self.hdfs.ls(self.hdfs_home + '/.knitDeps/', True)
return [f for f in files if f['name'].endswith('.zip')]
else:
            raise ImportError('Set the `hdfs` attribute to be able to list '
                              'environments.')
def check_needs_upload(self, path):
"""Upload is needed if file does not exist in HDFS or is older"""
if self.upload_always:
return True
fn = '/'.join([self.hdfs_home, '.knitDeps', os.path.basename(path)])
if self.hdfs and self.hdfs.exists(fn):
st = os.stat(path)
size = st.st_size
t = st.st_mtime
info = self.hdfs.info(fn)
if info['size'] == size and t < info['last_mod']:
return False
else:
return True
else:
return True
@classmethod
def _cleanup(cls):
# called on program exit to destroy lingering connections/apps
for instance in cls._instances:
instance.kill()
atexit.register(Knit._cleanup)
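# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; runs only when this module is
# executed directly). It assumes a reachable YARN ResourceManager and a built
# knit JAR; the command and resource numbers are arbitrary placeholders.
if __name__ == '__main__':
    k = Knit(autodetect=True)
    app_id = k.start('sleep 60', num_containers=1, memory=256)
    try:
        # block until the app reaches a terminal state, then show its logs
        k.wait_for_completion(timeout=120)
        print(k.runtime_status())
        k.print_logs(shell=True)
    finally:
        k.kill()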
|
|
import re
import textwrap
import logging
import MySQLdb
from MySQLdb import OperationalError, ProgrammingError, DatabaseError
from MySQLdb.constants.CLIENT import INTERACTIVE
__all__ = [
'connect',
'MySQLClient',
'OperationalError',
'ProgrammingError',
'DatabaseError'
]
LOGGER = logging.getLogger(__name__)
class MySQLClient(object):
def __init__(self, **kwargs):
"""
        Initialize a MySQLClient connection. Keyword arguments are passed
directly to MySQLdb.connect(). See MySQLdb for all known arguments.
Possible Arguments:
host -- Name of host to connect to.
user -- User to authenticate as.
passwd -- Password to authenticate with.
db -- Database to use.
port -- TCP port of MySQL server.
unix_socket -- Location of UNIX socket.
"""
self._conn = MySQLdb.connect(**kwargs)
def quote_id(self, *args):
"""
quote_id(self, *args)
        return a dot-joined string of quoted schema components
        ['test','bar`foo', 'column'] => "`test`.`bar``foo`.`column`"
"""
if not args: return None
return '.'.join(map(lambda x: '`%s`' % x.replace('`','``'), args))
def unquote_id(self, *args):
result = []
for arg in args:
arg = arg[1:-1].replace('``', '`')
result.append(arg)
return result
def quote(self, *args):
"""
quote(self, *args)
return a comma delimited string with each element in args quoted
['a', '\'b', 'c'] => "'a','''b','c'"
"""
if not args: return None
return ','.join(map(lambda x: "'%s'" % x.replace("'","''"), args))
def show_databases(self):
"""
Return a list of databases.
"""
cursor = self.cursor()
cursor.execute('SHOW DATABASES')
result = [db for db, in cursor]
cursor.close()
return result
def show_tables(self, db):
"""
Return a list of tables for 'db'.
Arguments:
db -- The database name.
"""
cursor = self.cursor()
# probably should filter views
cursor.execute('SHOW TABLES FROM %s' % self.quote_id(db))
result = [tbl for tbl, in cursor]
cursor.close()
return result
def show_table_status(self, db):
"""
        Return the table status for 'db'. Returns an iterable generator
object.
Arguments:
db -- The database name.
"""
cursor = self.cursor()
cursor.execute('SHOW TABLE STATUS FROM %s' % self.quote_id(db))
hdr = [d[0].lower() for d in cursor.description]
while True:
row = cursor.fetchone()
if not row:
break
tbl_status = dict(zip(hdr, row))
yield tbl_status
cursor.close()
def show_variable(self, name, session_only=False):
"""
        Returns the result of SHOW GLOBAL VARIABLES LIKE '${name}' without
        any glob wildcards (returns a single string value).
Arguments:
name -- Name of the 'like' variable modifier
session_only -- Boolean. Only show session variables, rather than
global.
"""
cursor = self.cursor()
if session_only:
cursor.execute('SHOW SESSION VARIABLES LIKE %s', name)
else:
cursor.execute('SHOW GLOBAL VARIABLES LIKE %s', name)
try:
_, value = cursor.fetchone()
except TypeError, e:
value = None
cursor.close()
return value
def show_variables_like(self, name, session_only=False):
"""
        Returns the result of SHOW GLOBAL VARIABLES LIKE '%${name}%' with
        glob wildcards added, returning all matching variables as a dict.
Arguments:
name -- Name of the 'like' variable modifier
session_only -- Boolean. Only show session variables, rather than
global.
"""
cursor = self.cursor()
        pattern = '%' + name + '%'
        if session_only:
            cursor.execute('SHOW SESSION VARIABLES LIKE %s', (pattern,))
        else:
            cursor.execute('SHOW GLOBAL VARIABLES LIKE %s', (pattern,))
variables = {}
for row in cursor.fetchall():
variables[row[0]] = row[1]
cursor.close()
return variables
def set_variable(self, name, value, session=True):
"""
Set a variable in the running server
"""
cursor = self.cursor()
name = self.quote_id(name)
sql = 'SET ' + ['GLOBAL', 'SESSION'][session] + ' ' + name + ' = %s'
cursor.execute(sql, value)
if not session:
LOGGER.debug("GLOBAL variable set: %s = %s" % (name, value))
cursor.close()
def set_wait_timeout(self, value):
"""
Change the idle timeout for this connection. This method is
deprecated, use MySQLClient.set_variable.
        If this connection is flagged as interactive, interactive_timeout
        will be set, otherwise wait_timeout is set.
"""
if self.client_flag & INTERACTIVE:
self.set_variable('interactive_timeout', value)
else:
self.set_variable('wait_timeout', value)
def show_indexes(self, db, tbl):
"""
        Returns a dictionary mapping index name to column names for the
        database and table specified
"""
cursor = self.cursor()
sql = "SHOW INDEXES FROM %s" % self.quote_id(db, tbl)
cursor.execute(sql)
hdr = [d[0].lower() for d in cursor.description]
info = {}
for row in cursor.fetchall():
row = dict(zip(hdr, row))
info.setdefault(row.get('key_name'), [])\
.append(row.get('column_name'))
cursor.close()
return info
def flush_logs(self):
"""
Runs FLUSH LOGS
"""
cursor = self.cursor()
LOGGER.debug("Query: FLUSH LOGS executed.")
cursor.execute('FLUSH LOGS')
cursor.close()
def flush_tables(self, table_list=None):
"""
Runs FLUSH TABLES, by default flushes all tables. Only flush specific
tables by passing a list of database.table names.
"""
        cursor = self.cursor()
        if table_list:
            for db_and_table in table_list:
                db, table = db_and_table.split('.')
                LOGGER.debug('Query: FLUSH TABLES %s.%s' % (db, table))
                cursor.execute('FLUSH TABLES %s' % self.quote_id(db, table))
        else:
            LOGGER.debug('Query: FLUSH TABLES')
            cursor.execute('FLUSH TABLES')
        cursor.close()
def flush_tables_with_read_lock(self, extra_flush=False):
"""
Runs FLUSH TABLES WITH READ LOCK
"""
cursor = self.cursor()
if extra_flush:
LOGGER.debug('Query: FLUSH TABLES')
cursor.execute('FLUSH TABLES')
LOGGER.debug('Query: FLUSH TABLES WITH READ LOCK')
cursor.execute('FLUSH TABLES WITH READ LOCK')
cursor.close()
def lock_tables(self, table_list=None):
if not table_list:
return
query = 'LOCK TABLES ' + ' READ LOCAL, '.join(table_list)\
+ ' READ LOCAL'
LOGGER.debug("Query: %s", query)
cursor = self.cursor()
cursor.execute(query)
cursor.close()
def unlock_tables(self):
cursor = self.cursor()
LOGGER.debug('Query: UNLOCK TABLES')
cursor.execute('UNLOCK TABLES')
cursor.close()
def walk_databases(self):
for db in self.show_databases():
yield db
def walk_tables(self, dbinclude=None):
"""
        walk_tables(self, dbinclude=None)
        Walks over the tables in the databases listed in dbinclude and yields
        the dictionary from each SHOW TABLE STATUS row, with the database
        name added under the 'db' key.
        Databases not listed in dbinclude are skipped; if dbinclude is None
        or empty, nothing is yielded.
"""
for db in self.show_databases():
if db not in (dbinclude or ()):
continue
for tbl_status in self.show_table_status(db):
tbl_status['db'] = db
yield tbl_status
def show_master_status(self):
cursor = self.cursor()
info = None
if cursor.execute('SHOW MASTER STATUS'):
info = cursor.fetchone()[0:2]
cursor.close()
return info
def show_slave_status(self):
cursor = self.cursor(MySQLdb.cursors.DictCursor)
info = None
cursor.execute('SHOW SLAVE STATUS')
info = cursor.fetchone()
cursor.close()
return info
def is_slave_running(self):
info = self.show_slave_status()
if not info:
return False
return (info.get('Slave_IO_Running', 'No') == 'Yes'
and info.get('Slave_SQL_Running', 'No') == 'Yes')
def start_slave(self):
cursor = self.cursor()
#FIXME: handle other warnings?
LOGGER.debug("Query: START SLAVE")
cursor.execute('START SLAVE')
cursor.close()
def stop_slave(self):
if not self.is_slave_running():
raise OperationalError("Slave is not running")
cursor = self.cursor()
cursor.execute('STOP SLAVE')
messages = cursor.messages
cursor.close()
if messages:
raise OperationalError("%s[%d]: %s" % messages[1])
def show_transactional_engines(self):
"""
        show_transactional_engines(self)
returns a list of engines with transactional capabilities suitable for
mysqldump's --single-transaction flag
"""
if self.server_version() < (5, 1, 2):
# No access to an engines transactional status
# before 5.1.2, so statically code the ones we
# know about
            return ['innodb', 'berkeleydb']
else:
cursor = self.cursor()
cursor.execute("""SELECT Engine
FROM INFORMATION_SCHEMA.ENGINES
WHERE TRANSACTIONS = 'YES'""")
result = [eng[0].lower() for eng in cursor.fetchall()]
cursor.close()
return result
def server_version(self):
"""
server_version(self)
returns a numeric tuple: major, minor, revision versions (respectively)
"""
version = self.get_server_info()
m = re.match(r'^(\d+)\.(\d+)\.(\d+)', version)
if m:
return tuple(map(int, m.groups()))
else:
# TODO: make this prettier
raise OperationalError("Could not match server version")
def is_transactional(self, engine):
if not engine:
return False
if not hasattr(self, '_txn_ngn_cache'):
self._txn_ngn_cache = self.show_transactional_engines() + ['view']
return engine.lower() in self._txn_ngn_cache
def encode_as_filename(self, name):
if self.server_version() < (5, 1, 2):
raise OperationalError, \
"MySQLClient.encode_as_filename not compatible with MySQL < 5.1."
cursor = self.cursor()
orig_charset = self.show_variable('character_set_results',
session_only=True)
try:
self.set_variable('character_set_results',
'filename',
session=True)
cursor.execute('SELECT %s', name)
filename, = [x for x, in cursor]
cursor.close()
self.set_variable('character_set_results',
orig_charset,
session=True)
        except OperationalError:
            # restore the original charset before re-raising
            self.set_variable('character_set_results', orig_charset, session=True)
            raise
return filename
def show_encoded_dbs(self):
if self.server_version() < (5, 1, 2):
raise OperationalError, \
"MySQLClient.show_encoded_dbs not compatible with MySQL < 5.1."
charset_name = self.get_character_set_info()['name']
self.set_character_set('binary')
cursor = self.cursor()
cursor.execute('''SELECT CONVERT(SCHEMA_NAME USING utf8) AS utf8_name,
CONVERT(SCHEMA_NAME USING filename) AS encoded_name
FROM INFORMATION_SCHEMA.SCHEMATA''')
result = []
for utf8_name, encoded_name in cursor:
result.append((utf8_name, encoded_name))
cursor.close()
self.set_character_set(charset_name)
return result
def run_stmt(self, sql):
cursor = self.cursor()
cursor.execute(sql)
cursor.close()
# pass through to underlying connection object
def __getattr__(self, key):
return getattr(self._conn, key)
# map standard my.cnf parameters to
# what MySQLdb.connect expects
# http://mysql-python.sourceforge.net/MySQLdb.html#mysqldb
CNF_TO_MYSQLDB = {
'user' : 'user', # same
'password' : 'passwd', # weird
'host' : 'host', # same
'port' : 'port',
'socket' : 'unix_socket',
'ssl' : 'ssl',
'compress' : 'compress'
}
def connect(**kwargs):
args = {}
for key in kwargs:
if key in CNF_TO_MYSQLDB:
args[CNF_TO_MYSQLDB[key]] = kwargs[key]
else:
LOGGER.warn("Skipping unknown parameter %s", key)
return MySQLClient(use_unicode=True, charset='utf8', **args)
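# ---------------------------------------------------------------------------
# Hedged usage sketch (only runs when this module is executed directly). The
# connection parameters below are placeholders; substitute values appropriate
# for your server.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    client = connect(host='localhost', user='root', password='secret')
    LOGGER.info("server version: %s",
                '.'.join(map(str, client.server_version())))
    for db in client.show_databases():
        LOGGER.info("%s tables: %s", db, client.show_tables(db))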
|
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations.
There are "opportunistic" tests which allows testing against all 3 databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up db's named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands::
| sudo -u postgres psql
| postgres=# create user openstack_citest with createdb login password
| 'openstack_citest';
| postgres=# create database openstack_citest with owner openstack_citest;
"""
import glob
import os
from migrate import UniqueConstraint
from migrate.versioning import repository
import mock
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as oslodbutils
import sqlalchemy
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy.sql import null
from nova.db import migration
from nova.db.sqlalchemy import migrate_repo
from nova.db.sqlalchemy import migration as sa_migration
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova import test
from nova.tests import fixtures as nova_fixtures
# TODO(sdague): no tests in the nova/tests tree should inherit from
# base test classes in another library. This causes all kinds of havoc
# in these doing things incorrectly for what we need in subunit
# reporting. This is a long unwind, but should be done in the future
# and any code needed out of oslo_db should be exported / accessed as
# a fixture.
class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
test_migrations.WalkVersionsMixin):
"""Test sqlalchemy-migrate migrations."""
TIMEOUT_SCALING_FACTOR = 4
@property
def INIT_VERSION(self):
return migration.db_initial_version()
@property
def REPOSITORY(self):
return repository.Repository(
os.path.abspath(os.path.dirname(migrate_repo.__file__)))
@property
def migration_api(self):
return sa_migration.versioning_api
@property
def migrate_engine(self):
return self.engine
def setUp(self):
# NOTE(sdague): the oslo_db base test case completely
# invalidates our logging setup, we actually have to do that
        # before it is called to keep this from vomiting all over our
# test output.
self.useFixture(nova_fixtures.StandardLogging())
super(NovaMigrationsCheckers, self).setUp()
# NOTE(rpodolyaka): we need to repeat the functionality of the base
# test case a bit here as this gets overridden by oslotest base test
# case and nova base test case cleanup must be the last one (as it
# deletes attributes of test case instances)
self.useFixture(nova_fixtures.Timeout(
os.environ.get('OS_TEST_TIMEOUT', 0),
self.TIMEOUT_SCALING_FACTOR))
def assertColumnExists(self, engine, table_name, column):
self.assertTrue(oslodbutils.column_exists(engine, table_name, column),
'Column %s.%s does not exist' % (table_name, column))
def assertColumnNotExists(self, engine, table_name, column):
self.assertFalse(oslodbutils.column_exists(engine, table_name, column),
'Column %s.%s should not exist' % (table_name, column))
def assertTableNotExists(self, engine, table):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, table)
def assertIndexExists(self, engine, table_name, index):
self.assertTrue(oslodbutils.index_exists(engine, table_name, index),
'Index %s on table %s does not exist' %
(index, table_name))
def assertIndexNotExists(self, engine, table_name, index):
self.assertFalse(oslodbutils.index_exists(engine, table_name, index),
'Index %s on table %s should not exist' %
(index, table_name))
def assertIndexMembers(self, engine, table, index, members):
# NOTE(johannes): Order of columns can matter. Most SQL databases
# can use the leading columns for optimizing queries that don't
# include all of the covered columns.
self.assertIndexExists(engine, table, index)
t = oslodbutils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = [c.name for c in idx.columns]
break
self.assertEqual(members, index_columns)
# Implementations for ModelsMigrationsSync
def db_sync(self, engine):
with mock.patch.object(sa_migration, 'get_engine',
return_value=engine):
sa_migration.db_sync()
def get_engine(self, context=None):
return self.migrate_engine
def get_metadata(self):
return models.BASE.metadata
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table':
# migrate_version is a sqlalchemy-migrate control table and
# isn't included in the model. shadow_* are generated from
# the model and have their own tests to ensure they don't
# drift.
if name == 'migrate_version' or name.startswith('shadow_'):
return False
return True
def _skippable_migrations(self):
special = [
216, # Havana
272, # NOOP migration due to revert
]
havana_placeholders = list(range(217, 227))
icehouse_placeholders = list(range(235, 244))
juno_placeholders = list(range(255, 265))
kilo_placeholders = list(range(281, 291))
liberty_placeholders = list(range(303, 313))
mitaka_placeholders = list(range(320, 330))
newton_placeholders = list(range(335, 345))
ocata_placeholders = list(range(348, 358))
return (special +
havana_placeholders +
icehouse_placeholders +
juno_placeholders +
kilo_placeholders +
liberty_placeholders +
mitaka_placeholders +
newton_placeholders +
ocata_placeholders)
def migrate_up(self, version, with_data=False):
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if version not in self._skippable_migrations():
self.assertIsNotNone(check,
('DB Migration %i does not have a '
'test. Please add one!') % version)
# NOTE(danms): This is a list of migrations where we allow dropping
# things. The rules for adding things here are very very specific.
        # Chances are you don't meet the criteria.
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
exceptions = [
# 267 enforces non-nullable instance.uuid. This was mostly
# a special case because instance.uuid shouldn't be able
# to be nullable
267,
# 278 removes a FK restriction, so it's an alter operation
# that doesn't break existing users
278,
# 280 enforces non-null keypair name. This is really not
# something we should allow, but it's in the past
280,
# 292 drops completely orphaned tables with no users, so
# it can be done without affecting anything.
292,
# 346 Drops column scheduled_at from instances table since it
# is no longer used. The field value is always NULL so
# it does not affect anything.
346,
]
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
# NOTE(danms): We only started requiring things be additive in
# kilo, so ignore all migrations before that point.
KILO_START = 265
if version >= KILO_START and version not in exceptions:
banned = ['Table', 'Column']
else:
banned = None
with nova_fixtures.BannedDBSchemaOperations(banned):
super(NovaMigrationsCheckers, self).migrate_up(version, with_data)
def test_walk_versions(self):
self.walk_versions(snake_walk=False, downgrade=False)
def _check_227(self, engine, data):
table = oslodbutils.get_table(engine, 'project_user_quotas')
# Insert fake_quotas with the longest resource name.
fake_quotas = {'id': 5,
'project_id': 'fake_project',
'user_id': 'fake_user',
'resource': 'injected_file_content_bytes',
'hard_limit': 10}
table.insert().execute(fake_quotas)
# Check we can get the longest resource name.
quota = table.select(table.c.id == 5).execute().first()
self.assertEqual(quota['resource'], 'injected_file_content_bytes')
def _check_228(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'metrics')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.metrics.type,
sqlalchemy.types.Text)
def _check_229(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.extra_resources.type,
sqlalchemy.types.Text)
def _check_230(self, engine, data):
for table_name in ['instance_actions_events',
'shadow_instance_actions_events']:
self.assertColumnExists(engine, table_name, 'host')
self.assertColumnExists(engine, table_name, 'details')
action_events = oslodbutils.get_table(engine,
'instance_actions_events')
self.assertIsInstance(action_events.c.host.type,
sqlalchemy.types.String)
self.assertIsInstance(action_events.c.details.type,
sqlalchemy.types.Text)
def _check_231(self, engine, data):
self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
instances = oslodbutils.get_table(engine, 'instances')
self.assertIsInstance(instances.c.ephemeral_key_uuid.type,
sqlalchemy.types.String)
self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
def _check_232(self, engine, data):
table_names = ['compute_node_stats', 'compute_nodes',
'instance_actions', 'instance_actions_events',
'instance_faults', 'migrations']
for table_name in table_names:
self.assertTableNotExists(engine, 'dump_' + table_name)
def _check_233(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'stats')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.stats.type,
sqlalchemy.types.Text)
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, 'compute_node_stats')
def _check_234(self, engine, data):
self.assertIndexMembers(engine, 'reservations',
'reservations_deleted_expire_idx',
['deleted', 'expire'])
def _check_244(self, engine, data):
volume_usage_cache = oslodbutils.get_table(
engine, 'volume_usage_cache')
self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
def _pre_upgrade_245(self, engine):
# create a fake network
networks = oslodbutils.get_table(engine, 'networks')
fake_network = {'id': 1}
networks.insert().execute(fake_network)
def _check_245(self, engine, data):
networks = oslodbutils.get_table(engine, 'networks')
network = networks.select(networks.c.id == 1).execute().first()
# mtu should default to None
self.assertIsNone(network.mtu)
# dhcp_server should default to None
self.assertIsNone(network.dhcp_server)
# enable dhcp should default to true
self.assertTrue(network.enable_dhcp)
# share address should default to false
self.assertFalse(network.share_address)
def _check_246(self, engine, data):
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
if fk.parent.name == 'compute_node_id']))
def _check_247(self, engine, data):
quota_usages = oslodbutils.get_table(engine, 'quota_usages')
self.assertFalse(quota_usages.c.resource.nullable)
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertTrue(pci_devices.c.deleted.nullable)
self.assertFalse(pci_devices.c.product_id.nullable)
self.assertFalse(pci_devices.c.vendor_id.nullable)
self.assertFalse(pci_devices.c.dev_type.nullable)
def _check_248(self, engine, data):
self.assertIndexMembers(engine, 'reservations',
'reservations_deleted_expire_idx',
['deleted', 'expire'])
def _check_249(self, engine, data):
# Assert that only one index exists that covers columns
# instance_uuid and device_name
bdm = oslodbutils.get_table(engine, 'block_device_mapping')
self.assertEqual(1, len([i for i in bdm.indexes
if [c.name for c in i.columns] ==
['instance_uuid', 'device_name']]))
def _check_250(self, engine, data):
self.assertTableNotExists(engine, 'instance_group_metadata')
self.assertTableNotExists(engine, 'shadow_instance_group_metadata')
def _check_251(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
self.assertColumnExists(engine, 'shadow_compute_nodes',
'numa_topology')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
shadow_compute_nodes = oslodbutils.get_table(engine,
'shadow_compute_nodes')
self.assertIsInstance(compute_nodes.c.numa_topology.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
sqlalchemy.types.Text)
def _check_252(self, engine, data):
oslodbutils.get_table(engine, 'instance_extra')
oslodbutils.get_table(engine, 'shadow_instance_extra')
self.assertIndexMembers(engine, 'instance_extra',
'instance_extra_idx',
['instance_uuid'])
def _check_253(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'pci_requests')
self.assertColumnExists(
engine, 'shadow_instance_extra', 'pci_requests')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(engine,
'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.pci_requests.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
sqlalchemy.types.Text)
def _check_254(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'request_id')
self.assertColumnExists(
engine, 'shadow_pci_devices', 'request_id')
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
shadow_pci_devices = oslodbutils.get_table(
engine, 'shadow_pci_devices')
self.assertIsInstance(pci_devices.c.request_id.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_pci_devices.c.request_id.type,
sqlalchemy.types.String)
def _check_265(self, engine, data):
# Assert that only one index exists that covers columns
# host and deleted
instances = oslodbutils.get_table(engine, 'instances')
self.assertEqual(1, len([i for i in instances.indexes
if [c.name for c in i.columns][:2] ==
['host', 'deleted']]))
# and only one index covers host column
iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
self.assertEqual(1, len([i for i in iscsi_targets.indexes
if [c.name for c in i.columns][:1] ==
['host']]))
def _check_266(self, engine, data):
self.assertColumnExists(engine, 'tags', 'resource_id')
self.assertColumnExists(engine, 'tags', 'tag')
table = oslodbutils.get_table(engine, 'tags')
self.assertIsInstance(table.c.resource_id.type,
sqlalchemy.types.String)
self.assertIsInstance(table.c.tag.type,
sqlalchemy.types.String)
def _pre_upgrade_267(self, engine):
# Create a fixed_ips row with a null instance_uuid (if not already
# there) to make sure that's not deleted.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
fake_fixed_ip = {'id': 1}
fixed_ips.insert().execute(fake_fixed_ip)
# Create an instance record with a valid (non-null) UUID so we make
# sure we don't do something stupid and delete valid records.
instances = oslodbutils.get_table(engine, 'instances')
fake_instance = {'id': 1, 'uuid': 'fake-non-null-uuid'}
instances.insert().execute(fake_instance)
# Add a null instance_uuid entry for the volumes table
# since it doesn't have a foreign key back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
fake_volume = {'id': '9c3c317e-24db-4d57-9a6f-96e6d477c1da'}
volumes.insert().execute(fake_volume)
def _check_267(self, engine, data):
# Make sure the column is non-nullable and the UC exists.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
self.assertTrue(fixed_ips.c.instance_uuid.nullable)
fixed_ip = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
self.assertIsNone(fixed_ip.instance_uuid)
instances = oslodbutils.get_table(engine, 'instances')
self.assertFalse(instances.c.uuid.nullable)
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('instances')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertIn('uniq_instances0uuid', constraint_names)
# Make sure the instances record with the valid uuid is still there.
instance = instances.select(instances.c.id == 1).execute().first()
self.assertIsNotNone(instance)
# Check that the null entry in the volumes table is still there since
# we skipped tables that don't have FK's back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
self.assertTrue(volumes.c.instance_uuid.nullable)
        volume = volumes.select(
volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
).execute().first()
self.assertIsNone(volume.instance_uuid)
def test_migration_267(self):
# This is separate from test_walk_versions so we can test the case
# where there are non-null instance_uuid entries in the database which
# cause the 267 migration to fail.
engine = self.migrate_engine
self.migration_api.version_control(
engine, self.REPOSITORY, self.INIT_VERSION)
self.migration_api.upgrade(engine, self.REPOSITORY, 266)
# Create a consoles record with a null instance_uuid so
# we can test that the upgrade fails if that entry is found.
# NOTE(mriedem): We use the consoles table since that's the only table
# created in the 216 migration with a ForeignKey created on the
# instance_uuid table for sqlite.
consoles = oslodbutils.get_table(engine, 'consoles')
fake_console = {'id': 1}
consoles.insert().execute(fake_console)
# NOTE(mriedem): We handle the 267 migration where we expect to
# hit a ValidationError on the consoles table to have
# a null instance_uuid entry
ex = self.assertRaises(exception.ValidationError,
self.migration_api.upgrade,
engine, self.REPOSITORY, 267)
self.assertIn("There are 1 records in the "
"'consoles' table where the uuid or "
"instance_uuid column is NULL.",
ex.kwargs['detail'])
# Remove the consoles entry with the null instance_uuid column.
rows = consoles.delete().where(
consoles.c['instance_uuid'] == null()).execute().rowcount
self.assertEqual(1, rows)
# Now run the 267 upgrade again.
self.migration_api.upgrade(engine, self.REPOSITORY, 267)
# Make sure the consoles entry with the null instance_uuid
# was deleted.
console = consoles.select(consoles.c.id == 1).execute().first()
self.assertIsNone(console)
def _check_268(self, engine, data):
# We can only assert that the col exists, not the unique constraint
# as the engine is running sqlite
self.assertColumnExists(engine, 'compute_nodes', 'host')
self.assertColumnExists(engine, 'shadow_compute_nodes', 'host')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
shadow_compute_nodes = oslodbutils.get_table(
engine, 'shadow_compute_nodes')
self.assertIsInstance(compute_nodes.c.host.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_compute_nodes.c.host.type,
sqlalchemy.types.String)
def _check_269(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'numa_node')
self.assertColumnExists(engine, 'shadow_pci_devices', 'numa_node')
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
shadow_pci_devices = oslodbutils.get_table(
engine, 'shadow_pci_devices')
self.assertIsInstance(pci_devices.c.numa_node.type,
sqlalchemy.types.Integer)
self.assertTrue(pci_devices.c.numa_node.nullable)
self.assertIsInstance(shadow_pci_devices.c.numa_node.type,
sqlalchemy.types.Integer)
self.assertTrue(shadow_pci_devices.c.numa_node.nullable)
def _check_270(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'flavor')
self.assertColumnExists(engine, 'shadow_instance_extra', 'flavor')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(
engine, 'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.flavor.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.flavor.type,
sqlalchemy.types.Text)
def _check_271(self, engine, data):
self.assertIndexMembers(engine, 'block_device_mapping',
'snapshot_id', ['snapshot_id'])
self.assertIndexMembers(engine, 'block_device_mapping',
'volume_id', ['volume_id'])
self.assertIndexMembers(engine, 'dns_domains',
'dns_domains_project_id_idx',
['project_id'])
self.assertIndexMembers(engine, 'fixed_ips',
'network_id', ['network_id'])
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_instance_uuid_fkey',
['instance_uuid'])
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_virtual_interface_id_fkey',
['virtual_interface_id'])
self.assertIndexMembers(engine, 'floating_ips',
'fixed_ip_id', ['fixed_ip_id'])
self.assertIndexMembers(engine, 'iscsi_targets',
'iscsi_targets_volume_id_fkey', ['volume_id'])
self.assertIndexMembers(engine, 'virtual_interfaces',
'virtual_interfaces_network_id_idx',
['network_id'])
self.assertIndexMembers(engine, 'virtual_interfaces',
'virtual_interfaces_instance_uuid_fkey',
['instance_uuid'])
# Removed on MySQL, never existed on other databases
self.assertIndexNotExists(engine, 'dns_domains', 'project_id')
self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id')
def _pre_upgrade_273(self, engine):
if engine.name != 'sqlite':
return
        # Drop a variety of unique constraints to ensure that the script
        # properly re-adds them
for table_name, constraint_name in [
('compute_nodes', 'uniq_compute_nodes0'
'host0hypervisor_hostname'),
('fixed_ips', 'uniq_fixed_ips0address0deleted'),
('instance_info_caches', 'uniq_instance_info_caches0'
'instance_uuid'),
('instance_type_projects', 'uniq_instance_type_projects0'
'instance_type_id0project_id0'
'deleted'),
('pci_devices', 'uniq_pci_devices0compute_node_id0'
'address0deleted'),
('virtual_interfaces', 'uniq_virtual_interfaces0'
'address0deleted')]:
table = oslodbutils.get_table(engine, table_name)
constraints = [c for c in table.constraints
if c.name == constraint_name]
for cons in constraints:
# Need to use sqlalchemy-migrate UniqueConstraint
cons = UniqueConstraint(*[c.name for c in cons.columns],
name=cons.name,
table=table)
cons.drop()
def _check_273(self, engine, data):
for src_table, src_column, dst_table, dst_column in [
('fixed_ips', 'instance_uuid', 'instances', 'uuid'),
('block_device_mapping', 'instance_uuid', 'instances', 'uuid'),
('instance_info_caches', 'instance_uuid', 'instances', 'uuid'),
('instance_metadata', 'instance_uuid', 'instances', 'uuid'),
('instance_system_metadata', 'instance_uuid',
'instances', 'uuid'),
('instance_type_projects', 'instance_type_id',
'instance_types', 'id'),
('iscsi_targets', 'volume_id', 'volumes', 'id'),
('reservations', 'usage_id', 'quota_usages', 'id'),
('security_group_instance_association', 'instance_uuid',
'instances', 'uuid'),
('security_group_instance_association', 'security_group_id',
'security_groups', 'id'),
('virtual_interfaces', 'instance_uuid', 'instances', 'uuid'),
('compute_nodes', 'service_id', 'services', 'id'),
('instance_actions', 'instance_uuid', 'instances', 'uuid'),
('instance_faults', 'instance_uuid', 'instances', 'uuid'),
('migrations', 'instance_uuid', 'instances', 'uuid')]:
src_table = oslodbutils.get_table(engine, src_table)
fkeys = {fk.parent.name: fk.column
for fk in src_table.foreign_keys}
self.assertIn(src_column, fkeys)
self.assertEqual(fkeys[src_column].table.name, dst_table)
self.assertEqual(fkeys[src_column].name, dst_column)
def _check_274(self, engine, data):
self.assertIndexMembers(engine, 'instances',
'instances_project_id_deleted_idx',
['project_id', 'deleted'])
self.assertIndexNotExists(engine, 'instances', 'project_id')
def _pre_upgrade_275(self, engine):
# Create a keypair record so we can test that the upgrade will set
# 'ssh' as default value in the new column for the previous keypair
# entries.
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
fake_keypair = {'name': 'test-migr'}
key_pairs.insert().execute(fake_keypair)
def _check_275(self, engine, data):
self.assertColumnExists(engine, 'key_pairs', 'type')
self.assertColumnExists(engine, 'shadow_key_pairs', 'type')
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
shadow_key_pairs = oslodbutils.get_table(engine, 'shadow_key_pairs')
self.assertIsInstance(key_pairs.c.type.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_key_pairs.c.type.type,
sqlalchemy.types.String)
# Make sure the keypair entry will have the type 'ssh'
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
keypair = key_pairs.select(
key_pairs.c.name == 'test-migr').execute().first()
self.assertEqual('ssh', keypair.type)
def _check_276(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'vcpu_model')
self.assertColumnExists(engine, 'shadow_instance_extra', 'vcpu_model')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(
engine, 'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.vcpu_model.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.vcpu_model.type,
sqlalchemy.types.Text)
def _check_277(self, engine, data):
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_deleted_allocated_updated_at_idx',
['deleted', 'allocated', 'updated_at'])
def _check_278(self, engine, data):
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertEqual(0, len([fk for fk in compute_nodes.foreign_keys
if fk.parent.name == 'service_id']))
self.assertTrue(compute_nodes.c.service_id.nullable)
def _check_279(self, engine, data):
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('compute_nodes')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertNotIn('uniq_compute_nodes0host0hypervisor_hostname',
constraint_names)
self.assertIn('uniq_compute_nodes0host0hypervisor_hostname0deleted',
constraint_names)
def _check_280(self, engine, data):
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
self.assertFalse(key_pairs.c.name.nullable)
def _check_291(self, engine, data):
# NOTE(danms): This is a dummy migration that just does a consistency
# check
pass
def _check_292(self, engine, data):
self.assertTableNotExists(engine, 'iscsi_targets')
self.assertTableNotExists(engine, 'volumes')
self.assertTableNotExists(engine, 'shadow_iscsi_targets')
self.assertTableNotExists(engine, 'shadow_volumes')
def _pre_upgrade_293(self, engine):
migrations = oslodbutils.get_table(engine, 'migrations')
fake_migration = {}
migrations.insert().execute(fake_migration)
def _check_293(self, engine, data):
self.assertColumnExists(engine, 'migrations', 'migration_type')
self.assertColumnExists(engine, 'shadow_migrations', 'migration_type')
migrations = oslodbutils.get_table(engine, 'migrations')
fake_migration = migrations.select().execute().first()
self.assertIsNone(fake_migration.migration_type)
self.assertFalse(fake_migration.hidden)
def _check_294(self, engine, data):
self.assertColumnExists(engine, 'services', 'last_seen_up')
self.assertColumnExists(engine, 'shadow_services', 'last_seen_up')
services = oslodbutils.get_table(engine, 'services')
shadow_services = oslodbutils.get_table(
engine, 'shadow_services')
self.assertIsInstance(services.c.last_seen_up.type,
sqlalchemy.types.DateTime)
self.assertIsInstance(shadow_services.c.last_seen_up.type,
sqlalchemy.types.DateTime)
def _pre_upgrade_295(self, engine):
self.assertIndexNotExists(engine, 'virtual_interfaces',
'virtual_interfaces_uuid_idx')
def _check_295(self, engine, data):
self.assertIndexMembers(engine, 'virtual_interfaces',
'virtual_interfaces_uuid_idx', ['uuid'])
def _check_296(self, engine, data):
pass
def _check_297(self, engine, data):
self.assertColumnExists(engine, 'services', 'forced_down')
def _check_298(self, engine, data):
# NOTE(nic): This is a MySQL-specific migration, and is a no-op from
# the point-of-view of unit tests, since they use SQLite
pass
def filter_metadata_diff(self, diff):
# Overriding the parent method to decide on certain attributes
        # that may be present in the DB but not in the models.py
def removed_column(element):
# Define a whitelist of columns that would be removed from the
# DB at a later release.
column_whitelist = {'instances': ['internal_id']}
if element[0] != 'remove_column':
return False
table_name, column = element[2], element[3]
return (table_name in column_whitelist and
column.name in column_whitelist[table_name])
return [
element
for element in diff
if not removed_column(element)
]
def _check_299(self, engine, data):
self.assertColumnExists(engine, 'services', 'version')
def _check_300(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'migration_context')
def _check_301(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes',
'cpu_allocation_ratio')
self.assertColumnExists(engine, 'compute_nodes',
'ram_allocation_ratio')
def _check_302(self, engine, data):
self.assertIndexMembers(engine, 'instance_system_metadata',
'instance_uuid', ['instance_uuid'])
def _check_313(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'parent_addr')
self.assertColumnExists(engine, 'shadow_pci_devices', 'parent_addr')
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
shadow_pci_devices = oslodbutils.get_table(
engine, 'shadow_pci_devices')
self.assertIsInstance(pci_devices.c.parent_addr.type,
sqlalchemy.types.String)
self.assertTrue(pci_devices.c.parent_addr.nullable)
self.assertIsInstance(shadow_pci_devices.c.parent_addr.type,
sqlalchemy.types.String)
self.assertTrue(shadow_pci_devices.c.parent_addr.nullable)
self.assertIndexMembers(engine, 'pci_devices',
'ix_pci_devices_compute_node_id_parent_addr_deleted',
['compute_node_id', 'parent_addr', 'deleted'])
def _check_314(self, engine, data):
self.assertColumnExists(engine, 'inventories', 'resource_class_id')
self.assertColumnExists(engine, 'allocations', 'resource_class_id')
self.assertColumnExists(engine, 'resource_providers', 'id')
self.assertColumnExists(engine, 'resource_providers', 'uuid')
self.assertColumnExists(engine, 'compute_nodes', 'uuid')
self.assertColumnExists(engine, 'shadow_compute_nodes', 'uuid')
self.assertIndexMembers(engine, 'allocations',
'allocations_resource_provider_class_id_idx',
['resource_provider_id', 'resource_class_id'])
def _check_315(self, engine, data):
self.assertColumnExists(engine, 'migrations',
'memory_total')
self.assertColumnExists(engine, 'migrations',
'memory_processed')
self.assertColumnExists(engine, 'migrations',
'memory_remaining')
self.assertColumnExists(engine, 'migrations',
'disk_total')
self.assertColumnExists(engine, 'migrations',
'disk_processed')
self.assertColumnExists(engine, 'migrations',
'disk_remaining')
def _check_316(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes',
'disk_allocation_ratio')
def _check_317(self, engine, data):
self.assertColumnExists(engine, 'aggregates', 'uuid')
self.assertColumnExists(engine, 'shadow_aggregates', 'uuid')
def _check_318(self, engine, data):
self.assertColumnExists(engine, 'resource_providers', 'name')
self.assertColumnExists(engine, 'resource_providers', 'generation')
self.assertColumnExists(engine, 'resource_providers', 'can_host')
self.assertIndexMembers(engine, 'resource_providers',
'resource_providers_name_idx',
['name'])
self.assertColumnExists(engine, 'resource_provider_aggregates',
'resource_provider_id')
self.assertColumnExists(engine, 'resource_provider_aggregates',
'aggregate_id')
self.assertIndexMembers(engine, 'resource_provider_aggregates',
'resource_provider_aggregates_aggregate_id_idx',
['aggregate_id'])
self.assertIndexMembers(engine, 'inventories',
'inventories_resource_provider_resource_class_idx',
['resource_provider_id', 'resource_class_id'])
def _check_319(self, engine, data):
self.assertIndexMembers(engine, 'instances',
'instances_deleted_created_at_idx',
['deleted', 'created_at'])
def _check_330(self, engine, data):
# Just a sanity-check migration
pass
def _check_331(self, engine, data):
self.assertColumnExists(engine, 'virtual_interfaces', 'tag')
self.assertColumnExists(engine, 'block_device_mapping', 'tag')
def _check_332(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'keypairs')
def _check_333(self, engine, data):
self.assertColumnExists(engine, 'console_auth_tokens', 'id')
self.assertColumnExists(engine, 'console_auth_tokens', 'token_hash')
self.assertColumnExists(engine, 'console_auth_tokens', 'console_type')
self.assertColumnExists(engine, 'console_auth_tokens', 'host')
self.assertColumnExists(engine, 'console_auth_tokens', 'port')
self.assertColumnExists(engine, 'console_auth_tokens',
'internal_access_path')
self.assertColumnExists(engine, 'console_auth_tokens',
'instance_uuid')
self.assertColumnExists(engine, 'console_auth_tokens', 'expires')
self.assertIndexMembers(engine, 'console_auth_tokens',
'console_auth_tokens_instance_uuid_idx',
['instance_uuid'])
self.assertIndexMembers(engine, 'console_auth_tokens',
'console_auth_tokens_host_expires_idx',
['host', 'expires'])
self.assertIndexMembers(engine, 'console_auth_tokens',
'console_auth_tokens_token_hash_idx',
['token_hash'])
def _check_334(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'device_metadata')
self.assertColumnExists(engine, 'shadow_instance_extra',
'device_metadata')
def _check_345(self, engine, data):
# NOTE(danms): Just a sanity-check migration
pass
def _check_346(self, engine, data):
self.assertColumnNotExists(engine, 'instances', 'scheduled_at')
self.assertColumnNotExists(engine, 'shadow_instances', 'scheduled_at')
def _check_347(self, engine, data):
self.assertIndexMembers(engine, 'instances',
'instances_project_id_idx',
['project_id'])
self.assertIndexMembers(engine, 'instances',
'instances_updated_at_project_id_idx',
['updated_at', 'project_id'])
def _check_358(self, engine, data):
self.assertColumnExists(engine, 'block_device_mapping',
'attachment_id')
def _check_359(self, engine, data):
self.assertColumnExists(engine, 'services', 'uuid')
self.assertIndexMembers(engine, 'services', 'services_uuid_idx',
['uuid'])
def _check_360(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'mapped')
self.assertColumnExists(engine, 'shadow_compute_nodes', 'mapped')
def _check_361(self, engine, data):
self.assertIndexMembers(engine, 'compute_nodes',
'compute_nodes_uuid_idx', ['uuid'])
class TestNovaMigrationsSQLite(NovaMigrationsCheckers,
test_base.DbTestCase,
test.NoDBTestCase):
pass
class TestNovaMigrationsMySQL(NovaMigrationsCheckers,
test_base.MySQLOpportunisticTestCase,
test.NoDBTestCase):
def test_innodb_tables(self):
with mock.patch.object(sa_migration, 'get_engine',
return_value=self.migrate_engine):
sa_migration.db_sync()
total = self.migrate_engine.execute(
"SELECT count(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA = '%(database)s'" %
{'database': self.migrate_engine.url.database})
self.assertGreater(total.scalar(), 0, "No tables found. Wrong schema?")
noninnodb = self.migrate_engine.execute(
"SELECT count(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA='%(database)s' "
"AND ENGINE != 'InnoDB' "
"AND TABLE_NAME != 'migrate_version'" %
{'database': self.migrate_engine.url.database})
count = noninnodb.scalar()
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
class TestNovaMigrationsPostgreSQL(NovaMigrationsCheckers,
test_base.PostgreSQLOpportunisticTestCase,
test.NoDBTestCase):
pass
class ProjectTestCase(test.NoDBTestCase):
def test_no_migrations_have_downgrade(self):
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
# Walk both the nova_api and nova (cell) database migrations.
includes_downgrade = []
for subdir in ('api_migrations', ''):
py_glob = os.path.join(topdir, "db", "sqlalchemy", subdir,
"migrate_repo", "versions", "*.py")
for path in glob.iglob(py_glob):
has_upgrade = False
has_downgrade = False
with open(path, "r") as f:
for line in f:
if 'def upgrade(' in line:
has_upgrade = True
if 'def downgrade(' in line:
has_downgrade = True
if has_upgrade and has_downgrade:
fname = os.path.basename(path)
includes_downgrade.append(fname)
helpful_msg = ("The following migrations have a downgrade "
"which is not supported:"
"\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
self.assertFalse(includes_downgrade, helpful_msg)
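# --- Hedged sketch (illustrative only; not part of the test suite above) ---
# The _check_* methods rely on assertColumnExists/assertIndexMembers helpers
# whose definitions are not shown here.  The helper below shows one common way
# such a column check can be implemented with SQLAlchemy's inspector; it is an
# illustration of the idea, not the helpers' actual implementation.
def _column_exists(engine, table_name, column_name):
    # Local import keeps this illustrative helper self-contained.
    import sqlalchemy
    inspector = sqlalchemy.inspect(engine)
    columns = [col['name'] for col in inspector.get_columns(table_name)]
    return column_name in columns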
|
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import ElectraConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
)
from transformers.models.electra.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST
class ElectraModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
fake_token_labels = ids_tensor([self.batch_size, self.seq_length], 1)
config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
)
def get_config(self):
return ElectraConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
_,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_electra_model(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
model = ElectraModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_electra_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = ElectraModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_electra_for_masked_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
model = ElectraForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_electra_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = ElectraForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_electra_for_token_classification(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
config.num_labels = self.num_labels
model = ElectraForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_electra_for_pretraining(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
config.num_labels = self.num_labels
model = ElectraForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=fake_token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
def create_and_check_electra_for_sequence_classification(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
config.num_labels = self.num_labels
model = ElectraForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_electra_for_question_answering(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
model = ElectraForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_electra_for_multiple_choice(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
config.num_choices = self.num_choices
model = ElectraForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class ElectraModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
ElectraModel,
ElectraForPreTraining,
ElectraForMaskedLM,
ElectraForCausalLM,
ElectraForMultipleChoice,
ElectraForTokenClassification,
ElectraForSequenceClassification,
ElectraForQuestionAnswering,
)
if is_torch_available()
else ()
)
fx_compatible = True
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = ElectraModelTester(self)
self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_electra_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_model(*config_and_inputs)
def test_electra_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_electra_model_as_decoder(*config_and_inputs)
def test_electra_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_electra_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_token_classification(*config_and_inputs)
def test_for_pre_training(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_pretraining(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_sequence_classification(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_question_answering(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ElectraModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_electra_for_causal_lm(*config_and_inputs)
@require_torch
class ElectraModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head_absolute_embedding(self):
model = ElectraModel.from_pretrained("google/electra-small-discriminator")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
output = model(input_ids, attention_mask=attention_mask)[0]
expected_shape = torch.Size((1, 11, 256))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[0.4471, 0.6821, -0.3265], [0.4627, 0.5255, -0.3668], [0.4532, 0.3313, -0.4344]]]
)
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
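# --- Hedged usage sketch (not part of the test suite above) ---
# Mirrors what ElectraModelTester.create_and_check_electra_model asserts, as a
# stand-alone snippet: a tiny, randomly initialised ElectraModel returns hidden
# states shaped (batch, seq_len, hidden_size).  The config values are arbitrary
# small numbers chosen for speed, not those of any published checkpoint.
if is_torch_available():

    def _electra_shape_sketch():
        config = ElectraConfig(
            vocab_size=99,
            hidden_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
        )
        model = ElectraModel(config).eval()
        input_ids = torch.randint(0, config.vocab_size, (2, 7))  # batch=2, seq_len=7
        with torch.no_grad():
            output = model(input_ids)
        assert output.last_hidden_state.shape == (2, 7, config.hidden_size)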
|
|
# -*- coding: utf-8 -*-
import markupsafe
from modularodm import fields
from framework.auth.decorators import Auth
from framework.exceptions import HTTPError
from website.addons.base import exceptions
from website.addons.base import AddonOAuthUserSettingsBase, AddonOAuthNodeSettingsBase
from website.addons.base import StorageAddonBase
from website.oauth.models import ExternalProvider
from addons.figshare import messages
from addons.figshare.client import FigshareClient
from addons.figshare import settings
from addons.figshare.serializer import FigshareSerializer
class Figshare(ExternalProvider):
name = 'figshare'
short_name = 'figshare'
client_id = settings.CLIENT_ID
client_secret = settings.CLIENT_SECRET
auth_url_base = settings.FIGSHARE_OAUTH_AUTH_ENDPOINT
callback_url = settings.FIGSHARE_OAUTH_TOKEN_ENDPOINT
auto_refresh_url = callback_url
# refresh_time = settings.REFRESH_TIME # TODO: maybe
# expiry_time = settings.EXPIRY_TIME
default_scopes = ['all']
def handle_callback(self, response):
"""View called when the Oauth flow is completed. Adds a new BoxUserSettings
record to the user and saves the user's access token and account info.
"""
client = FigshareClient(response['access_token'])
about = client.userinfo()
return {
'provider_id': about['id'],
'display_name': '{} {}'.format(about['first_name'], about.get('last_name')),
}
class FigshareUserSettings(AddonOAuthUserSettingsBase):
oauth_provider = Figshare
serializer = FigshareSerializer
class FigshareNodeSettings(StorageAddonBase, AddonOAuthNodeSettingsBase):
oauth_provider = Figshare
serializer = FigshareSerializer
folder_id = fields.StringField()
folder_name = fields.StringField()
folder_path = fields.StringField() # hijacked; figshare_type
_api = None
@property
def api(self):
"""authenticated ExternalProvider instance"""
if self._api is None:
self._api = Figshare(self.external_account)
return self._api
def fetch_folder_name(self):
return u'{0}:{1}'.format(self.folder_name or 'Unnamed {0}'.format(self.folder_path or ''), self.folder_id)
def fetch_full_folder_path(self):
return self.folder_name
def get_folders(self, **kwargs):
return FigshareClient(self.external_account.oauth_key).get_folders()
def archive_errors(self):
items = []
if self.folder_path in ('article', 'fileset'):
article = FigshareClient(self.external_account.oauth_key).article(self.folder_id)
items = [article]
else:
project = FigshareClient(self.external_account.oauth_key).project(self.folder_id)
items = project['articles'] if project else []
private = any(
[item for item in items if item['status'].lower() != 'public']
)
if private:
return 'The figshare {folder_path} <strong>{folder_name}</strong> contains private content that we cannot copy to the registration. If this content is made public on figshare we should then be able to copy those files. You can view those files <a href="{url}" target="_blank">here.</a>'.format(
folder_path=markupsafe.escape(self.folder_path),
folder_name=markupsafe.escape(self.folder_name),
url=self.owner.web_url_for('collect_file_trees'))
def clear_settings(self):
self.folder_id = None
self.folder_name = None
self.folder_path = None
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
self.clear_settings()
if add_log:
self.nodelogger.log(action='node_deauthorized', save=True)
self.clear_auth()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
try:
# Figshare(self.external_account).refresh_oauth_key() # TODO: Maybe
return {'token': self.external_account.oauth_key}
except Exception as error: # TODO: specific exception
raise HTTPError(error.status_code, data={'message_long': error.message})
def serialize_waterbutler_settings(self):
if not self.folder_path or not self.folder_id:
raise exceptions.AddonError('Folder is not configured')
return {
'container_type': self.folder_path,
'container_id': str(self.folder_id),
}
def create_waterbutler_log(self, auth, action, metadata):
url = self.owner.web_url_for('addon_view_or_download_file', path=metadata['path'], provider='figshare')
self.owner.add_log(
'figshare_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': metadata['materialized'],
'filename': metadata['materialized'].strip('/'),
'urls': {
'view': url,
'download': url + '?action=download'
},
},
)
def set_folder(self, folder_id, auth):
try:
info = FigshareClient(self.external_account.oauth_key).get_linked_folder_info(folder_id)
except HTTPError as e:
raise exceptions.InvalidFolderError(e.message)
self.folder_id = info['id']
self.folder_name = info['name']
self.folder_path = info['path']
self.save()
self.nodelogger.log(action='folder_selected', save=True)
#############
# Callbacks #
#############
def after_delete(self, node=None, user=None):
self.deauthorize(Auth(user=user), add_log=True)
self.save()
def on_delete(self):
self.deauthorize(add_log=False)
self.save()
def before_page_load(self, node, user):
"""
:param Node node:
:param User user:
        :return list: Alert messages, if any
"""
if not self.folder_id:
return []
figshare = node.get_addon('figshare')
# Quit if no user authorization
node_permissions = 'public' if node.is_public else 'private'
if figshare.folder_path == 'project':
if node_permissions == 'private':
message = messages.BEFORE_PAGE_LOAD_PRIVATE_NODE_MIXED_FS.format(category=node.project_or_component, project_id=figshare.folder_id)
return [message]
else:
message = messages.BEFORE_PAGE_LOAD_PUBLIC_NODE_MIXED_FS.format(category=node.project_or_component, project_id=figshare.folder_id)
connect = FigshareClient(self.external_account.oauth_key)
project_is_public = connect.container_is_public(self.folder_id, self.folder_path)
article_permissions = 'public' if project_is_public else 'private'
if article_permissions != node_permissions:
message = messages.BEFORE_PAGE_LOAD_PERM_MISMATCH.format(
category=node.project_or_component,
node_perm=node_permissions,
figshare_perm=article_permissions,
figshare_id=self.folder_id,
folder_type=self.folder_path,
)
if article_permissions == 'private' and node_permissions == 'public':
message += messages.BEFORE_PAGE_LOAD_PUBLIC_NODE_PRIVATE_FS.format(folder_type=self.folder_path)
# No HTML snippets, so escape message all at once
return [markupsafe.escape(message)]
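# --- Hedged sketch (illustration only; not part of the addon) ---
# Figshare.handle_callback above reduces the figshare user-info payload to the
# two fields stored for an external account.  The stand-alone helper below
# mirrors that mapping so the returned shape is visible without OAuth or
# network access; the payload values in the example are hypothetical.
def _external_account_fields_sketch(about):
    return {
        'provider_id': about['id'],
        'display_name': '{} {}'.format(about['first_name'], about.get('last_name')),
    }
# Example:
#   _external_account_fields_sketch(
#       {'id': 42, 'first_name': 'Ada', 'last_name': 'Lovelace'})
#   -> {'provider_id': 42, 'display_name': 'Ada Lovelace'}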
|
|
"""
A widget containing a grid of clickable actions/buttons.
"""
from collections import namedtuple, deque
from PyQt4.QtGui import (
QFrame, QAction, QToolButton, QGridLayout, QFontMetrics,
QSizePolicy, QStyleOptionToolButton, QStylePainter, QStyle
)
from PyQt4.QtCore import Qt, QObject, QSize, QVariant, QEvent, QSignalMapper
from PyQt4.QtCore import pyqtSignal as Signal
from . import utils
_ToolGridSlot = namedtuple(
"_ToolGridSlot",
["button",
"action",
"row",
"column"
]
)
class _ToolGridButton(QToolButton):
def __init__(self, *args, **kwargs):
QToolButton.__init__(self, *args, **kwargs)
self.__text = ""
def actionEvent(self, event):
QToolButton.actionEvent(self, event)
if event.type() == QEvent.ActionChanged or \
event.type() == QEvent.ActionAdded:
self.__textLayout()
def resizeEvent(self, event):
QToolButton.resizeEvent(self, event)
self.__textLayout()
def __textLayout(self):
fm = QFontMetrics(self.font())
text = str(self.defaultAction().text())
words = deque(text.split())
lines = []
curr_line = ""
curr_line_word_count = 0
option = QStyleOptionToolButton()
option.initFrom(self)
margin = self.style().pixelMetric(QStyle.PM_ButtonMargin, option, self)
width = self.width() - 2 * margin
while words:
w = words.popleft()
if curr_line_word_count:
line_extended = " ".join([curr_line, w])
else:
line_extended = w
line_w = fm.boundingRect(line_extended).width()
if line_w >= width:
if curr_line_word_count == 0 or len(lines) == 1:
# A single word that is too long must be elided.
# Also if the text overflows 2 lines
# Warning: hardcoded max lines
curr_line = fm.elidedText(line_extended, Qt.ElideRight,
width)
curr_line = str(curr_line)
else:
# Put the word back
words.appendleft(w)
lines.append(curr_line)
curr_line = ""
curr_line_word_count = 0
if len(lines) == 2:
break
else:
curr_line = line_extended
curr_line_word_count += 1
if curr_line:
lines.append(curr_line)
text = "\n".join(lines)
text = text.replace('&', '&&') # Need escaped ampersand to show
self.__text = text
def paintEvent(self, event):
p = QStylePainter(self)
opt = QStyleOptionToolButton()
self.initStyleOption(opt)
if self.__text:
# Replace the text
opt.text = self.__text
p.drawComplexControl(QStyle.CC_ToolButton, opt)
p.end()
class ToolGrid(QFrame):
"""
A widget containing a grid of actions/buttons.
Actions can be added using standard :func:`QWidget.addAction(QAction)`
and :func:`QWidget.insertAction(int, QAction)` methods.
Parameters
----------
parent : :class:`QWidget`
Parent widget.
columns : int
Number of columns in the grid layout.
buttonSize : :class:`QSize`, optional
Size of tool buttons in the grid.
iconSize : :class:`QSize`, optional
Size of icons in the buttons.
toolButtonStyle : :class:`Qt.ToolButtonStyle`
Tool button style.
"""
actionTriggered = Signal(QAction)
actionHovered = Signal(QAction)
def __init__(self, parent=None, columns=4, buttonSize=None,
iconSize=None, toolButtonStyle=Qt.ToolButtonTextUnderIcon):
QFrame.__init__(self, parent)
if buttonSize is not None:
buttonSize = QSize(buttonSize)
if iconSize is not None:
iconSize = QSize(iconSize)
self.__columns = columns
self.__buttonSize = buttonSize or QSize(50, 50)
self.__iconSize = iconSize or QSize(26, 26)
self.__toolButtonStyle = toolButtonStyle
self.__gridSlots = []
self.__buttonListener = ToolButtonEventListener(self)
self.__buttonListener.buttonRightClicked.connect(
self.__onButtonRightClick)
self.__buttonListener.buttonEnter.connect(
self.__onButtonEnter)
self.__mapper = QSignalMapper()
self.__mapper.mapped[QObject].connect(self.__onClicked)
self.__setupUi()
def __setupUi(self):
layout = QGridLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.setSizeConstraint(QGridLayout.SetFixedSize)
self.setLayout(layout)
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.MinimumExpanding)
def setButtonSize(self, size):
"""
Set the button size.
"""
if self.__buttonSize != size:
self.__buttonSize = size
for slot in self.__gridSlots:
slot.button.setFixedSize(size)
def buttonSize(self):
"""
Return the button size.
"""
return QSize(self.__buttonSize)
def setIconSize(self, size):
"""
Set the button icon size.
"""
if self.__iconSize != size:
self.__iconSize = size
for slot in self.__gridSlots:
slot.button.setIconSize(size)
def iconSize(self):
"""
Return the icon size
"""
return QSize(self.__iconSize)
def setToolButtonStyle(self, style):
"""
Set the tool button style.
"""
if self.__toolButtonStyle != style:
self.__toolButtonStyle = style
for slot in self.__gridSlots:
slot.button.setToolButtonStyle(style)
def toolButtonStyle(self):
"""
Return the tool button style.
"""
return self.__toolButtonStyle
def setColumnCount(self, columns):
"""
Set the number of button/action columns.
"""
if self.__columns != columns:
self.__columns = columns
self.__relayout()
def columns(self):
"""
Return the number of columns in the grid.
"""
return self.__columns
def clear(self):
"""
Clear all actions/buttons.
"""
for slot in reversed(list(self.__gridSlots)):
self.removeAction(slot.action)
self.__gridSlots = []
def insertAction(self, before, action):
"""
Insert a new action at the position currently occupied
by `before` (can also be an index).
Parameters
----------
before : :class:`QAction` or int
Position where the `action` should be inserted.
action : :class:`QAction`
Action to insert
"""
if isinstance(before, int):
actions = list(self.actions())
if len(actions) == 0 or before >= len(actions):
# Insert as the first action or the last action.
return self.addAction(action)
before = actions[before]
return QFrame.insertAction(self, before, action)
def setActions(self, actions):
"""
Clear the grid and add `actions`.
"""
self.clear()
for action in actions:
self.addAction(action)
def buttonForAction(self, action):
"""
Return the :class:`QToolButton` instance button for `action`.
"""
actions = [slot.action for slot in self.__gridSlots]
index = actions.index(action)
return self.__gridSlots[index].button
def createButtonForAction(self, action):
"""
Create and return a :class:`QToolButton` for action.
"""
button = _ToolGridButton(self)
button.setDefaultAction(action)
if self.__buttonSize.isValid():
button.setFixedSize(self.__buttonSize)
if self.__iconSize.isValid():
button.setIconSize(self.__iconSize)
button.setToolButtonStyle(self.__toolButtonStyle)
button.setProperty("tool-grid-button", True)
return button
def count(self):
"""
Return the number of buttons/actions in the grid.
"""
return len(self.__gridSlots)
def actionEvent(self, event):
QFrame.actionEvent(self, event)
if event.type() == QEvent.ActionAdded:
# Note: the action is already in the self.actions() list.
actions = list(self.actions())
index = actions.index(event.action())
self.__insertActionButton(index, event.action())
elif event.type() == QEvent.ActionRemoved:
self.__removeActionButton(event.action())
def __insertActionButton(self, index, action):
"""Create a button for the action and add it to the layout
at index.
"""
self.__shiftGrid(index, 1)
button = self.createButtonForAction(action)
        row = index // self.__columns
column = index % self.__columns
self.layout().addWidget(
button, row, column,
Qt.AlignLeft | Qt.AlignTop
)
self.__gridSlots.insert(
index, _ToolGridSlot(button, action, row, column)
)
self.__mapper.setMapping(button, action)
button.clicked.connect(self.__mapper.map)
button.installEventFilter(self.__buttonListener)
button.installEventFilter(self)
def __removeActionButton(self, action):
"""Remove the button for the action from the layout and delete it.
"""
actions = [slot.action for slot in self.__gridSlots]
index = actions.index(action)
slot = self.__gridSlots.pop(index)
slot.button.removeEventFilter(self.__buttonListener)
slot.button.removeEventFilter(self)
self.__mapper.removeMappings(slot.button)
self.layout().removeWidget(slot.button)
self.__shiftGrid(index + 1, -1)
slot.button.deleteLater()
def __shiftGrid(self, start, count=1):
"""Shift all buttons starting at index `start` by `count` cells.
"""
button_count = self.layout().count()
direction = 1 if count >= 0 else -1
if direction == 1:
start, end = button_count - 1, start - 1
else:
start, end = start, button_count
for index in range(start, end, -direction):
            item = self.layout().itemAtPosition(index // self.__columns,
                                                index % self.__columns)
if item:
button = item.widget()
new_index = index + count
                self.layout().addWidget(button, new_index // self.__columns,
                                        new_index % self.__columns,
                                        Qt.AlignLeft | Qt.AlignTop)
def __relayout(self):
"""Relayout the buttons.
"""
for i in reversed(range(self.layout().count())):
self.layout().takeAt(i)
self.__gridSlots = [_ToolGridSlot(slot.button, slot.action,
                                          i // self.__columns,
i % self.__columns)
for i, slot in enumerate(self.__gridSlots)]
for slot in self.__gridSlots:
self.layout().addWidget(slot.button, slot.row, slot.column,
Qt.AlignLeft | Qt.AlignTop)
def __indexOf(self, button):
"""Return the index of button widget.
"""
buttons = [slot.button for slot in self.__gridSlots]
return buttons.index(button)
def __onButtonRightClick(self, button):
pass
def __onButtonEnter(self, button):
action = button.defaultAction()
self.actionHovered.emit(action)
def __onClicked(self, action):
self.actionTriggered.emit(action)
def paintEvent(self, event):
return utils.StyledWidget_paintEvent(self, event)
def eventFilter(self, obj, event):
etype = event.type()
if etype == QEvent.KeyPress and obj.hasFocus():
key = event.key()
if key in [Qt.Key_Up, Qt.Key_Down, Qt.Key_Left, Qt.Key_Right]:
if self.__focusMove(obj, key):
event.accept()
return True
return QFrame.eventFilter(self, obj, event)
def __focusMove(self, focus, key):
assert(focus is self.focusWidget())
try:
index = self.__indexOf(focus)
except IndexError:
return False
if key == Qt.Key_Down:
index += self.__columns
elif key == Qt.Key_Up:
index -= self.__columns
elif key == Qt.Key_Left:
index -= 1
elif key == Qt.Key_Right:
index += 1
if index >= 0 and index < self.count():
button = self.__gridSlots[index].button
button.setFocus(Qt.TabFocusReason)
return True
else:
return False
class ToolButtonEventListener(QObject):
"""
    An event listener (filter) for :class:`QToolButton` instances.
"""
buttonLeftClicked = Signal(QToolButton)
buttonRightClicked = Signal(QToolButton)
buttonEnter = Signal(QToolButton)
buttonLeave = Signal(QToolButton)
def __init__(self, parent=None):
QObject.__init__(self, parent)
self.button_down = None
self.button = None
self.button_down_pos = None
def eventFilter(self, obj, event):
if not isinstance(obj, QToolButton):
return False
if event.type() == QEvent.MouseButtonPress:
self.button = obj
self.button_down = event.button()
self.button_down_pos = event.pos()
elif event.type() == QEvent.MouseButtonRelease:
if self.button.underMouse():
if event.button() == Qt.RightButton:
self.buttonRightClicked.emit(self.button)
elif event.button() == Qt.LeftButton:
self.buttonLeftClicked.emit(self.button)
elif event.type() == QEvent.Enter:
self.buttonEnter.emit(obj)
elif event.type() == QEvent.Leave:
self.buttonLeave.emit(obj)
return False
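# --- Hedged usage sketch (not part of the original module) ---
# Minimal demonstration of the ToolGrid API documented in the class docstring
# above: create a grid, add a few QActions, and react to the actionTriggered
# signal.  Assumes a working PyQt4 installation and that the module is run in
# its package context (e.g. `python -m <package>.toolgrid`, names assumed) so
# the relative `utils` import resolves; the action names are arbitrary.
if __name__ == "__main__":
    import sys
    from PyQt4.QtGui import QApplication

    app = QApplication(sys.argv)
    grid = ToolGrid(columns=3)
    for name in ["Open", "Save", "Close"]:
        grid.addAction(QAction(name, grid))

    def on_triggered(action):
        # QAction.text() is the label shown on the corresponding button.
        print(action.text())

    grid.actionTriggered.connect(on_triggered)
    grid.show()
    sys.exit(app.exec_())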
|
|
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch, QuerySet
from django.db.models.query import get_prefetcher, prefetch_related_objects
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext
from .models import (
Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book, Bookmark,
BookReview, BookWithYear, Comment, Department, Employee, FavoriteAuthors,
House, LessonEntry, ModelIterableSubclass, Person, Qualification, Reader,
Room, TaggedItem, Teacher, WordEntry,
)
class PrefetchRelatedTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
def assertWhereContains(self, sql, needle):
where_idx = sql.index('WHERE')
self.assertEqual(
sql.count(str(needle), where_idx), 1,
msg="WHERE clause doesn't contain %s, actual SQL: %s" % (needle, sql[where_idx:])
)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors')]
self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related('bookwithyear').all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_onetoone_reverse_with_to_field_pk(self):
"""
A model (Bio) with a OneToOneField primary key (author) that references
a non-pk field (name) on the related model (Author) is prefetchable.
"""
Bio.objects.bulk_create([
Bio(author=self.author1),
Bio(author=self.author2),
Bio(author=self.author3),
])
authors = Author.objects.filter(
name__in=[self.author1, self.author2, self.author3],
).prefetch_related('bio')
with self.assertNumQueries(2):
for author in authors:
self.assertEqual(author.name, author.bio.author.name)
def test_survives_clone(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertIn(qs[0], qs)
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""A m2m can be followed through another m2m."""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books', 'books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by', 'books')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_get(self):
"""
Objects retrieved with .get() get the prefetch behavior.
"""
# Need a double
with self.assertNumQueries(3):
author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
A m2m relation can be followed after a relation like ForeignKey that
doesn't have many objects.
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
lists = [[str(r) for r in a.first_book.read_by.all()]
for a in qs]
self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]])
def test_reverse_one_to_one_then_m2m(self):
"""
        A m2m relation can be followed after going through the select_related
reverse of an o2o.
"""
qs = Author.objects.prefetch_related('bio__books').select_related('bio')
with self.assertNumQueries(1):
list(qs.all())
Bio.objects.create(author=self.author1)
with self.assertNumQueries(2):
list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
msg = (
"Cannot find 'xyz' on Book object, 'books_read__xyz' "
"is an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
msg = (
"'authors__name' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to prefetch_related()."
)
with self.assertRaisesMessage(ValueError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
self.assertIn("name", str(cm.exception))
def test_forward_m2m_to_attr_conflict(self):
msg = 'to_attr=authors conflicts with a field on the Book model.'
authors = Author.objects.all()
with self.assertRaisesMessage(ValueError, msg):
list(Book.objects.prefetch_related(
Prefetch('authors', queryset=authors, to_attr='authors'),
))
# Without the ValueError, an author was deleted due to the implicit
# save of the relation assignment.
self.assertEqual(self.book1.authors.count(), 3)
def test_reverse_m2m_to_attr_conflict(self):
msg = 'to_attr=books conflicts with a field on the Author model.'
poems = Book.objects.filter(title='Poems')
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.prefetch_related(
Prefetch('books', queryset=poems, to_attr='books'),
))
# Without the ValueError, a book was deleted due to the implicit
# save of reverse relation assignment.
self.assertEqual(self.author1.books.count(), 2)
def test_m2m_then_reverse_fk_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__addresses'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_m2m_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__favorite_authors'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_reverse_one_to_one_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__authorwithage'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.id)
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
        Helper method that returns a list of (object, related objects) pairs for
        the objects in obj_iter. For each object, the given path is traversed
        recursively and the objects found along it form the related-objects list.
"""
ret_val = []
if hasattr(obj_iter, 'all'):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
@classmethod
def setUpTestData(cls):
cls.person1 = Person.objects.create(name='Joe')
cls.person2 = Person.objects.create(name='Mary')
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
cls.house1 = House.objects.create(name='House 1', address='123 Main St', owner=cls.person1)
cls.room1_1 = Room.objects.create(name='Dining room', house=cls.house1)
cls.room1_2 = Room.objects.create(name='Lounge', house=cls.house1)
cls.room1_3 = Room.objects.create(name='Kitchen', house=cls.house1)
cls.house1.main_room = cls.room1_1
cls.house1.save()
cls.person1.houses.add(cls.house1)
cls.house2 = House.objects.create(name='House 2', address='45 Side St', owner=cls.person1)
cls.room2_1 = Room.objects.create(name='Dining room', house=cls.house2)
cls.room2_2 = Room.objects.create(name='Lounge', house=cls.house2)
cls.room2_3 = Room.objects.create(name='Kitchen', house=cls.house2)
cls.house2.main_room = cls.room2_1
cls.house2.save()
cls.person1.houses.add(cls.house2)
cls.house3 = House.objects.create(name='House 3', address='6 Downing St', owner=cls.person2)
cls.room3_1 = Room.objects.create(name='Dining room', house=cls.house3)
cls.room3_2 = Room.objects.create(name='Lounge', house=cls.house3)
cls.room3_3 = Room.objects.create(name='Kitchen', house=cls.house3)
cls.house3.main_room = cls.room3_1
cls.house3.save()
cls.person2.houses.add(cls.house3)
cls.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=cls.person2)
cls.room4_1 = Room.objects.create(name='Dining room', house=cls.house4)
cls.room4_2 = Room.objects.create(name='Lounge', house=cls.house4)
cls.room4_3 = Room.objects.create(name='Kitchen', house=cls.house4)
cls.house4.main_room = cls.room4_1
cls.house4.save()
cls.person2.houses.add(cls.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related('houses')
related_objs_normal = [list(p.houses.all()) for p in qs],
related_objs_from_traverse = [[inner[0] for inner in o[1]]
for o in self.traverse_qs(qs, [['houses']])]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
msg = (
"'houses' lookup was already seen with a different queryset. You "
"may need to adjust the ordering of your lookups."
)
with self.assertRaisesMessage(ValueError, msg):
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all())),
[['houses', 'rooms']]
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
msg = (
"Cannot find 'houses_lst' on Person object, 'houses_lst__rooms' is "
"an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
'houses_lst__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', 'houses'),
[['houses', 'rooms']]
)
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses'),
[['houses']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses')),
[['houses']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
[['houses_lst']]
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related('occupants'),
[['occupants']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants')),
[['occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
[['occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related('house__occupants'),
[['house', 'occupants']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants')),
[['house', 'occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
[['house', 'occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
[['content_object', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch('content_object'),
Prefetch('content_object__rooms', to_attr='rooms_lst')
),
[['content_object', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses', 'houses__rooms'),
[['houses', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
[['houses_lst', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch('houses', to_attr='houses_lst'),
Prefetch('houses_lst__rooms', to_attr='rooms_lst')
),
[['houses_lst', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
[['tags', 'content_object'], ['favorite_tags']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch('tags', to_attr='tags_lst'),
Prefetch('tags_lst__content_object'),
Prefetch('favorite_tags'),
),
[['tags_lst', 'content_object'], ['favorite_tags']]
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
'primary_house__occupants__houses',
),
[['primary_house', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('primary_house__occupants', to_attr='occupants_lst'),
'primary_house__occupants_lst__houses',
),
[['primary_house', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
'all_houses__occupants__houses',
),
[['all_houses', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
Prefetch('all_houses__occupants', to_attr='occupants_lst'),
'all_houses__occupants_lst__houses',
),
[['all_houses', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related('houses'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses']]),
self.traverse_qs(lst2, [['houses_lst']])
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
'houses',
queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]),
to_attr='houses_lst',
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__rooms'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'rooms']]),
self.traverse_qs(lst2, [['houses', 'rooms_lst']])
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__owner'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.select_related('owner'))))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'owner']]),
self.traverse_qs(lst2, [['houses', 'owner']])
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
with self.assertNumQueries(4):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
Prefetch('houses_lst__rooms_lst__main_room_of')
))
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('house')
lst1 = self.traverse_qs(rooms, [['house', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['house', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related('owner')
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'house')
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
with self.assertNumQueries(2):
getattr(rooms.first().house, 'name')
with self.assertNumQueries(3):
getattr(rooms.first().house, 'address')
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('main_room_of')
lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.all().prefetch_related(
Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')
)
)
lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'main_room_of')
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')
).first()
self.assertIsNone(room.main_room_of_attr)
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
person = Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.filter(name='House 1')),
).get(pk=self.person1.pk)
self.assertEqual(
list(person.houses.all()),
list(person.houses.all().all()),
)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch('rooms'))
persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1
)
def test_values_queryset(self):
with self.assertRaisesMessage(ValueError, 'Prefetch querysets cannot use values().'):
Prefetch('houses', House.objects.values('pk'))
# That error doesn't affect managers with custom ModelIterable subclasses
self.assertIs(Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass)
Prefetch('teachers', Teacher.objects_custom.all())
def test_to_attr_doesnt_cache_through_attr_as_list(self):
house = House.objects.prefetch_related(
Prefetch('rooms', queryset=Room.objects.all(), to_attr='to_rooms'),
).get(pk=self.house3.pk)
self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
persons = Person.objects.prefetch_related(
Prefetch('houses', House.objects.all(), to_attr='cached_all_houses'),
)
for person in persons:
# To bypass caching at the related descriptor level, don't use
# person.houses.all() here.
all_houses = list(House.objects.filter(occupants=person))
with self.assertNumQueries(0):
self.assertEqual(person.cached_all_houses, all_houses)
class DefaultManagerTests(TestCase):
def setUp(self):
self.qual1 = Qualification.objects.create(name="BA")
self.qual2 = Qualification.objects.create(name="BSci")
self.qual3 = Qualification.objects.create(name="MA")
self.qual4 = Qualification.objects.create(name="PhD")
self.teacher1 = Teacher.objects.create(name="Mr Cleese")
self.teacher2 = Teacher.objects.create(name="Mr Idle")
self.teacher3 = Teacher.objects.create(name="Mr Chapman")
self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
self.teacher2.qualifications.add(self.qual1)
self.teacher3.qualifications.add(self.qual2)
self.dept1 = Department.objects.create(name="English")
self.dept2 = Department.objects.create(name="Physics")
self.dept1.teachers.add(self.teacher1, self.teacher2)
self.dept2.teachers.add(self.teacher1, self.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related('teachers')
depts = "".join("%s department: %s\n" %
(dept.name, ", ".join(str(t) for t in dept.teachers.all()))
for dept in qs)
self.assertEqual(depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
cls.book1, cls.book2, cls.book3 = book1, book2, book3
cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related('content_object')
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related('content_object')
[c.content_object for c in qs]
def test_traverse_GFK(self):
"""
A 'content_object' can be traversed with prefetch_related() to get to
related objects on the other side (assuming it is suitably filtered).
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
readers_of_awesome_books = {r.name for tag in qs
for r in tag.content_object.read_by.all()}
self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
def test_nullable_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1,
created_by=self.reader1)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
self.assertEqual(result,
[t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
for t in b.tags.all()]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=b, tag='django')
TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
self.assertEqual(sorted(i.tag for i in bookmark.tags.all()), ["django", "python"])
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
def test_custom_queryset(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
django_tag = TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
bookmark = Bookmark.objects.prefetch_related(
Prefetch('tags', TaggedItem.objects.filter(tag='django')),
).get()
with self.assertNumQueries(0):
self.assertEqual(list(bookmark.tags.all()), [django_tag])
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
class MultiTableInheritanceTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.book2 = BookWithYear.objects.create(title='More poems', published_year=2011)
cls.author1 = AuthorWithAge.objects.create(name='Jane', first_book=cls.book1, age=50)
cls.author2 = AuthorWithAge.objects.create(name='Tom', first_book=cls.book1, age=49)
cls.author3 = AuthorWithAge.objects.create(name='Robert', first_book=cls.book2, age=48)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
cls.book2.aged_authors.add(cls.author2, cls.author3)
cls.br1 = BookReview.objects.create(book=cls.book1, notes='review book1')
cls.br2 = BookReview.objects.create(book=cls.book2, notes='review book2')
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()] for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related('book')
titles = [obj.book.title for obj in qs]
self.assertEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related('books_with_year')
with self.assertNumQueries(2):
lst = [[str(book) for book in author.books_with_year.all()] for author in qs]
qs = AuthorWithAge.objects.all()
lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related('aged_authors')
with self.assertNumQueries(2):
lst = [[str(author) for author in book.aged_authors.all()] for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related('author')]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
authors = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
self.assertIn(' IN ', connection.queries[-1]['sql'])
self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book = Book.objects.create(title='Poems')
cls.author1 = Author.objects.create(name='Jane', first_book=cls.book)
cls.author2 = Author.objects.create(name='Tom', first_book=cls.book)
cls.author3 = Author.objects.create(name='Robert', first_book=cls.book)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
favorites = [(
[str(i_like) for i_like in author.favorite_authors.all()],
[str(likes_me) for likes_me in author.favors_me.all()]
) for author in qs]
self.assertEqual(
favorites,
[
([str(self.author2)], [str(self.author3)]),
([str(self.author3)], [str(self.author1)]),
([str(self.author1)], [str(self.author2)])
]
)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
self.house1 = House.objects.create(address="123 Main St")
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.house1.main_room = self.room1_1
self.house1.save()
self.person1.houses.add(self.house1)
self.house2 = House.objects.create(address="45 Side St")
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.house2.main_room = self.room2_1
self.house2.save()
self.person1.houses.add(self.house2)
self.house3 = House.objects.create(address="6 Downing St")
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.house3.main_room = self.room3_1
self.house3.save()
self.person2.houses.add(self.house3)
self.house4 = House.objects.create(address="7 Regents St")
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.house4.main_room = self.room4_1
self.house4.save()
self.person2.houses.add(self.house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two queries must be done in the same order as written,
# otherwise 'primary_house' will cause non-prefetched lookups
qs = Person.objects.prefetch_related('houses__rooms',
'primary_house__occupants')
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
@classmethod
def setUpTestData(cls):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
# Because we use select_related() for 'boss', it doesn't need to be
# prefetched, but we can still traverse it although it contains some nulls
with self.assertNumQueries(2):
qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.select_related('boss')
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.all()
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
in_bulk() prefetches related objects correctly because it doesn't use
.iterator() directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
multi_db = True
def test_using_is_honored_m2m(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related('authors')
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1)
self.assertEqual(books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n")
# Reverse
qs2 = A.prefetch_related('books')
with self.assertNumQueries(2, using='other'):
authors = "".join("%s: %s\n" %
(author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2)
self.assertEqual(authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n")
def test_using_is_honored_fkey(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using='other'):
books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related('first_time_authors'))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using('other')
A = AuthorWithAge.objects.using('other')
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name='Jane', first_book=book1, age=50)
A.create(name='Tom', first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using='other'):
authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using='other'):
ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on the same db.
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on a different db.
with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems ()\n"
"Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
def setUp(self):
for id, name1, name2 in [
(1, 'einfach', 'simple'),
(2, 'schwierig', 'difficult'),
]:
LessonEntry.objects.create(id=id, name1=name1, name2=name2)
for id, lesson_entry_id, name in [
(1, 1, 'einfach'),
(2, 1, 'simple'),
(3, 2, 'schwierig'),
(4, 2, 'difficult'),
]:
WordEntry.objects.create(id=id, lesson_entry_id=lesson_entry_id, name=name)
def test_bug(self):
list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author2.objects.create(name="Charlotte", first_book=self.book1)
self.author2 = Author2.objects.create(name="Anne", first_book=self.book1)
self.author3 = Author2.objects.create(name="Emily", first_book=self.book1)
self.author4 = Author2.objects.create(name="Jane", first_book=self.book4)
self.author1.favorite_books.add(self.book1, self.book2, self.book3)
self.author2.favorite_books.add(self.book1)
self.author3.favorite_books.add(self.book2)
self.author4.favorite_books.add(self.book3)
def test_bug(self):
list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
def setUp(self):
self.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
self.rooms.append(Room.objects.create(house=house))
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house.main_room = self.rooms[-3]
house.save()
def test_bug(self):
prefetcher = get_prefetcher(self.rooms[0], 'house', 'house')[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(' JOIN ', str(queryset.query))
class DirectPrefetchedObjectCacheReuseTests(TestCase):
"""
prefetch_related() reuses objects fetched in _prefetched_objects_cache.
When objects are prefetched and not stored as an instance attribute (often
intermediary relationships), they are saved to the
_prefetched_objects_cache attribute. prefetch_related() takes
_prefetched_objects_cache into account when determining whether an object
has been fetched[1] and retrieves results from it when it is populated [2].
[1]: #25546 (duplicate queries on nested Prefetch)
[2]: #27554 (queryset evaluation fails with a mix of nested and flattened
prefetches)
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title='book1'),
Book.objects.create(title='book2'),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name='Author11'),
Author.objects.create(first_book=cls.book1, name='Author12'),
Author.objects.create(first_book=cls.book2, name='Author21'),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address='Happy place'),
AuthorAddress.objects.create(author=cls.author12, address='Haunted house'),
AuthorAddress.objects.create(author=cls.author21, address='Happy place'),
]
cls.bookwithyear1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.bookreview1 = BookReview.objects.create(book=cls.bookwithyear1)
def test_detect_is_fetched(self):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
"""
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertSequenceEqual(book1.first_time_authors.all(), [self.author11, self.author12])
self.assertSequenceEqual(book2.first_time_authors.all(), [self.author21])
self.assertSequenceEqual(book1.first_time_authors.all()[0].addresses.all(), [self.author1_address1])
self.assertSequenceEqual(book1.first_time_authors.all()[1].addresses.all(), [])
self.assertSequenceEqual(book2.first_time_authors.all()[0].addresses.all(), [self.author2_address1])
self.assertEqual(
list(book1.first_time_authors.all()), list(book1.first_time_authors.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()), list(book2.first_time_authors.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all())
)
def test_detect_is_fetched_with_to_attr(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
to_attr='happy_place',
)
),
to_attr='first_authors',
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertEqual(book1.first_authors, [self.author11, self.author12])
self.assertEqual(book2.first_authors, [self.author21])
self.assertEqual(book1.first_authors[0].happy_place, [self.author1_address1])
self.assertEqual(book1.first_authors[1].happy_place, [])
self.assertEqual(book2.first_authors[0].happy_place, [self.author2_address1])
def test_prefetch_reverse_foreign_key(self):
with self.assertNumQueries(2):
bookwithyear1, = BookWithYear.objects.prefetch_related('bookreview_set')
with self.assertNumQueries(0):
self.assertCountEqual(bookwithyear1.bookreview_set.all(), [self.bookreview1])
with self.assertNumQueries(0):
prefetch_related_objects([bookwithyear1], 'bookreview_set')
class ReadPrefetchedObjectsCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Les confessions Volume I')
cls.book2 = Book.objects.create(title='Candide')
cls.author1 = AuthorWithAge.objects.create(name='Rousseau', first_book=cls.book1, age=70)
cls.author2 = AuthorWithAge.objects.create(name='Voltaire', first_book=cls.book2, age=65)
cls.book1.authors.add(cls.author1)
cls.book2.authors.add(cls.author2)
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
def test_retrieves_results_from_prefetched_objects_cache(self):
"""
When intermediary results are prefetched without a destination
attribute, they are saved in the RelatedManager's cache
(_prefetched_objects_cache). prefetch_related() uses this cache
(#27554).
"""
authors = AuthorWithAge.objects.prefetch_related(
Prefetch(
'author',
queryset=Author.objects.prefetch_related(
# Results are saved in the RelatedManager's cache
# (_prefetched_objects_cache) and do not replace the
# RelatedManager on Author instances (favorite_authors)
Prefetch('favorite_authors__first_book'),
),
),
)
with self.assertNumQueries(4):
# AuthorWithAge -> Author -> FavoriteAuthors, Book
self.assertQuerysetEqual(authors, ['<AuthorWithAge: Rousseau>', '<AuthorWithAge: Voltaire>'])
|
|
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found; users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion; this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")``.
item = self._coerce_version(item)
# Determine whether we should be supporting prereleases in this
# specifier or not; if we do not support prereleases, we can short
# circuit the logic if this version is a prerelease.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self._spec[0])(item, self._spec[1])
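# Illustrative (doctest-style) sketch of contains(); the version strings
# below are arbitrary examples:
# >>> Specifier(">=2").contains("2.0")
# True
# >>> Specifier(">=2").contains("3.0a1")      # prereleases excluded by default
# False
# >>> Specifier(">=2").contains("3.0a1", prereleases=True)
# True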
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease
and not (prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
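# Illustrative (doctest-style) sketch of the prerelease fallback in filter();
# the version strings are arbitrary examples:
# >>> list(Specifier(">=1.0").filter(["1.0", "2.0a1"]))
# ['1.0']
# >>> list(Specifier(">=2.0").filter(["1.0", "2.0a1"]))   # only a prerelease matches
# ['2.0a1']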
class LegacySpecifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^\s]* # We just match everything, except for whitespace since this
# is a "legacy" specifier and the version string can be just
# about anything.
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases, and we want to treat the pre-release as
# its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post")
and not x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec)
and self._get_operator("==")(prospective, prefix))
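# Illustrative (doctest-style) sketch of the ~= ("compatible release")
# operator described above; the version strings are arbitrary examples:
# >>> Specifier("~=2.2").contains("2.5")      # >=2.2 and ==2.*
# True
# >>> Specifier("~=2.2").contains("3.0")
# False
# >>> Specifier("~=1.4.5").contains("1.4.9")  # >=1.4.5 and ==1.4.*
# True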
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Less than are defined as exclusive operators, this implies that
# pre-releases do not match for the same series as the spec. This is
# implemented by making <V imply !=V.*.
spec = Version(spec)
return (prospective < spec
and self._get_operator("!=")(prospective, str(spec) + ".*"))
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Greater than are defined as exclusive operators, this implies that
# pre-releases do not match for the same series as the spec. This is
# implemented by making >V imply !=V.*.
spec = Version(spec)
return (prospective > spec
and self._get_operator("!=")(prospective, str(spec) + ".*"))
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~="]:
# The == specifier can include a trailing .*; if it does, we
# want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release then this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
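# Illustrative (doctest-style) sketch of the prerelease inference above;
# the version strings are arbitrary examples:
# >>> Specifier(">=1.0a1").prereleases        # explicit prerelease with an inclusive operator
# True
# >>> Specifier(">=1.0").prereleases
# False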
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
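# Illustrative (doctest-style) sketch of _version_split(); the pre-release
# segment is split off as its own item:
# >>> _version_split("1.0rc1")
# ['1', '0', 'rc1']
# >>> _version_split("2.2")
# ['2', '2']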
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
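# Illustrative (doctest-style) sketch of _pad_version(); the shorter release
# segment is zero-padded so both sides compare element-wise:
# >>> _pad_version(["1", "2"], ["1", "2", "3"])
# (['1', '2', '0'], ['1', '2', '3'])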
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
# Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
# Note: The use of any() here means that an empty set of specifiers
# will always return False; this is an explicit design decision.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
if (not (self.prereleases or prereleases)) and item.is_prerelease:
return False
# Determine if we're forcing a prerelease or not, we bypass
# self.prereleases here and use self._prereleases because we want to
# only take into consideration actual *forced* values. The underlying
# specifiers will handle the other logic.
# The logic here is: If prereleases is anything but None, we'll just
# go ahead and continue to use that. However if
# prereleases is None, then we'll use whatever the
# value of self._prereleases is as long as it is not
# None itself.
if prereleases is None and self._prereleases is not None:
prereleases = self._prereleases
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True; this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, we bypass
# self.prereleases here and use self._prereleases because we want to
# only take into consideration actual *forced* values. The underlying
# specifiers will handle the other logic.
# The logic here is: If prereleases is anything but None, we'll just
# go ahead and continue to use that. However if
# prereleases is None, then we'll use whatever the
# value of self._prereleases is as long as it is not
# None itself.
if prereleases is None and self._prereleases is not None:
prereleases = self._prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one; this will act as a logical AND amongst
# the specifiers.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=prereleases)
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
# Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
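# Illustrative (doctest-style) sketch of SpecifierSet usage; the specifiers
# and versions are arbitrary examples:
# >>> ss = SpecifierSet(">=1.0,<2.0")
# >>> ss.contains("1.5")
# True
# >>> list(ss.filter(["0.9", "1.5", "2.1"]))
# ['1.5']
# >>> str(SpecifierSet(">=1.0") & "<2.0")
# '<2.0,>=1.0'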
|
|
import hashlib
import os
import urllib.request
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
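# Note: for a PIL RGB image, the pipeline above yields a normalized float
# tensor of shape [3, n_px, n_px] (e.g. [3, 224, 224] for ViT-B/32).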
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
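# Minimal usage sketch (illustrative only, not part of the module); the image
# path "example.png" and the text prompts are hypothetical placeholders.
if __name__ == "__main__":
    model, preprocess = load("ViT-B/32")
    device = next(model.parameters()).device
    image = preprocess(Image.open("example.png")).unsqueeze(0).to(device)
    text = tokenize(["a diagram", "a dog", "a cat"]).to(device)
    with torch.no_grad():
        # Similarity logits between the image and each text prompt.
        logits_per_image, _logits_per_text = model(image, text)
        probs = logits_per_image.softmax(dim=-1).cpu().numpy()
    print("Label probs:", probs)  # highest probability should match the best caption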
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides distutils command classes for the gRPC Python setup process."""
import distutils
from distutils.errors import DistutilsOptionError
import glob
import os
import os.path
import platform
import re
import shutil
import subprocess
import sys
import traceback
import setuptools
from setuptools.command import build_ext
from setuptools.command import build_py
from setuptools.command import easy_install
from setuptools.command import install
from setuptools.command import test
PYTHON_STEM = os.path.dirname(os.path.abspath(__file__))
GRPC_STEM = os.path.abspath(os.path.join(PYTHON_STEM, '../../../..'))
GRPC_PROTO_STEM = os.path.join(GRPC_STEM, 'src', 'proto')
PROTO_STEM = os.path.join(PYTHON_STEM, 'src', 'proto')
PYTHON_PROTO_TOP_LEVEL = os.path.join(PYTHON_STEM, 'src')
class CommandError(Exception):
pass
class GatherProto(setuptools.Command):
description = 'gather proto dependencies'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# TODO(atash) ensure that we're running from the repository directory when
# this command is used
try:
shutil.rmtree(PROTO_STEM)
except Exception as error:
# We don't care if this command fails
pass
shutil.copytree(GRPC_PROTO_STEM, PROTO_STEM)
for root, _, _ in os.walk(PYTHON_PROTO_TOP_LEVEL):
path = os.path.join(root, '__init__.py')
open(path, 'a').close()
class BuildProtoModules(setuptools.Command):
"""Command to generate project *_pb2.py modules from proto files."""
description = 'build protobuf modules'
user_options = [
('include=', None, 'path patterns to include in protobuf generation'),
('exclude=', None, 'path patterns to exclude from protobuf generation')
]
def initialize_options(self):
self.exclude = None
self.include = r'.*\.proto$'
def finalize_options(self):
pass
def run(self):
import grpc.tools.protoc as protoc
include_regex = re.compile(self.include)
exclude_regex = re.compile(self.exclude) if self.exclude else None
paths = []
for walk_root, directories, filenames in os.walk(PROTO_STEM):
for filename in filenames:
path = os.path.join(walk_root, filename)
if include_regex.match(path) and not (
exclude_regex and exclude_regex.match(path)):
paths.append(path)
# TODO(kpayson): It would be nice to do this in a batch command,
# but we currently have name conflicts in src/proto
for path in paths:
command = [
'grpc.tools.protoc',
'-I {}'.format(PROTO_STEM),
'--python_out={}'.format(PROTO_STEM),
'--grpc_python_out={}'.format(PROTO_STEM),
] + [path]
if protoc.main(command) != 0:
sys.stderr.write(
'warning: Command:\n{}\nFailed'.format(
command))
# Generated proto directories don't include __init__.py, but
# these are needed for python package resolution
for walk_root, _, _ in os.walk(PROTO_STEM):
path = os.path.join(walk_root, '__init__.py')
open(path, 'a').close()
class BuildPy(build_py.build_py):
"""Custom project build command."""
def run(self):
try:
self.run_command('build_package_protos')
except CommandError as error:
sys.stderr.write('warning: %s\n' % error)
build_py.build_py.run(self)
class TestLite(setuptools.Command):
"""Command to run tests without fetching or building anything."""
description = 'run tests without fetching or building anything.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
# distutils requires this override.
pass
def run(self):
self._add_eggs_to_path()
import tests
loader = tests.Loader()
loader.loadTestsFromNames(['tests'])
runner = tests.Runner()
result = runner.run(loader.suite)
if not result.wasSuccessful():
sys.exit('Test failure')
def _add_eggs_to_path(self):
"""Fetch install and test requirements"""
self.distribution.fetch_build_eggs(self.distribution.install_requires)
self.distribution.fetch_build_eggs(self.distribution.tests_require)
class RunInterop(test.test):
description = 'run interop test client/server'
user_options = [
('args=', 'a', 'pass-thru arguments for the client/server'),
('client', 'c', 'flag indicating to run the client'),
('server', 's', 'flag indicating to run the server')
]
def initialize_options(self):
self.args = ''
self.client = False
self.server = False
def finalize_options(self):
if self.client and self.server:
raise distutils.errors.DistutilsOptionError('you may only specify one of client or server')
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.client:
self.run_client()
elif self.server:
self.run_server()
def run_server(self):
# We import here to ensure that our setuptools parent has had a chance to
# edit the Python system path.
from tests.interop import server
sys.argv[1:] = self.args.split()
server.serve()
def run_client(self):
# We import here to ensure that our setuptools parent has had a chance to
# edit the Python system path.
from tests.interop import client
sys.argv[1:] = self.args.split()
client.test_interoperability()
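# Hedged sketch: one way these command classes could be wired into a setup.py
# cmdclass mapping. The package name and command keys below are illustrative
# assumptions, not the canonical gRPC build configuration.
#
# import setuptools
# import commands  # this module
#
# setuptools.setup(
#     name='example-package',
#     version='0.0.0',
#     cmdclass={
#         'gather_proto': commands.GatherProto,
#         'build_proto_modules': commands.BuildProtoModules,
#         'build_py': commands.BuildPy,
#         'test_lite': commands.TestLite,
#         'run_interop': commands.RunInterop,
#     },
# )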
|
|
#!/usr/bin/env python
import os
import argparse
import yaml
from fabric.api import run, task, cd, hide, abort, settings, lcd, local, env, execute
from fabric.utils import puts
from fabric.contrib.files import exists
from fabric.colors import cyan
from GitNotified import GitNotified
class GitSync:
local_path = ''
local_branch = ''
remote_path = ''
remote_host = ''
remote_user = ''
git_ignore_lines = ''
def __init__(self, config, notify):
self.notify = notify
self.local_path = os.path.expanduser(
os.path.expandvars(config['local_path'])
)
self.local_branch = config['local_branch']
self.remote_path = config['remote_path']
self.remote_host = config['remote_host']
self.remote_user = config['remote_user']
head, tail = os.path.split(self.local_path)
if not head or not tail:
abort(
"The local path appears to be bad: {0}".format(self.local_path)
)
if 'git_ignore' in config:
self.git_ignore_lines = config['git_ignore']
else:
self.git_ignore_lines = []
# Sort the git ignore lines.
self.git_ignore_lines = sorted(self.git_ignore_lines)
if self.remote_user:
self.remote_host = self.remote_user + '@' + self.remote_host
@task
def init_remote_master_repository(self, remote_path, local_branch, git_ignore_lines):
puts("Setting up %s" % remote_path)
if not exists(remote_path):
abort("The remote path does not exist: %s" % remote_path)
git_repo = self.get_remote_git_repo(self, remote_path)
if exists(git_repo):
puts("The git repo already exist: %s" % git_repo)
else:
with cd(remote_path):
run("git init")
self.update_git_ignore_file(self, remote_path, git_ignore_lines)
with cd(remote_path):
run("git add .gitignore")
run("git commit -m 'Inital Commit'")
run("git add .")
run("git commit -m 'add project'")
@task
def update_git_ignore_file(self, remote_path, git_ignore_lines):
puts("Updating ignore files.")
with cd(remote_path):
with hide('running'):
cmd = []
for line in git_ignore_lines:
cmd.append("echo '{0}' >> .gitignore_new".format(line))
if cmd:
run(';'.join(cmd))
run('mv .gitignore_new .gitignore', shell=False)
else:
run("echo '' > .gitignore")
@task
def remote_has_modified_files(self, remote_path):
with cd(remote_path):
with settings(warn_only=True):
with hide('running', 'status', 'warnings', 'stderr', 'stdout'):
git_status_output = run("git status --porcelain .")
if not git_status_output:
puts(cyan("%s (remote) is clean." % remote_path))
return False
else:
puts(
cyan(
" %s (remote) has uncommitted changes."
% remote_path
)
)
return True
@task
def local_has_modified_files(self, local_path):
with lcd(local_path):
with settings(warn_only=True):
with hide('running', 'status', 'warnings', 'stderr', 'stdout'):
git_status_output = local("git status --porcelain .", capture=True)
if not git_status_output:
puts(cyan("%s (local) is clean." % local_path))
return False
else:
puts(
cyan("%s (local) has uncommitted changes." % local_path)
)
return True
@task
def get_remote_git_repo(self, remote_path):
git_repo = os.path.join(remote_path, '.git')
return git_repo
def check_local_path_case(self, path, full_path=None):
if not full_path:
full_path = path
(head, tail) = os.path.split(path)
if tail:
self.check_local_path_case(head, full_path)
if head == '/':
return True
if not os.path.isdir(head):
return True
if not os.path.exists(os.path.join(head, tail)):
return True
if tail not in os.listdir(head):
abort(
"Your local path appears to be miss configured, maybe the check"
" to make sure upper and lower case letters are"
" correct: {0}".format(full_path)
)
def check_local_path_permissions(self, path, full_path=None):
if not full_path:
full_path = path
(head, tail) = os.path.split(path)
if os.path.isdir(head):
if not os.access(head, os.W_OK):
abort(
(
"Unable to write to {0}, that means your local"
" path will not work. {1}"
).format(head, full_path)
)
else:
self.check_local_path_permissions(head, full_path)
@task
def get_local_git_clone(self, remote_path, local_path):
self.check_local_path_case(local_path)
self.check_local_path_permissions(local_path)
local("git clone ssh://%s/%s %s" % (env.host, remote_path, local_path))
@task
def commit_remote_modified_files(self, remote_path):
if not self.remote_has_modified_files(self, remote_path):
return True
with cd(remote_path):
run("git add .")
run("git commit -a -m 'committing all changes from %s'" % (remote_path))
return True
@task
def push_remote_master(self, remote_path, local_branch):
self.remote_has_local_branch(self, remote_path, local_branch)
with cd(remote_path):
run("git push origin %s" % (local_branch))
return True
def remote_has_local_branch(self, remote_path, local_branch):
with cd(remote_path):
git_branches = run('git branch')
puts(cyan(git_branches))
@task
def pull_local(self, local_path):
with lcd(local_path):
local('git fetch origin')
@task
def merge_local_master(self, local_path):
with lcd(local_path):
local('git merge origin/master')
@task
def pull_and_merge_local(self, local_path):
self.pull_local(self, local_path)
self.merge_local_master(self, local_path)
@task
def commit_local_modified_files(self, local_path):
with lcd(local_path):
if self.local_has_modified_files(self, local_path):
local("git add .")
local(
"git commit -a -m 'committing all changes from a local machine'"
)
return True
@task
def push_local_to_remote(self, local_path, local_branch):
if not self.local_has_local_branch(local_path, local_branch):
self.local_create_local_branch(local_path, local_branch)
with lcd(local_path):
local("git push origin %s" % (local_branch))
def local_create_local_branch(self, local_path, local_branch):
with lcd(local_path):
local('git branch %s' % (local_branch), capture=True)
def local_has_local_branch(self, local_path, local_branch):
puts(cyan(local_path))
with lcd(local_path):
git_branches = local('git branch', capture=True)
for branch in git_branches.split():
if branch == local_branch:
return True
return False
@task
def merge_local_to_remote(self, remote_path, local_branch):
with cd(remote_path):
run('git merge %s' % (local_branch))
@task
def send_local_changes_to_remote(self, remote_path, local_path, local_branch):
self.commit_local_modified_files(self, local_path)
self.push_local_to_remote(self, local_path, local_branch)
self.merge_local_to_remote(self, remote_path, local_branch)
@task
def send_remote_changes_to_local(self, remote_path, local_path):
self.commit_remote_modified_files(self, remote_path)
self.pull_and_merge_local(self, local_path)
@task
def sync(self, remote_path, local_path, local_branch, git_ignore_lines):
self.test_and_init(
remote_path,
local_path,
local_branch,
git_ignore_lines
)
if self.remote_has_modified_files(self, remote_path):
self.send_remote_changes_to_local(self, remote_path, local_path)
self.send_local_changes_to_remote(self, remote_path, local_path, local_branch)
def initial_sync(self, remote_path, local_path, local_branch, git_ignore_lines):
self.test_and_init(
remote_path,
local_path,
local_branch,
git_ignore_lines
)
self.update_git_ignore_file(self, remote_path, git_ignore_lines)
self.send_remote_changes_to_local(self, remote_path, local_path)
self.send_local_changes_to_remote(self, remote_path, local_path, local_branch)
def test_and_init(self, remote_path, local_path, local_branch, git_ignore_lines):
if not os.path.exists(local_path):
self.init(self, remote_path, local_path, local_branch, git_ignore_lines)
return
# Test to see if the local path is just an empty directory.
wc_output = local("ls -1a {0} | wc -l".format(local_path), capture=True)
number_of_files = int(wc_output.stdout)
puts(number_of_files)
if number_of_files == 2:
local('rmdir {0}'.format(local_path))
self.init(self, remote_path, local_path, local_branch, git_ignore_lines)
return
elif number_of_files < 2:
raise Exception(
"Local directory exists, has less then 2 files in it, on"
" a POSIX operating system this makes not sense."
)
raise Exception(
(
"Local directory {0} exists already and can not work with a new"
" GitSync project."
).format(local_path)
)
@task
def init(self, remote_path, local_path, local_branch, git_ignore_lines):
self.init_remote_master_repository(self, remote_path, local_branch, git_ignore_lines)
self.get_local_git_clone(self, remote_path, local_path)
self.local_create_local_branch(local_path, local_branch)
with lcd(local_path):
local("git checkout %s" % (local_branch))
def run_remote_has_modified_files(self):
result = execute(
self.remote_has_modified_files,
self.remote_path,
host=self.remote_host,
remote_path=self.remote_path
)
return result[self.remote_host]
def run_send_remote_changes_to_local(self):
result = execute(
self.send_remote_changes_to_local,
self,
host=self.remote_host,
remote_path=self.remote_path,
local_path=self.local_path
)
return result[self.remote_host]
def run_send_local_changes_to_remote(self):
result = execute(
self.send_local_changes_to_remote,
self,
host=self.remote_host,
remote_path=self.remote_path,
local_path=self.local_path,
local_branch=self.local_branch
)
return result[self.remote_host]
def run_initial_sync(self):
self.notify.sync_start(self.local_path, self.remote_path, self.remote_host)
execute(
self.initial_sync,
host=self.remote_host,
remote_path=self.remote_path,
local_path=self.local_path,
local_branch=self.local_branch,
git_ignore_lines=self.git_ignore_lines
)
self.notify.sync_done(self.local_path, self.remote_path, self.remote_host)
def run_sync(self):
self.notify.sync_start(self.local_path, self.remote_path, self.remote_host)
try:
if self.run_remote_has_modified_files():
self.run_send_remote_changes_to_local()
self.run_send_local_changes_to_remote()
except Exception as e:
print("sync failed.")
print(type(e))
print(e.args)
print(e)
self.notify.sync_failed()
raise
else:
self.notify.sync_done(self.local_path, self.remote_path, self.remote_host)
def parse_config():
# Setup Parser
parser = argparse.ArgumentParser(
description='Use git to sync a site on a server to your local machine.'
)
parser.add_argument(
'config_file',
nargs='?',
type=argparse.FileType('r')
)
parser.add_argument(
'command',
nargs='?',
type=str
)
args = parser.parse_args()
# Read in config file.
return yaml.safe_load(args.config_file)
def setup_git_sync(config, notifier=None):
if not notifier:
notifier = GitNotified()
git_sync = GitSync(config, notifier)
return (git_sync, notifier)
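# Hedged sketch of the YAML layout main() expects, based on the keys read in
# GitSync.__init__ above. Hostnames and paths are placeholder assumptions.
#
# local_path: ~/projects/mysite
# local_branch: work
# remote_path: /var/www/mysite
# remote_host: example.com
# remote_user: deploy
# git_ignore:
#   - '*.pyc'
#   - '.DS_Store'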
def main():
parser = argparse.ArgumentParser(
description='Use git to sync a site on a server to your local machine.'
)
parser.add_argument(
'config_file',
nargs='?',
type=argparse.FileType('r')
)
parser.add_argument(
'command',
nargs='?',
type=str
)
args = parser.parse_args()
if not args.config_file:
abort( "No yaml configuration file specified." )
config = yaml.safe_load(args.config_file)
(git_sync, notifier) = setup_git_sync(config)
if args.command == "init":
git_sync.run_initial_sync()
elif args.command == 'sync':
git_sync.run_sync()
else:
notifier.notify("Invalid command.")
return 0
if __name__ == '__main__':
main()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution Strategy-related dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops.distribute_options import ExternalStatePolicy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
class _AutoShardDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that shards the `Dataset` automatically.
This dataset takes in an existing dataset and tries to automatically figure
out how to shard the dataset in a multi-worker scenario using graph rewrites.
If the AutoShardPolicy is set to FILE, it walks up the dataset graph until
it finds a reader dataset, then inserts a ShardDataset op before that node
so that each worker only sees some files.
If the AutoShardPolicy is set to DATA, it inserts a ShardDataset op at the
end of the input pipeline, before any terminal PrefetchDataset if there is
one. Additionally, if there is a RebatchDatasetV2 in the input pipeline, it
is rewritten to the legacy RebatchDataset for correctness reasons, since
RebatchDatasetV2 is incompatible with data sharding.
If the AutoShardPolicy is set to AUTO, it tries to do file-based sharding.
If it cannot find a reader dataset, it falls back to doing data-based
sharding.
If the AutoShardPolicy is set to OFF, it does nothing.
Args:
num_workers: Total number of workers to shard this dataset across.
index: The current worker index (out of the total number of workers) this
dataset is for.
num_replicas: The total number of replicas across all workers. This is used
only when sharding by data (either DATA or AUTO) in order to rewrite
RebatchDatasetV2 to RebatchDataset.
Raises:
NotFoundError: If we cannot find a suitable reader dataset to begin
automatically sharding the dataset.
"""
def __init__(self, input_dataset, num_workers, index, num_replicas=None):
self._input_dataset = input_dataset
self._element_spec = input_dataset.element_spec
variant_tensor = ged_ops.auto_shard_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
num_workers=num_workers,
index=index,
auto_shard_policy=int(
input_dataset.options().experimental_distribute.auto_shard_policy),
num_replicas=num_replicas,
**self._flat_structure)
super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._element_spec
def _AutoShardDatasetV1(input_dataset, num_workers, index, num_replicas=None): # pylint: disable=invalid-name
return dataset_ops.DatasetV1Adapter(
_AutoShardDataset(input_dataset, num_workers, index, num_replicas))
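# Hedged usage sketch: _AutoShardDataset is an internal op, normally reached
# through the public tf.data options API roughly as below (the file pattern is
# a placeholder assumption; tf.distribute applies the rewrite per worker).
#
# import tensorflow as tf
# ds = tf.data.TFRecordDataset(tf.io.gfile.glob("/data/train-*.tfrecord"))
# options = tf.data.Options()
# options.experimental_distribute.auto_shard_policy = (
#     tf.data.experimental.AutoShardPolicy.FILE)
# ds = ds.with_options(options)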
class _RebatchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that rebatches elements from its input into new batch sizes.
`_RebatchDataset(input_dataset, batch_sizes)` is functionally equivalent to
`input_dataset.unbatch().batch(N)`, where the value of N cycles through the
`batch_sizes` input list. The elements produced by this dataset have the same
rank as the elements of the input dataset.
For example:
```python
ds = tf.data.Dataset.range(8)
ds = ds.batch(4)
ds = _RebatchDataset(ds, batch_sizes=[2, 1, 1])
for elem in ds:
print(elem)
>> [0, 1], [2], [3], [4, 5], [6], [7]
ds = tf.data.Dataset.range(16)
ds = ds.batch(4)
ds = _RebatchDataset(ds, batch_sizes=[6])
for elem in ds:
print(elem)
>> [0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [12, 13, 14, 15]
```
"""
def __init__(self, input_dataset, batch_sizes, drop_remainder=False):
"""Creates a _RebatchDataset.
Args:
input_dataset: `Dataset` to rebatch.
batch_sizes: A `tf.int64` scalar or vector, representing the size of
batches to produce. If this argument is a vector, these values are
cycled through in order.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_sizes[cycle_index]` elements; the default behavior is not to drop
the smaller batch.
"""
self._input_dataset = input_dataset
self._batch_sizes = ops.convert_to_tensor(
batch_sizes, dtype=dtypes.int64, name="batch_sizes")
self._drop_remainder = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
new_batch_dim = self._compute_static_batch_dim()
# pylint: disable=protected-access
self._element_spec = nest.map_structure(
lambda ts: ts._unbatch()._batch(new_batch_dim),
dataset_ops.get_structure(input_dataset))
# pylint: enable=protected-access
input_dataset = dataset_ops.normalize_to_dense(input_dataset)
variant_tensor = ged_ops.rebatch_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
batch_sizes=batch_sizes,
drop_remainder=drop_remainder,
**self._flat_structure)
super(_RebatchDataset, self).__init__(input_dataset, variant_tensor)
def _compute_static_batch_dim(self):
"""Computes the static batch dimension of a dataset if it can be determined.
Given the _RebatchDataset parameters, determines the batch dimension of this
dataset statically. Returns None if this cannot be determined or is
variable.
Returns:
An integer representing the batch dimension of the dataset. If it cannot
be determined statically, returns None.
Raises:
ValueError: The batch_sizes parameter is malformed, input_dataset is
not batched, or input_dataset batch sizes are incompatible with each
other.
"""
new_batch_dim = tensor_util.constant_value(self._batch_sizes)
if new_batch_dim is None:
return None
if isinstance(new_batch_dim, np.ndarray):
if len(new_batch_dim.shape) == 1:
if np.all(new_batch_dim == new_batch_dim[0]):
new_batch_dim = new_batch_dim[0]
else:
return None
elif len(new_batch_dim.shape) > 1:
raise ValueError("Expected batch_sizes to be a scalar or vector.")
if self._may_form_partial_batches(new_batch_dim):
return None
return new_batch_dim
def _may_form_partial_batches(self, desired_batch_size):
"""Returns whether this dataset may form partial batches."""
if tensor_util.constant_value(self._drop_remainder):
return False
def get_batch_dim(type_spec):
shape = type_spec._to_legacy_output_shapes() # pylint: disable=protected-access
if not isinstance(shape, tensor_shape.TensorShape):
return None
if shape.rank is None:
return None
if len(shape) < 1:
raise ValueError("Expected a dataset whose elements have rank >= 1 "
"but found a dataset whose elements are scalars. "
"You can fix the issue by adding the `batch` "
"transformation to the dataset.")
return shape.dims[0].value
input_batch_dims = [
get_batch_dim(ts)
for ts in nest.flatten(dataset_ops.get_structure(self._input_dataset))
]
known_input_batch_dims = [d for d in input_batch_dims if d is not None]
if not known_input_batch_dims:
return True
known_input_batch_dims = np.asarray(known_input_batch_dims)
if not np.all(known_input_batch_dims == known_input_batch_dims[0]):
raise ValueError("Batch dimensions of input dataset are not compatible.")
return known_input_batch_dims[0] % desired_batch_size != 0
@property
def element_spec(self):
return self._element_spec
class _LegacyRebatchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that divides its input batches into `num_replicas` sub-batches.
For each batch in the input dataset, _LegacyRebatchDataset will produce
`num_replicas` smaller batches whose sizes add up to the original batch size.
For example:
```python
ds = tf.data.Dataset.range(8)
ds = ds.batch(4)
ds = _LegacyRebatchDataset(ds, num_replicas=3)
for elem in ds:
print(elem)
>> [0, 1], [2, 3], [], [4, 5], [6, 7], []
```
"""
def __init__(self, input_dataset, num_replicas):
"""Creates a _LegacyRebatchDataset.
Args:
input_dataset: `Dataset` to rebatch.
num_replicas: A `tf.int64` scalar, representing the number of sub-batches
to split each batch from `input_dataset` into.
"""
def recalculate_batch_size(type_spec):
"""Recalculates the output_shape after dividing it by num_replicas."""
output_shape = type_spec._to_legacy_output_shapes() # pylint: disable=protected-access
if not isinstance(output_shape, tensor_shape.TensorShape):
return None
# If the output shape is unknown, we set the batch dimension to unknown.
if output_shape.rank is None:
return None
if len(output_shape) < 1:
raise ValueError("Expected a dataset whose elements have rank >= 1 "
"but found a dataset whose elements are scalars. "
"You can fix the issue by adding the `batch` "
"transformation to the dataset.")
output_dims = [d.value for d in output_shape.dims]
if output_dims[0] is not None and output_dims[0] % num_replicas == 0:
return output_dims[0] // num_replicas
# Set the batch dimension to unknown. If the global batch size does not
# divide num_replicas evenly, the minibatches may have different sizes.
return None
def rebatch(type_spec):
# pylint: disable=protected-access
batch_size = recalculate_batch_size(type_spec)
return type_spec._unbatch()._batch(batch_size)
# pylint: enable=protected-access
self._element_spec = nest.map_structure(
rebatch, dataset_ops.get_structure(input_dataset))
input_dataset = dataset_ops.normalize_to_dense(input_dataset)
variant_tensor = ged_ops.rebatch_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
num_replicas=num_replicas,
**self._flat_structure)
super(_LegacyRebatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._element_spec
class _RemoteDataset(dataset_ops.DatasetSource):
"""Creates a dataset on a given `device` given a graph def."""
def __init__(self, graph_def, device, element_spec):
self._elem_spec = element_spec
with ops.device(device):
variant_tensor = ged_ops.dataset_from_graph(graph_def)
super(_RemoteDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._elem_spec
def replicate(dataset, devices):
"""A transformation that replicates `dataset` onto a list of devices.
Args:
dataset: A `tf.data.Dataset` object.
devices: A list of devices to replicate the dataset on.
Returns:
A dictionary mapping device name to a dataset on that device.
"""
if not isinstance(dataset, dataset_ops.DatasetV2):
raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
# pylint: disable=protected-access
dataset_device = dataset._variant_tensor.device
datasets = {}
if len(devices) == 1 and devices[0] == dataset_device:
datasets[devices[0]] = dataset
return datasets
with ops.colocate_with(dataset._variant_tensor):
dataset = dataset._apply_options()
policy = dataset.options().experimental_external_state_policy
if policy is None:
policy = ExternalStatePolicy.WARN
graph_def = dataset._as_serialized_graph(
strip_device_assignment=True, external_state_policy=policy)
for device in devices:
ds = _RemoteDataset(graph_def, device, dataset.element_spec)
datasets[device] = ds
return datasets
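# Hedged usage sketch: `replicate` is an internal helper usually driven by
# tf.distribute; called directly it behaves as documented above. Device names
# are placeholder assumptions.
#
# per_device = replicate(dataset, ["/device:CPU:0", "/device:GPU:0"])
# gpu_dataset = per_device["/device:GPU:0"]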
def batch_sizes_for_worker(global_batch_size, num_workers,
num_replicas_per_worker, worker_index):
"""Determines how to rebatch a dataset for the given worker.
Given the global batch size, number of workers, number of replicas per worker,
and worker index, returns the correct batch sizes for rebatching a dataset
on worker `worker_index` of `num_workers`, such that each global step (across
all workers and replicas) will consume global_batch_size elements. The
returned value should be passed as the `batch_sizes` input parameter to
`tf.data.experimental.rebatch()`. The returned batch sizes meet the following
constraints:
Let G = global_batch_size, W = num_workers, R = num_replicas_per_worker
(A) for any worker, len(batch_sizes) = W * R
(B) for any worker, sum(batch_sizes) == G
(C) for any global step (i.e. R iterations on each worker), the sum of batches
consumed by replicas across all workers is G.
(D) any two batch sizes of any two replicas differ by at most one.
For example, suppose we have G = 7, W = 2, R = 2, and suppose we have two
files which each contain 7 elements:
```python
# WORKER 0
batch_sizes_0 = batch_sizes_for_worker(global_batch_size=global_batch_size,
num_workers=2,
num_replicas_per_worker=2,
worker_index=0)
print(batch_sizes_0)
>> [2, 2, 2, 1]
dataset_0 = tf.data.Dataset.from_tensor_slices(["file_a", "file_b"])
dataset_0 = dataset_0.shard(num_shards, index=0)
dataset_0 = dataset_0.batch(7)
dataset_0 = dataset_0.apply(tf.data.experimental.rebatch(batch_sizes_0))
for elem in dataset_0:
print(elem)
>> [[A0, A1], [A2, A3], [A4, A5], [A6]]
# WORKER 1
batch_sizes_1 = batch_sizes_for_worker(global_batch_size=global_batch_size,
num_workers=2,
num_replicas_per_worker=2,
worker_index=1)
print(batch_sizes_1)
>> [2, 1, 2, 2]
dataset_1 = tf.data.Dataset.from_tensor_slices(["file_a", "file_b"])
dataset_1 = dataset_1.shard(num_shards, index=1)
dataset_1 = dataset_1.batch(7)
dataset_1 = dataset_1.apply(tf.data.experimental.rebatch(batch_sizes_1))
for elem in dataset_1:
print(elem)
>> [[B0, B1], [B2], [B3, B4], [B5, B6]]
```
The above example will produce the following elements:
Step 1:
Worker 0 Replica 0: [A0, A1]
Worker 0 Replica 1: [A2, A3]
Worker 1 Replica 0: [B0, B1]
Worker 1 Replica 1: [B2]
Total batch size = 7
Step 2:
Worker 0 Replica 0: [A4, A5]
Worker 0 Replica 1: [A6]
Worker 1 Replica 0: [B3, B4]
Worker 1 Replica 1: [B5, B6]
Total batch size = 7
Args:
global_batch_size: A `tf.int64` scalar, representing the global batch size.
num_workers: An integer representing the number of workers the dataset will
be distributed across.
num_replicas_per_worker: An integer representing the number of replicas per
worker. All workers are assumed to have the same number of replicas.
worker_index: An integer index of the worker to be rebatched.
Returns:
A `tf.int64` vector, representing the batch sizes to rebatch the dataset
into.
"""
# Constraint (A)
num_subbatches = num_workers * num_replicas_per_worker
offset = worker_index * num_replicas_per_worker
const_value = tensor_util.constant_value(global_batch_size)
if const_value is not None:
# Use the constant global batch size for further calculations
global_batch_size = const_value
# Let N = W * R. Constraint (B) and (D) jointly mean that the iterations
# should have batch size either floor(B/N) or ceil(B/N). Namely, of the N
# subbatches a batch is split into, B - N * floor(B/N) of them will have size
# ceil(B/N), and the rest will have size floor(B/N).
floor = global_batch_size // num_subbatches
num_ceil = global_batch_size - (num_subbatches * floor)
# For worker 0, we assign the first num_ceil subbatches to have size
# ceil(B/N), and the remainder to have size floor(B/N). The other workers will
# each be offset by R * worker_index in order to meet constraint (C).
if const_value is not None:
# If the global batch size is a known constant value, we return a constant
# tensor directly instead of manipulating it with TF ops. This allows for
# better downstream shape inference.
worker_0 = [floor + 1] * num_ceil + [floor] * (num_subbatches - num_ceil)
return ops.convert_to_tensor(
worker_0[offset:] + worker_0[:offset],
dtype=dtypes.int64,
name="batch_sizes")
worker_0 = array_ops.ones(num_subbatches, dtype=dtypes.int64)
worker_0 = floor * worker_0 + array_ops.concat([
array_ops.ones(num_ceil, dtype=dtypes.int64),
array_ops.zeros(num_subbatches - num_ceil, dtype=dtypes.int64)
],
axis=0)
return array_ops.concat([worker_0[offset:], worker_0[:offset]], axis=0)
def compute_batch_size(dataset):
"""An operation that returns the batch size of the dataset.
This op tries to infer the batch size statically by walking up the dataset
tree from the final dataset node and returning the batch size of the first
batching dataset (such as from .batch() and .padded_batch()) that it
encounters. This differs from using the `element_spec` of a dataset in that it
does not account for partial batches.
This operation may fail if it encounters contradictory batch sizes (for
example, if the dataset is created by zipping together two datasets with
different batch sizes), if there are no explicit batching transformations, or
if there are operations downstream from the batching transformation that may
modify its batch size. In these cases, it returns a -1.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.int64` Tensor representing the batch size of the dataset sans partial
batches. If this cannot be inferred statically, the value of this tensor
will be -1.
"""
def get_static_batch_dim(output_shape):
if output_shape.rank is None:
return None
return output_shape.dims[0].value
batch_dims = [
get_static_batch_dim(ts._to_legacy_output_shapes()) # pylint: disable=protected-access
for ts in nest.flatten(dataset_ops.get_structure(dataset))
]
if all(d is not None for d in batch_dims):
if all(d == batch_dims[0] for d in batch_dims):
# If all batch dimensions are known and equal, return that directly.
batch_dim = batch_dims[0]
else:
# If all batch dimensions are known but not all equal, return -1.
batch_dim = -1
return constant_op.constant(
batch_dim, dtype=dtypes.int64, name="static_batch_size")
# If any batch dimensions are unknown, use compute_batch_size op.
return ged_ops.compute_batch_size(dataset._variant_tensor) # pylint: disable=protected-access
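# Hedged usage sketch of the contract documented above, assuming eager
# execution and this module's own imports:
#
# ds = dataset_ops.DatasetV2.range(32).batch(4, drop_remainder=True)
# compute_batch_size(ds)  # -> tf.Tensor with value 4 (statically inferred)
#
# Without drop_remainder the static batch dimension is unknown and the runtime
# compute_batch_size op is used instead; contradictory batch sizes yield -1.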
_AutoShardDatasetV1.__doc__ = _AutoShardDataset.__doc__
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''Unit tests for the display module'''
# For testing purposes, clobber the rcfile
import matplotlib
matplotlib.use('Agg') # nopep8
import matplotlib.pyplot as plt
import numpy as np
# Import the hacked image comparison module
from mpl_ic import image_comparison
from nose.tools import raises
# We'll make a decorator to handle style contexts
from decorator import decorator
import mir_eval
import mir_eval.display
from mir_eval.io import load_labeled_intervals
from mir_eval.io import load_valued_intervals
from mir_eval.io import load_labeled_events
from mir_eval.io import load_ragged_time_series
from mir_eval.io import load_wav
@decorator
def styled(f, *args, **kwargs):
matplotlib.rcdefaults()
return f(*args, **kwargs)
@image_comparison(baseline_images=['segment'], extensions=['png'])
@styled
def test_display_segment():
plt.figure()
# Load some segment data
intervals, labels = load_labeled_intervals('data/segment/ref00.lab')
# Plot the segments with no labels
mir_eval.display.segments(intervals, labels, text=False)
# Draw a legend
plt.legend()
@image_comparison(baseline_images=['segment_text'], extensions=['png'])
@styled
def test_display_segment_text():
plt.figure()
# Load some segment data
intervals, labels = load_labeled_intervals('data/segment/ref00.lab')
# Plot the segments with no labels
mir_eval.display.segments(intervals, labels, text=True)
@image_comparison(baseline_images=['labeled_intervals'], extensions=['png'])
@styled
def test_display_labeled_intervals():
plt.figure()
# Load some chord data
intervals, labels = load_labeled_intervals('data/chord/ref01.lab')
# Plot the chords with nothing fancy
mir_eval.display.labeled_intervals(intervals, labels)
@image_comparison(baseline_images=['labeled_intervals_noextend'],
extensions=['png'])
@styled
def test_display_labeled_intervals_noextend():
plt.figure()
# Load some chord data
intervals, labels = load_labeled_intervals('data/chord/ref01.lab')
# Plot the chords with nothing fancy
ax = plt.axes()
ax.set_yticklabels([])
mir_eval.display.labeled_intervals(intervals, labels,
label_set=[],
extend_labels=False,
ax=ax)
@image_comparison(baseline_images=['labeled_intervals_compare'],
extensions=['png'])
@styled
def test_display_labeled_intervals_compare():
plt.figure()
# Load some chord data
ref_int, ref_labels = load_labeled_intervals('data/chord/ref01.lab')
est_int, est_labels = load_labeled_intervals('data/chord/est01.lab')
# Plot reference and estimates using label set extension
mir_eval.display.labeled_intervals(ref_int, ref_labels,
alpha=0.5, label='Reference')
mir_eval.display.labeled_intervals(est_int, est_labels,
alpha=0.5, label='Estimate')
plt.legend()
@image_comparison(baseline_images=['labeled_intervals_compare_noextend'],
extensions=['png'])
@styled
def test_display_labeled_intervals_compare_noextend():
plt.figure()
# Load some chord data
ref_int, ref_labels = load_labeled_intervals('data/chord/ref01.lab')
est_int, est_labels = load_labeled_intervals('data/chord/est01.lab')
# Plot reference and estimate, but only use the reference labels
mir_eval.display.labeled_intervals(ref_int, ref_labels,
alpha=0.5, label='Reference')
mir_eval.display.labeled_intervals(est_int, est_labels,
extend_labels=False,
alpha=0.5, label='Estimate')
plt.legend()
@image_comparison(baseline_images=['labeled_intervals_compare_common'],
extensions=['png'])
@styled
def test_display_labeled_intervals_compare_common():
plt.figure()
# Load some chord data
ref_int, ref_labels = load_labeled_intervals('data/chord/ref01.lab')
est_int, est_labels = load_labeled_intervals('data/chord/est01.lab')
label_set = list(sorted(set(ref_labels) | set(est_labels)))
# Plot reference and estimate with a common label set
mir_eval.display.labeled_intervals(ref_int, ref_labels,
label_set=label_set,
alpha=0.5, label='Reference')
mir_eval.display.labeled_intervals(est_int, est_labels,
label_set=label_set,
alpha=0.5, label='Estimate')
plt.legend()
@image_comparison(baseline_images=['hierarchy_nolabel'], extensions=['png'])
@styled
def test_display_hierarchy_nolabel():
plt.figure()
# Load some chord data
int0, lab0 = load_labeled_intervals('data/hierarchy/ref00.lab')
int1, lab1 = load_labeled_intervals('data/hierarchy/ref01.lab')
# Plot reference and estimate with a common label set
mir_eval.display.hierarchy([int0, int1],
[lab0, lab1])
plt.legend()
@image_comparison(baseline_images=['hierarchy_label'], extensions=['png'])
@styled
def test_display_hierarchy_label():
plt.figure()
# Load some chord data
int0, lab0 = load_labeled_intervals('data/hierarchy/ref00.lab')
int1, lab1 = load_labeled_intervals('data/hierarchy/ref01.lab')
# Plot reference and estimate with a common label set
mir_eval.display.hierarchy([int0, int1],
[lab0, lab1],
levels=['Large', 'Small'])
plt.legend()
@image_comparison(baseline_images=['pitch_hz'], extensions=['png'])
@styled
def test_pitch_hz():
plt.figure()
ref_times, ref_freqs = load_labeled_events('data/melody/ref00.txt')
est_times, est_freqs = load_labeled_events('data/melody/est00.txt')
# Plot pitches on a Hz scale
mir_eval.display.pitch(ref_times, ref_freqs, unvoiced=True,
label='Reference')
mir_eval.display.pitch(est_times, est_freqs, unvoiced=True,
label='Estimate')
plt.legend()
@image_comparison(baseline_images=['pitch_midi'], extensions=['png'])
@styled
def test_pitch_midi():
plt.figure()
times, freqs = load_labeled_events('data/melody/ref00.txt')
# Plot pitches on a midi scale with note tickers
mir_eval.display.pitch(times, freqs, midi=True)
mir_eval.display.ticker_notes()
@image_comparison(baseline_images=['pitch_midi_hz'], extensions=['png'])
@styled
def test_pitch_midi_hz():
plt.figure()
times, freqs = load_labeled_events('data/melody/ref00.txt')
# Plot pitches on a midi scale with note tickers
mir_eval.display.pitch(times, freqs, midi=True)
mir_eval.display.ticker_pitch()
@image_comparison(baseline_images=['multipitch_hz_unvoiced'],
extensions=['png'])
@styled
def test_multipitch_hz_unvoiced():
plt.figure()
times, pitches = load_ragged_time_series('data/multipitch/est01.txt')
# Plot pitches on a midi scale with note tickers
mir_eval.display.multipitch(times, pitches, midi=False, unvoiced=True)
@image_comparison(baseline_images=['multipitch_hz_voiced'], extensions=['png'])
@styled
def test_multipitch_hz_voiced():
plt.figure()
times, pitches = load_ragged_time_series('data/multipitch/est01.txt')
mir_eval.display.multipitch(times, pitches, midi=False, unvoiced=False)
@image_comparison(baseline_images=['multipitch_midi'], extensions=['png'])
@styled
def test_multipitch_midi():
plt.figure()
ref_t, ref_p = load_ragged_time_series('data/multipitch/ref01.txt')
est_t, est_p = load_ragged_time_series('data/multipitch/est01.txt')
# Plot pitches on a midi scale with note tickers
mir_eval.display.multipitch(ref_t, ref_p, midi=True,
alpha=0.5, label='Reference')
mir_eval.display.multipitch(est_t, est_p, midi=True,
alpha=0.5, label='Estimate')
plt.legend()
@image_comparison(baseline_images=['piano_roll'], extensions=['png'])
@styled
def test_pianoroll():
plt.figure()
ref_t, ref_p = load_valued_intervals('data/transcription/ref04.txt')
est_t, est_p = load_valued_intervals('data/transcription/est04.txt')
mir_eval.display.piano_roll(ref_t, ref_p,
label='Reference', alpha=0.5)
mir_eval.display.piano_roll(est_t, est_p,
label='Estimate', alpha=0.5, facecolor='r')
plt.legend()
@image_comparison(baseline_images=['piano_roll_midi'], extensions=['png'])
@styled
def test_pianoroll_midi():
plt.figure()
ref_t, ref_p = load_valued_intervals('data/transcription/ref04.txt')
est_t, est_p = load_valued_intervals('data/transcription/est04.txt')
ref_midi = mir_eval.util.hz_to_midi(ref_p)
est_midi = mir_eval.util.hz_to_midi(est_p)
mir_eval.display.piano_roll(ref_t, midi=ref_midi,
label='Reference', alpha=0.5)
mir_eval.display.piano_roll(est_t, midi=est_midi,
label='Estimate', alpha=0.5, facecolor='r')
plt.legend()
@image_comparison(baseline_images=['ticker_midi_zoom'], extensions=['png'])
@styled
def test_ticker_midi_zoom():
plt.figure()
plt.plot(np.arange(3))
mir_eval.display.ticker_notes()
@image_comparison(baseline_images=['separation'], extensions=['png'])
@styled
def test_separation():
plt.figure()
x0, fs = load_wav('data/separation/ref05/0.wav')
x1, fs = load_wav('data/separation/ref05/1.wav')
x2, fs = load_wav('data/separation/ref05/2.wav')
mir_eval.display.separation([x0, x1, x2], fs=fs)
@image_comparison(baseline_images=['separation_label'], extensions=['png'])
@styled
def test_separation_label():
plt.figure()
x0, fs = load_wav('data/separation/ref05/0.wav')
x1, fs = load_wav('data/separation/ref05/1.wav')
x2, fs = load_wav('data/separation/ref05/2.wav')
mir_eval.display.separation([x0, x1, x2], fs=fs,
labels=['Alice', 'Bob', 'Carol'])
plt.legend()
@image_comparison(baseline_images=['events'], extensions=['png'])
@styled
def test_events():
plt.figure()
# Load some event data
beats_ref = mir_eval.io.load_events('data/beat/ref00.txt')[:30]
beats_est = mir_eval.io.load_events('data/beat/est00.txt')[:30]
# Plot both with labels
mir_eval.display.events(beats_ref, label='reference')
mir_eval.display.events(beats_est, label='estimate')
plt.legend()
@image_comparison(baseline_images=['labeled_events'], extensions=['png'])
@styled
def test_labeled_events():
plt.figure()
# Load some event data
beats_ref = mir_eval.io.load_events('data/beat/ref00.txt')[:10]
labels = list('abcdefghijklmnop')
# Plot both with labels
mir_eval.display.events(beats_ref, labels)
@raises(ValueError)
def test_pianoroll_nopitch_nomidi():
# Issue 214
mir_eval.display.piano_roll([[0, 1]])
|
|
"""
============================
``ctypes`` Utility Functions
============================
See Also
---------
load_library : Load a C library.
ndpointer : Array restype/argtype with verification.
as_ctypes : Create a ctypes array from an ndarray.
as_array : Create an ndarray from a ctypes array.
References
----------
.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes
Examples
--------
Load the C library:
>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP
Our result type, an ndarray that must be of type double, 1-dimensional,
and C-contiguous in memory:
>>> array_1d_double = np.ctypeslib.ndpointer(
... dtype=np.double,
... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP
Our C-function typically takes an array and updates its values
in-place. For example::
void foo_func(double* x, int length)
{
int i;
for (i = 0; i < length; i++) {
x[i] = i*i;
}
}
We wrap it using:
>>> _lib.foo_func.restype = None #doctest: +SKIP
>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
Then, we're ready to call ``foo_func``:
>>> out = np.empty(15, dtype=np.double)
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
from __future__ import division, absolute_import, print_function
__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
import sys, os
from numpy import integer, ndarray, dtype as _dtype, deprecate, array
from numpy.core.multiarray import _flagdict, flagsobj
try:
import ctypes
except ImportError:
ctypes = None
if ctypes is None:
def _dummy(*args, **kwds):
"""
Dummy object that raises an ImportError if ctypes is not available.
Raises
------
ImportError
If ctypes is not available.
"""
raise ImportError("ctypes is not available.")
ctypes_load_library = _dummy
load_library = _dummy
as_ctypes = _dummy
as_array = _dummy
from numpy import intp as c_intp
_ndptr_base = object
else:
import numpy.core._internal as nic
c_intp = nic._getintp_ctype()
del nic
_ndptr_base = ctypes.c_void_p
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
"""
It is possible to load a library using
>>> lib = ctypes.cdll[<full_path_name>]
But there are cross-platform considerations, such as library file extensions,
plus the fact that Windows will just load the first library it finds with that name.
Numpy supplies the load_library function as a convenience.
Parameters
----------
libname : str
Name of the library, which can have 'lib' as a prefix,
but without an extension.
loader_path : str
Where the library can be found.
Returns
-------
ctypes.cdll[libpath] : library object
A ctypes library object
Raises
------
OSError
If there is no library with the expected extension, or the
library is defective and cannot be loaded.
"""
if ctypes.__version__ < '1.0.1':
import warnings
warnings.warn("All features of ctypes interface may not work " \
"with ctypes < 1.0.1")
ext = os.path.splitext(libname)[1]
if not ext:
# Try to load library with platform-specific name, otherwise
# default to libname.[so|pyd]. Sometimes, these files are built
# erroneously on non-linux platforms.
from numpy.distutils.misc_util import get_shared_lib_extension
so_ext = get_shared_lib_extension()
libname_ext = [libname + so_ext]
# mac, windows and linux >= py3.2 shared library and loadable
# module have different extensions so try both
so_ext2 = get_shared_lib_extension(is_python_ext=True)
if not so_ext2 == so_ext:
libname_ext.insert(0, libname + so_ext2)
try:
import sysconfig
so_ext3 = '.%s-%s.so' % (sysconfig.get_config_var('SOABI'),
sysconfig.get_config_var('MULTIARCH'))
libname_ext.insert(0, libname + so_ext3)
except (KeyError, ImportError):
pass
else:
libname_ext = [libname]
loader_path = os.path.abspath(loader_path)
if not os.path.isdir(loader_path):
libdir = os.path.dirname(loader_path)
else:
libdir = loader_path
for ln in libname_ext:
libpath = os.path.join(libdir, ln)
if os.path.exists(libpath):
try:
return ctypes.cdll[libpath]
except OSError:
## defective lib file
raise
## if no successful return in the libname_ext loop:
raise OSError("no file with expected extension")
ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
'load_library')
def _num_fromflags(flaglist):
num = 0
for val in flaglist:
num += _flagdict[val]
return num
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
'OWNDATA', 'UPDATEIFCOPY']
def _flags_fromnum(num):
res = []
for key in _flagnames:
value = _flagdict[key]
if (num & value):
res.append(key)
return res
class _ndptr(_ndptr_base):
def _check_retval_(self):
"""This method is called when this class is used as the .restype
attribute for a shared-library function. It constructs a numpy
array from a void pointer."""
return array(self)
@property
def __array_interface__(self):
return {'descr': self._dtype_.descr,
'__ref': self,
'strides': None,
'shape': self._shape_,
'version': 3,
'typestr': self._dtype_.descr[0][1],
'data': (self.value, False),
}
@classmethod
def from_param(cls, obj):
if not isinstance(obj, ndarray):
raise TypeError("argument must be an ndarray")
if cls._dtype_ is not None \
and obj.dtype != cls._dtype_:
raise TypeError("array must have data type %s" % cls._dtype_)
if cls._ndim_ is not None \
and obj.ndim != cls._ndim_:
raise TypeError("array must have %d dimension(s)" % cls._ndim_)
if cls._shape_ is not None \
and obj.shape != cls._shape_:
raise TypeError("array must have shape %s" % str(cls._shape_))
if cls._flags_ is not None \
and ((obj.flags.num & cls._flags_) != cls._flags_):
raise TypeError("array must have flags %s" %
_flags_fromnum(cls._flags_))
return obj.ctypes
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
_pointer_type_cache = {}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
"""
Array-checking restype/argtypes.
An ndpointer instance is used to describe an ndarray in restypes
and argtypes specifications. This approach is more flexible than
using, for example, ``POINTER(c_double)``, since several restrictions
can be specified, which are verified upon calling the ctypes function.
These include data type, number of dimensions, shape and flags. If a
given array does not satisfy the specified restrictions,
a ``TypeError`` is raised.
Parameters
----------
dtype : data-type, optional
Array data-type.
ndim : int, optional
Number of array dimensions.
shape : tuple of ints, optional
Array shape.
flags : str or tuple of str
Array flags; may be one or more of:
- C_CONTIGUOUS / C / CONTIGUOUS
- F_CONTIGUOUS / F / FORTRAN
- OWNDATA / O
- WRITEABLE / W
- ALIGNED / A
- UPDATEIFCOPY / U
Returns
-------
klass : ndpointer type object
A type object, which is an ``_ndptr`` instance containing
dtype, ndim, shape and flags information.
Raises
------
TypeError
If a given array does not satisfy the specified restrictions.
Examples
--------
>>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
... ndim=1,
... flags='C_CONTIGUOUS')]
... #doctest: +SKIP
>>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
... #doctest: +SKIP
"""
if dtype is not None:
dtype = _dtype(dtype)
num = None
if flags is not None:
if isinstance(flags, str):
flags = flags.split(',')
elif isinstance(flags, (int, integer)):
num = flags
flags = _flags_fromnum(num)
elif isinstance(flags, flagsobj):
num = flags.num
flags = _flags_fromnum(num)
if num is None:
try:
flags = [x.strip().upper() for x in flags]
except:
raise TypeError("invalid flags specification")
num = _num_fromflags(flags)
try:
return _pointer_type_cache[(dtype, ndim, shape, num)]
except KeyError:
pass
if dtype is None:
name = 'any'
elif dtype.names:
name = str(id(dtype))
else:
name = dtype.str
if ndim is not None:
name += "_%dd" % ndim
if shape is not None:
try:
strshape = [str(x) for x in shape]
except TypeError:
strshape = [str(shape)]
shape = (shape,)
shape = tuple(shape)
name += "_"+"x".join(strshape)
if flags is not None:
name += "_"+"_".join(flags)
else:
flags = []
klass = type("ndpointer_%s"%name, (_ndptr,),
{"_dtype_": dtype,
"_shape_" : shape,
"_ndim_" : ndim,
"_flags_" : num})
_pointer_type_cache[(dtype, ndim, shape, num)] = klass
return klass
if ctypes is not None:
ct = ctypes
################################################################
# simple types
# maps the numpy typecodes like '<f8' to simple ctypes types like
# c_double. Filled in by prep_simple.
_typecodes = {}
def prep_simple(simple_type, dtype):
"""Given a ctypes simple type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: simple_type.__array_interface__
except AttributeError: pass
else: return
typestr = _dtype(dtype).str
_typecodes[typestr] = simple_type
def __array_interface__(self):
return {'descr': [('', typestr)],
'__ref': self,
'strides': None,
'shape': (),
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
simple_type.__array_interface__ = property(__array_interface__)
simple_types = [
((ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong), "i"),
((ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong), "u"),
((ct.c_float, ct.c_double), "f"),
]
# Prep the numerical ctypes types:
for types, code in simple_types:
for tp in types:
prep_simple(tp, "%c%d" % (code, ct.sizeof(tp)))
################################################################
# array types
_ARRAY_TYPE = type(ct.c_int * 1)
def prep_array(array_type):
"""Given a ctypes array type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: array_type.__array_interface__
except AttributeError: pass
else: return
shape = []
ob = array_type
while type(ob) is _ARRAY_TYPE:
shape.append(ob._length_)
ob = ob._type_
shape = tuple(shape)
ai = ob().__array_interface__
descr = ai['descr']
typestr = ai['typestr']
def __array_interface__(self):
return {'descr': descr,
'__ref': self,
'strides': None,
'shape': shape,
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
array_type.__array_interface__ = property(__array_interface__)
def prep_pointer(pointer_obj, shape):
"""Given a ctypes pointer object, construct and
attach an __array_interface__ property to it if it does not
yet have one.
"""
try: pointer_obj.__array_interface__
except AttributeError: pass
else: return
contents = pointer_obj.contents
dtype = _dtype(type(contents))
inter = {'version': 3,
'typestr': dtype.str,
'data': (ct.addressof(contents), False),
'shape': shape}
pointer_obj.__array_interface__ = inter
################################################################
# public functions
def as_array(obj, shape=None):
"""Create a numpy array from a ctypes array or a ctypes POINTER.
The numpy array shares the memory with the ctypes object.
The shape parameter must be given if converting from a ctypes POINTER.
The shape parameter is ignored if converting from a ctypes array.
"""
tp = type(obj)
try: tp.__array_interface__
except AttributeError:
if hasattr(obj, 'contents'):
prep_pointer(obj, shape)
else:
prep_array(tp)
return array(obj, copy=False)
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
tp = _typecodes[ai["typestr"]]
for dim in ai["shape"][::-1]:
tp = tp * dim
result = tp.from_address(addr)
result.__keep = ai
return result
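# Hedged doctest-style sketch of the as_ctypes / as_array round trip defined
# above (assumes numpy is importable as np):
#
# >>> import numpy as np
# >>> a = np.array([1, 2, 3], dtype=np.int32)
# >>> c_arr = np.ctypeslib.as_ctypes(a)   # ctypes array sharing a's memory
# >>> b = np.ctypeslib.as_array(c_arr)
# >>> b[0] = 99
# >>> int(a[0])
# 99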
|
|
# -*- coding: utf-8 -*-
import cherrypy
import mako
import mimetypes
import os
import girder.events
from girder import constants, logprint, __version__, logStdoutStderr, _setupCache
from girder.models.setting import Setting
from girder import plugin
from girder.settings import SettingKey
from girder.utility import config
from girder.constants import ServerMode
from . import webroot
with open(os.path.join(os.path.dirname(__file__), 'error.mako')) as f:
_errorTemplate = f.read()
def _errorDefault(status, message, *args, **kwargs):
"""
This is used to render error pages outside of the normal Girder app, such as
404s. This overrides the default cherrypy error pages.
"""
return mako.template.Template(_errorTemplate).render(status=status, message=message)
def getApiRoot():
return config.getConfig()['server']['api_root']
def getStaticPublicPath():
return config.getConfig()['server']['static_public_path']
def configureServer(mode=None, plugins=None, curConfig=None):
"""
Function to setup the cherrypy server. It configures it, but does
not actually start it.
:param mode: The server mode to start in.
:type mode: string
:param plugins: If you wish to start the server with a custom set of
plugins, pass this as a list of plugins to load. Otherwise,
all installed plugins will be loaded.
:param curConfig: The configuration dictionary to update.
"""
if curConfig is None:
curConfig = config.getConfig()
appconf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'request.show_tracebacks': mode == ServerMode.TESTING,
'request.methods_with_bodies': ('POST', 'PUT', 'PATCH'),
'response.headers.server': 'Girder %s' % __version__,
'error_page.default': _errorDefault
}
}
# Add MIME types for serving Fontello files from staticdir;
# these may be missing or incorrect in the OS
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/x-font-ttf', '.ttf')
mimetypes.add_type('application/font-woff', '.woff')
curConfig.update(appconf)
if mode:
curConfig['server']['mode'] = mode
logprint.info('Running in mode: ' + curConfig['server']['mode'])
cherrypy.config['engine.autoreload.on'] = mode == ServerMode.DEVELOPMENT
_setupCache()
# Don't import this until after the configs have been read; some module
# initialization code requires the configuration to be set up.
from girder.api import api_main
root = webroot.Webroot()
api_main.addApiToNode(root)
girder.events.setupDaemon()
cherrypy.engine.subscribe('start', girder.events.daemon.start)
cherrypy.engine.subscribe('stop', girder.events.daemon.stop)
routeTable = loadRouteTable()
info = {
'config': appconf,
'serverRoot': root,
'serverRootPath': routeTable[constants.GIRDER_ROUTE_ID],
'apiRoot': root.api.v1,
}
plugin._loadPlugins(info, plugins)
root, appconf = info['serverRoot'], info['config']
return root, appconf
def loadRouteTable(reconcileRoutes=False):
"""
Retrieves the route table from Girder and reconciles the state of it with the current
application state.
Reconciliation ensures that every enabled plugin has a route by assigning default routes for
plugins that have none, such as newly-enabled plugins.
:returns: The non-empty routes (as a dict of name -> route) to be mounted by CherryPy
during Girder's setup phase.
"""
pluginWebroots = plugin.getPluginWebroots()
routeTable = Setting().get(SettingKey.ROUTE_TABLE)
def reconcileRouteTable(routeTable):
hasChanged = False
# Migration for the removed static root setting
if 'core_static_root' in routeTable:
del routeTable['core_static_root']
hasChanged = True
for name in pluginWebroots.keys():
if name not in routeTable:
routeTable[name] = os.path.join('/', name)
hasChanged = True
if hasChanged:
Setting().set(SettingKey.ROUTE_TABLE, routeTable)
return routeTable
if reconcileRoutes:
routeTable = reconcileRouteTable(routeTable)
return {name: route for (name, route) in routeTable.items() if route}
def setup(mode=None, plugins=None, curConfig=None):
"""
Configure and mount the Girder server and plugins under the
appropriate routes.
See ROUTE_TABLE setting.
:param mode: The server mode to start in.
:type mode: string
:param plugins: List of plugins to enable.
:param curConfig: The config object to update.
"""
logStdoutStderr()
pluginWebroots = plugin.getPluginWebroots()
girderWebroot, appconf = configureServer(mode, plugins, curConfig)
routeTable = loadRouteTable(reconcileRoutes=True)
# Mount Girder
application = cherrypy.tree.mount(
girderWebroot, str(routeTable[constants.GIRDER_ROUTE_ID]), appconf)
# Mount static files
cherrypy.tree.mount(None, '/static',
{'/':
{'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(constants.STATIC_ROOT_DIR),
'request.show_tracebacks': appconf['/']['request.show_tracebacks'],
'response.headers.server': 'Girder %s' % __version__,
'error_page.default': _errorDefault}})
# Mount API (special case)
# The API is always mounted at /api AND at api relative to the Girder root
cherrypy.tree.mount(girderWebroot.api, '/api', appconf)
# Mount everything else in the routeTable
for name, route in routeTable.items():
if name != constants.GIRDER_ROUTE_ID and name in pluginWebroots:
cherrypy.tree.mount(pluginWebroots[name], route, appconf)
return application
class _StaticFileRoute:
exposed = True
def __init__(self, path, contentType=None):
self.path = os.path.abspath(path)
self.contentType = contentType
def GET(self):
return cherrypy.lib.static.serve_file(self.path, content_type=self.contentType)
def staticFile(path, contentType=None):
"""
Helper function to serve a static file. This should be bound as the route
object, i.e. info['serverRoot'].route_name = staticFile('...')
:param path: The path of the static file to serve from this route.
:type path: str
    :param contentType: The MIME type of the static file. If set to None, the
                        content type will be guessed from the file extension of
                        the 'path' argument.
"""
return _StaticFileRoute(path, contentType)
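if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of Girder itself; Girder is
    # normally started through its own entry points). Configure and mount the
    # server in development mode, then serve until interrupted. A plugin could
    # additionally expose a static file on the root as described in the
    # staticFile docstring, e.g. info['serverRoot'].robots = staticFile(...).
    setup(mode=ServerMode.DEVELOPMENT)
    cherrypy.engine.start()
    cherrypy.engine.block()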
|
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <[email protected]>
from datetime import datetime
import urllib
from twisted.internet import reactor, defer
from twisted.python import failure, util
from twisted.python.filepath import FilePath
from coherence.upnp.core import DIDLLite
import dbus
import dbus.service
import coherence.extern.louie as louie
from coherence.backend import BackendItem, BackendStore
from coherence.upnp.core.soap_service import errorCode  # used by upnp_DestroyObject error handling
ROOT_CONTAINER_ID = 0
RECORDINGS_CONTAINER_ID = 100
BUS_NAME = 'org.gnome.DVB'
OBJECT_PATH = '/org/gnome/DVB/RecordingsStore'
class Container(BackendItem):
logCategory = 'dvbd_store'
def __init__(self, id, parent_id, name, store=None, children_callback=None, container_class=DIDLLite.Container):
self.id = id
self.parent_id = parent_id
self.name = name
self.mimetype = 'directory'
self.item = container_class(id, parent_id,self.name)
self.item.childCount = 0
self.update_id = 0
if children_callback != None:
self.children = children_callback
else:
self.children = util.OrderedDict()
if store!=None:
self.get_url = lambda: store.urlbase + str(self.id)
def add_child(self, child):
id = child.id
if isinstance(child.id, basestring):
_,id = child.id.split('.')
self.children[id] = child
if self.item.childCount != None:
self.item.childCount += 1
def get_children(self,start=0,end=0):
self.info("container.get_children %r %r", start, end)
if callable(self.children):
return self.children(start,end-start)
else:
children = self.children.values()
if end == 0:
return children[start:]
else:
return children[start:end]
def remove_children(self):
if not callable(self.children):
self.children = util.OrderedDict()
self.item.childCount = 0
def get_child_count(self):
if self.item.childCount != None:
return self.item.childCount
if callable(self.children):
return len(self.children())
else:
return len(self.children)
def get_item(self):
return self.item
def get_name(self):
return self.name
def get_id(self):
return self.id
class Recording(BackendItem):
logCategory = 'dvbd_store'
def __init__(self,store,
id,parent_id,
file,title,
date,duration,
mimetype):
self.store = store
self.id = 'recording.%s' % id
self.parent_id = parent_id
self.real_id = id
self.location = FilePath(unicode(file))
self.title = unicode(title)
self.mimetype = str(mimetype)
self.date = datetime.fromtimestamp(int(date))
self.duration = int(duration)
self.size = self.location.getsize()
self.bitrate = 0
self.url = self.store.urlbase + str(self.id)
def get_children(self, start=0, end=0):
return []
def get_child_count(self):
return 0
def get_item(self, parent_id=None):
self.debug("Recording get_item %r @ %r" %(self.id,self.parent_id))
# create item
item = DIDLLite.VideoBroadcast(self.id,self.parent_id)
item.date = self.date
item.title = self.title
# add http resource
res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
if self.size > 0:
res.size = self.size
if self.duration > 0:
res.duration = str(self.duration)
if self.bitrate > 0:
            res.bitrate = str(self.bitrate)
item.res.append(res)
# add internal resource
res = DIDLLite.Resource('file://'+ urllib.quote(self.get_path()), 'internal:%s:%s:*' % (self.store.server.coherence.hostname,self.mimetype))
if self.size > 0:
res.size = self.size
if self.duration > 0:
res.duration = str(self.duration)
if self.bitrate > 0:
            res.bitrate = str(self.bitrate)
item.res.append(res)
return item
def get_id(self):
return self.id
def get_name(self):
return self.title
def get_url(self):
return self.url
def get_path(self):
return self.location.path
class DVBDStore(BackendStore):
""" this is a backend to the DVB Daemon
http://www.k-d-w.org/node/42
"""
implements = ['MediaServer']
logCategory = 'dvbd_store'
def __init__(self, server, **kwargs):
if server.coherence.config.get('use_dbus','no') != 'yes':
raise Exception, 'this backend needs use_dbus enabled in the configuration'
BackendStore.__init__(self,server,**kwargs)
self.config = kwargs
self.name = kwargs.get('name','TV')
self.update_id = 0
if kwargs.get('enable_destroy','no') == 'yes':
self.upnp_DestroyObject = self.hidden_upnp_DestroyObject
self.bus = dbus.SessionBus()
dvb_daemon = self.bus.get_object(BUS_NAME,OBJECT_PATH)
self.store_interface = dbus.Interface(dvb_daemon, 'org.gnome.DVB.RecordingsStore')
dvb_daemon.connect_to_signal('Changed', self.recording_changed, dbus_interface='org.gnome.DVB.RecordingsStore')
self.containers = {}
self.containers[ROOT_CONTAINER_ID] = \
Container(ROOT_CONTAINER_ID,-1,self.name,store=self)
self.containers[RECORDINGS_CONTAINER_ID] = \
Container(RECORDINGS_CONTAINER_ID,ROOT_CONTAINER_ID,'Recordings',store=self)
self.containers[ROOT_CONTAINER_ID].add_child(self.containers[RECORDINGS_CONTAINER_ID])
def query_finished(r):
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
def query_failed(r):
error = ''
louie.send('Coherence.UPnP.Backend.init_failed', None, backend=self, msg=error)
d = self.get_recordings()
d.addCallback(query_finished)
d.addErrback(lambda x: louie.send('Coherence.UPnP.Backend.init_failed', None, backend=self, msg='Connection to DVB Daemon failed!'))
def __repr__(self):
return "DVBDStore"
def get_by_id(self,id):
self.info("looking for id %r", id)
if isinstance(id, basestring):
id = id.split('@',1)
id = id[0]
item = None
try:
id = int(id)
item = self.containers[id]
except (ValueError,KeyError):
try:
type,id = id.split('.')
if type == 'recording':
return self.containers[RECORDINGS_CONTAINER_ID].children[id]
except (ValueError,KeyError):
return None
return item
def recording_changed(self, id, mode):
self.containers[RECORDINGS_CONTAINER_ID].remove_children()
def handle_result(r):
print "recording changed, handle_result"
print self.containers[RECORDINGS_CONTAINER_ID].update_id
self.containers[RECORDINGS_CONTAINER_ID].update_id += 1
print self.containers[RECORDINGS_CONTAINER_ID].update_id
if( self.server and
hasattr(self.server,'content_directory_server')):
if hasattr(self, 'update_id'):
self.update_id += 1
self.server.content_directory_server.set_variable(0, 'SystemUpdateID', self.update_id)
value = (RECORDINGS_CONTAINER_ID,self.containers[RECORDINGS_CONTAINER_ID].update_id)
print "ContainerUpdateIDs new value", value
self.server.content_directory_server.set_variable(0, 'ContainerUpdateIDs', value)
def handle_error(error):
print error
d = self.get_recordings()
d.addCallback(handle_result)
d.addErrback(handle_error)
def get_recording_details(self,id):
def get_title(id):
d = defer.Deferred()
self.store_interface.GetName(id,
reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def get_path(id):
d = defer.Deferred()
self.store_interface.GetLocation(id,
reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def get_date(id):
d = defer.Deferred()
self.store_interface.GetStartTimestamp(id,
reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def get_duration(id):
d = defer.Deferred()
self.store_interface.GetLength(id,
reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def process_details(r, id):
name = r[0][1]
if len(name) == 0:
name = 'Recording ' + str(id)
return {'id':id,'name':name,'path':r[1][1],'date':r[2][1],'duration':r[3][1]}
def handle_error(error):
return error
dl = defer.DeferredList((get_title(id),get_path(id),get_date(id),get_duration(id)))
dl.addCallback(process_details,id)
dl.addErrback(handle_error)
return dl
def get_recordings(self):
def handle_error(error):
return error
def process_query_result(ids):
#print "process_query_result", ids
if len(ids) == 0:
return
l = []
for id in ids:
l.append(self.get_recording_details(id))
dl = defer.DeferredList(l)
return dl
def process_details(results):
#print 'process_details', results
for result,recording in results:
#print result, recording['name']
if result == True:
print "add", recording['id'], recording['name'], recording['path'], recording['date'], recording['duration']
video_item = Recording(self,
recording['id'],
RECORDINGS_CONTAINER_ID,
recording['path'],
recording['name'],
recording['date'],
recording['duration'],
'video/mpegts')
self.containers[RECORDINGS_CONTAINER_ID].add_child(video_item)
d = defer.Deferred()
d.addCallback(process_query_result)
d.addCallback(process_details)
d.addErrback(handle_error)
d.addErrback(handle_error)
self.store_interface.GetRecordings(reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def upnp_init(self):
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
['http-get:*:video/mpegts:*',
'internal:%s:video/mpegts:*' % self.server.coherence.hostname,])
def hidden_upnp_DestroyObject(self, *args, **kwargs):
ObjectID = kwargs['ObjectID']
item = self.get_by_id(ObjectID)
if item == None:
return failure.Failure(errorCode(701))
def handle_success(deleted):
print 'deleted', deleted, kwargs['ObjectID']
if deleted == False:
return failure.Failure(errorCode(715))
return {}
def handle_error(error):
return failure.Failure(errorCode(701))
d = defer.Deferred()
self.store_interface.Delete(int(item.real_id),
reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
d.addCallback(handle_success)
d.addErrback(handle_error)
return d
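if __name__ == '__main__':
    # Illustrative sketch, an assumption (coherence and an old Twisted are
    # importable; no DVB daemon or dbus connection is needed). It only shows
    # the id convention: children carry ids of the form 'recording.<n>' and
    # add_child files them under the numeric part, which is what
    # DVBDStore.get_by_id looks up for 'recording' ids.
    class FakeRecording(object):
        id = 'recording.42'
    recordings = Container(RECORDINGS_CONTAINER_ID, ROOT_CONTAINER_ID, 'Recordings')
    recordings.add_child(FakeRecording())
    print recordings.children['42'].id    # 'recording.42'
    print recordings.get_child_count()    # 1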
|
|
# Built-in
# import warnings
# Common
import numpy as np
# import scipy.interpolate as scpinterp
# import scipy.integrate as scpintg
# from inspect import signature as insp
# local
from . import _GG
_APPROX = True
_ANISO = False
_BLOCK = True
_LTYPES = [int, float, np.int_, np.float_]
###############################################################################
###############################################################################
# Check inputs
###############################################################################
def _check_calc_solidangle_particle(
traj=None,
pts=None,
rad=None,
config=None,
approx=None,
aniso=None,
block=None,
):
# Check booleans
if approx is None:
approx = _APPROX
if aniso is None:
aniso = _ANISO
lbool = [('approx', approx), ('aniso', aniso)]
for kk, vv in lbool:
if not isinstance(vv, bool):
msg = ("Arg {} must be a bool\n".format(kk)
+ "\t- provided: {}".format(vv))
raise Exception(msg)
# Check config
c0 = [config is None, config.__class__.__name__ == "Config"]
if not any(c0):
msg = ("Arg config must be either Noen or a tf.geom.Config instance!\n"
+ "\t- provided: {}".format(config))
raise Exception(msg)
# Double-check block vs config
if block is None:
if config is None:
block = False
else:
block = _BLOCK
if not isinstance(block, bool):
msg = ("Arg {} must be a bool\n".format('block')
+ "\t- provided: {}".format(block))
raise Exception(msg)
if config is None and block is True:
msg = ("Arg block cannot be True of config is not provided!")
raise Exception(msg)
# arrays
try:
traj = np.ascontiguousarray(traj, dtype=float)
rad = np.r_[rad].astype(float).ravel()
# Check pts, traj and r are array of good shape
c0 = traj.ndim in [1, 2] and 3 in traj.shape
if pts is not False:
pts = np.ascontiguousarray(pts, dtype=float)
c0 = c0 and pts.ndim in [1, 2] and 3 in pts.shape
assert c0
if traj.ndim == 1:
traj = traj.reshape((3, 1))
if traj.shape[0] != 3:
traj = traj.T
traj = np.ascontiguousarray(traj)
if pts is not False:
if pts.ndim == 1:
pts = pts.reshape((3, 1))
if pts.shape[0] != 3:
pts = pts.T
pts = np.ascontiguousarray(pts)
except Exception:
msg = (
"Args traj and pts must be convertible to np.ndarrays of shape"
+ "\n\t- traj: (N,), (3, N) or (N, 3)"
+ "\n\t- pts: (M,), (3, M) or (M, 3)"
+ "\n\n You provided:\n"
+ "\n\t- traj: {}".format(traj)
+ "\n\t- pts: {}".format(pts)
+ "\n\t- rad: {}".format(rad)
)
raise Exception(msg)
# check rad vs traj
ntraj = traj.shape[1]
nrad = rad.size
nmax = max(nrad, ntraj)
if not (nrad in [1, nmax] and ntraj in [1, nmax]):
msg = ("rad must be an array with shape (1,) or (N,)\n"
+ " provided: {}".format(rad))
raise Exception(msg)
if nrad < nmax:
rad = np.full((nmax,), rad[0])
if ntraj < nmax:
traj = np.repeat(traj, nmax, axis=1)
return traj, pts, rad, config, approx, aniso, block
###############################################################################
###############################################################################
# Solid Angle particle
###############################################################################
def calc_solidangle_particle(
pts=None,
part_traj=None,
part_radius=None,
config=None,
approx=None,
aniso=None,
block=None,
):
""" Compute the solid angle subtended by a particle along a trajectory
The particle has radius r, and trajectory (array of points) traj
It is observed from pts (array of points)
traj and pts are (3, N) and (3, M) arrays of cartesian coordinates
approx = True => use approximation
aniso = True => return also unit vector of emission
block = True consider LOS collisions (with Ves, Struct...)
if block:
config used for LOS collisions
Parameters
----------
pts: np.ndarray
Array of (3, M) pts coordinates (X, Y, Z) representing the points from
which the particle is observed
part_traj: np.ndarray
Array of (3, N) pts coordinates (X, Y, Z) representing the particle
positions
    part_radius: float / np.ndarray
        Unique or multiple values for the radius of the spherical particle
        if multiple, rad is a np.ndarray of shape (N,)
    config: None / tf.geom.Config
        if block = True, solid angles are non-zero only if the field of view is
        not blocked by a structural element in the chamber
    approx: None / bool
        Flag indicating whether to compute the solid angle using a 1st-order
        series development (in which case the solid angle becomes proportional
        to the radius of the particle, see Notes_Upgrades/)
aniso: None / bool
Flag indicating whether to consider anisotropic emissivity, meaning the
routine must also compute and return the unit vector directing the flux
from each pts to each position on the trajectory of the particle
block: None / bool
Flag indicating whether to check for vignetting by structural elements
provided by config
Return:
-------
sang: np.ndarray
(N, M) Array of floats, solid angles
"""
################
# Prepare inputs
(
part_traj, pts, part_radius, config,
approx, aniso, block
) = _check_calc_solidangle_particle(
traj=part_traj,
pts=pts,
rad=part_radius,
config=config,
approx=approx,
aniso=aniso,
block=block,
)
################
# Main computation
# traj2pts vector, with length (3d array (3, N, M))
vect = - pts[:, :, None] + part_traj[:, None, :]
len_v = np.ascontiguousarray(np.sqrt(np.sum(vect**2, axis=0)))
# If aniso or block, normalize
if aniso or block:
vect = vect / len_v[None, :, :]
# Solid angle
r_d = part_radius[None, :] / len_v
where_zero = len_v <= part_radius[None, :]
r_d[where_zero] = 0. # temporary value
if approx:
sang = np.pi * (r_d**2 + r_d**4 / 4. + r_d**6 / 8. + r_d**8 * 5 / 64)
else:
sang = 2.*np.pi * (1 - np.sqrt(1. - r_d ** 2))
# when particle in mesh point, distance len_v = 0 thus sang neglected
sang[where_zero] = 0.
# block
if block:
kwdargs = config.get_kwdargs_LOS_isVis()
indvis = _GG.LOS_areVis_PtsFromPts_VesStruct(
pts, part_traj, dist=len_v, **kwdargs
)
iout = indvis == 0
sang[iout] = 0.
vect[:, iout] = np.nan
################
# Return
if aniso:
return sang, vect
return sang
def calc_solidangle_particle_integ(
part_traj=None,
part_radius=None,
config=None,
approx=True,
block=True,
resolution=None,
DR=None,
DZ=None,
DPhi=None,
):
# step0: if block : generate kwdargs from config
# step 1: sample cross-section
# step 2: loop on R of pts of cross-section (parallelize ?)
# => fix nb. of phi for the rest of the loop
# loop of Z
# step 3: loop phi
# Check visibility (if block = True) for each phi (LOS collision)
# If visible => compute solid angle
# integrate (sum * res) on each phi the solid angle
# Return sang as (N,nR,nZ) array
# ----------------
# check resolution
if resolution is None:
resolution = 0.1
if type(resolution) in _LTYPES:
resolution = [resolution, resolution, resolution]
c0 = (
isinstance(resolution, list)
and all([type(ss) in _LTYPES for ss in resolution])
)
if not c0:
msg = (
"Arg resolution must be a list of 3 floats [r, z, rphi]\n"
"Each representing the spatial sampling step in a direction\n"
"If a single float is provided, the same is used for all"
)
raise Exception(msg)
resolution = [float(rr) for rr in resolution]
# ------------------
# Check DR, DZ, DPhi
dD = {'DR': DR, 'DZ': DZ, 'DPhi': DPhi}
dfail = {}
for k0, v0 in dD.items():
c0 = (
v0 is None
or (
isinstance(v0, list)
and len(v0) == 2
and all([v1 is None or type(v1) in _LTYPES for v1 in v0])
)
)
if not c0:
dfail[k0] = str(v0)
if len(dfail) > 0:
lstr = [f'\t- {k0}: {v0}' for k0, v0 in dfail.items()]
msg = (
"The following arguments are invalid:\n"
"Expected None or a list of len(2) of None or floats!\n"
+ "\n".join(lstr)
)
raise Exception(msg)
# ------------------
# check other inputs
(
part_traj, _, part_radius, config,
approx, _, block
) = _check_calc_solidangle_particle(
traj=part_traj,
pts=False,
rad=part_radius,
config=config,
approx=approx,
aniso=False,
block=block,
)
# ------------------
# Define the volume to be sampled: smallest vessel
# Get kwdargs for LOS blocking
kwdargs = config.get_kwdargs_LOS_isVis()
# derive limits for sampling
limits_r = np.r_[
np.min(kwdargs['ves_poly'][0, :]),
np.max(kwdargs['ves_poly'][0, :]),
]
limits_z = np.r_[
np.min(kwdargs['ves_poly'][1, :]),
np.max(kwdargs['ves_poly'][1, :]),
]
return _GG.compute_solid_angle_map(
part_traj, part_radius,
resolution[0], resolution[1], resolution[2],
limits_r, limits_z,
DR=DR, DZ=DZ,
DPhi=DPhi,
block=block,
approx=approx,
limit_vpoly=kwdargs['ves_poly'],
**kwdargs,
)
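if __name__ == '__main__':
    # Minimal sketch, an illustration only (assumption: the module is run with
    # its package importable, e.g. via ``python -m``): solid angle subtended by
    # a single 1 mm particle seen from two observation points, with no Config
    # available so block=False, using the inputs documented in
    # calc_solidangle_particle above.
    pts = np.array([[1., 2.], [0., 0.], [0., 0.]])     # (3, M) observer points
    part_traj = np.array([[0.], [0.], [0.]])           # (3, N) particle positions
    sang = calc_solidangle_particle(
        pts=pts,
        part_traj=part_traj,
        part_radius=1.e-3,
        config=None,
        block=False,
    )
    print(sang.shape, sang)   # one solid angle per (pts, trajectory) pair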
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'StaticPlaceholder.site'
db.alter_column(u'cms_staticplaceholder', 'site_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'], null=True))
def backwards(self, orm):
# Changing field 'StaticPlaceholder.site'
db.alter_column(u'cms_staticplaceholder', 'site_id', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['sites.Site']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'unique_together': "(('code', 'site'),)", 'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import functools
import inspect
from abc import abstractmethod
from collections import defaultdict
import six
from pants.base.build_file_target_factory import BuildFileTargetFactory
from pants.build_graph.target import Target
from pants.util.memo import memoized_property
class TargetMacro(object):
"""A specialized context aware object factory responsible for instantiating a set of target types.
The macro acts to expand arguments to its alias in a BUILD file into one or more target
addressable instances. This is primarily useful for hiding true target type constructors from
BUILD file authors and providing an extra layer of control over core target parameters like `name`
and `dependencies`.
"""
class Factory(BuildFileTargetFactory):
"""Creates new target macros specialized for a particular BUILD file parse context."""
@classmethod
def wrap(cls, context_aware_object_factory, *target_types):
"""Wraps an existing context aware object factory into a target macro factory.
:param context_aware_object_factory: The existing context aware object factory.
:param *target_types: One or more target types the context aware object factory creates.
:returns: A new target macro factory.
:rtype: :class:`TargetMacro.Factory`
"""
if not target_types:
raise ValueError('The given `context_aware_object_factory` {} must expand at least 1 '
'produced type; none were registered'.format(context_aware_object_factory))
class Factory(cls):
@property
def target_types(self):
return target_types
def macro(self, parse_context):
class Macro(TargetMacro):
def expand(self, *args, **kwargs):
context_aware_object_factory(parse_context, *args, **kwargs)
return Macro()
return Factory()
@abstractmethod
def macro(self, parse_context):
"""Returns a new target macro that can create targets in the given parse context.
:param parse_context: The parse context the target macro will expand targets in.
:type parse_context: :class:`pants.base.parse_context.ParseContext`
:rtype: :class:`TargetMacro`
"""
def target_macro(self, parse_context):
"""Returns a new target macro that can create targets in the given parse context.
The target macro will also act as a build file target factory and report the target types it
creates.
:param parse_context: The parse context the target macro will expand targets in.
:type parse_context: :class:`pants.base.parse_context.ParseContext`
:rtype: :class:`BuildFileTargetFactory` & :class:`TargetMacro`
"""
macro = self.macro(parse_context)
class BuildFileTargetFactoryMacro(BuildFileTargetFactory, TargetMacro):
@property
def target_types(_):
return self.target_types
expand = macro.expand
return BuildFileTargetFactoryMacro()
def __call__(self, *args, **kwargs):
self.expand(*args, **kwargs)
@abstractmethod
def expand(self, *args, **kwargs):
"""Expands the given BUILD file arguments in to one or more target addressable instances."""
class BuildFileAliases(object):
"""A structure containing sets of symbols to be exposed in BUILD files.
There are three types of symbols that can be directly exposed:
- targets: These are Target subclasses or TargetMacro.Factory instances.
- objects: These are any python object, from constants to types.
- context_aware_object_factories: These are object factories that are passed a ParseContext and
produce one or more objects that use data from the context to enable some feature or utility;
you might call them a BUILD file "macro" since they expand parameters to some final, "real"
BUILD file object. Common uses include creating objects that must be aware of the current
BUILD file path or functions that need to be able to create targets or objects from within the
BUILD file parse.
"""
@classmethod
def curry_context(cls, wrappee):
"""Curry a function with a build file context.
Given a function foo(ctx, bar) that you want to expose in BUILD files
as foo(bar), use::
context_aware_object_factories={
'foo': BuildFileAliases.curry_context(foo),
}
"""
# You might wonder: why not just use lambda and functools.partial?
# That loses the __doc__, thus messing up the BUILD dictionary.
wrapper = lambda ctx: functools.partial(wrappee, ctx)
wrapper.__doc__ = wrappee.__doc__
wrapper.__name__ = str(".".join(["curry_context",
wrappee.__module__,
wrappee.__name__]))
return wrapper
@staticmethod
def _is_target_type(obj):
return inspect.isclass(obj) and issubclass(obj, Target)
@staticmethod
def _is_target_macro_factory(obj):
return isinstance(obj, TargetMacro.Factory)
@classmethod
def _validate_alias(cls, category, alias, obj):
if not isinstance(alias, six.string_types):
raise TypeError('Aliases must be strings, given {category} entry {alias!r} of type {typ} as '
'the alias of {obj}'
.format(category=category, alias=alias, typ=type(alias).__name__, obj=obj))
@classmethod
def _validate_not_targets(cls, category, alias, obj):
if cls._is_target_type(obj):
      raise TypeError('The {category} entry {alias!r} is a Target subclass - these should be '
'registered via the `targets` parameter'
.format(category=category, alias=alias))
if cls._is_target_macro_factory(obj):
raise TypeError('The {category} entry {alias!r} is a TargetMacro.Factory instance - these '
'should be registered via the `targets` parameter'
.format(category=category, alias=alias))
@classmethod
def _validate_targets(cls, targets):
if not targets:
return {}, {}
target_types = {}
target_macro_factories = {}
for alias, obj in targets.items():
cls._validate_alias('targets', alias, obj)
if cls._is_target_type(obj):
target_types[alias] = obj
elif cls._is_target_macro_factory(obj):
target_macro_factories[alias] = obj
else:
raise TypeError('Only Target types and TargetMacro.Factory instances can be registered '
'via the `targets` parameter, given item {alias!r} with value {value} of '
'type {typ}'.format(alias=alias, value=obj, typ=type(obj).__name__))
return target_types, target_macro_factories
@classmethod
def _validate_objects(cls, objects):
if not objects:
return {}
for alias, obj in objects.items():
cls._validate_alias('objects', alias, obj)
cls._validate_not_targets('objects', alias, obj)
return objects.copy()
@classmethod
def _validate_context_aware_object_factories(cls, context_aware_object_factories):
if not context_aware_object_factories:
return {}
for alias, obj in context_aware_object_factories.items():
cls._validate_alias('context_aware_object_factories', alias, obj)
cls._validate_not_targets('context_aware_object_factories', alias, obj)
if not callable(obj):
raise TypeError('The given context aware object factory {alias!r} must be a callable.'
.format(alias=alias))
return context_aware_object_factories.copy()
def __init__(self, targets=None, objects=None, context_aware_object_factories=None):
"""
:param dict targets: A mapping from string aliases to Target subclasses or TargetMacro.Factory
instances
:param dict objects: A mapping from string aliases to arbitrary objects.
:param dict context_aware_object_factories: A mapping from string aliases to context aware
object factory callables.
"""
self._target_types, self._target_macro_factories = self._validate_targets(targets)
self._objects = self._validate_objects(objects)
self._context_aware_object_factories = self._validate_context_aware_object_factories(
context_aware_object_factories)
@property
def target_types(self):
"""Returns a mapping from string aliases to Target subclasses.
:rtype: dict
"""
return self._target_types
@property
def target_macro_factories(self):
"""Returns a mapping from string aliases to TargetMacro.Factory instances.
:rtype: dict
"""
return self._target_macro_factories
@property
def objects(self):
"""Returns a mapping from string aliases to arbitrary objects.
:rtype: dict
"""
return self._objects
@property
def context_aware_object_factories(self):
"""Returns a mapping from string aliases to context aware object factory callables.
:rtype: dict
"""
return self._context_aware_object_factories
@memoized_property
def target_types_by_alias(self):
"""Returns a mapping from target alias to the target types produced for that alias.
Normally there is 1 target type per alias, but macros can expand a single alias to several
target types.
:rtype: dict
"""
target_types_by_alias = defaultdict(set)
for alias, target_type in self.target_types.items():
target_types_by_alias[alias].add(target_type)
for alias, target_macro_factory in self.target_macro_factories.items():
target_types_by_alias[alias].update(target_macro_factory.target_types)
return dict(target_types_by_alias)
def merge(self, other):
"""Merges a set of build file aliases and returns a new set of aliases containing both.
    Any duplicate aliases from `other` take precedence.
:param other: The BuildFileAliases to merge in.
:type other: :class:`BuildFileAliases`
:returns: A new BuildFileAliases containing `other`'s aliases merged into ours.
:rtype: :class:`BuildFileAliases`
"""
if not isinstance(other, BuildFileAliases):
raise TypeError('Can only merge other BuildFileAliases, given {0}'.format(other))
def merge(*items):
merged = {}
for item in items:
merged.update(item)
return merged
targets = merge(self.target_types, self.target_macro_factories,
other.target_types, other.target_macro_factories)
objects = merge(self.objects, other.objects)
context_aware_object_factories=merge(self.context_aware_object_factories,
other.context_aware_object_factories)
return BuildFileAliases(targets=targets,
objects=objects,
context_aware_object_factories=context_aware_object_factories)
def _tuple(self):
tuplize = lambda d: tuple(sorted(d.items()))
return (tuplize(self._target_types),
tuplize(self._target_macro_factories),
tuplize(self._objects),
tuplize(self._context_aware_object_factories))
def __eq__(self, other):
return isinstance(other, BuildFileAliases) and self._tuple() == other._tuple()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._tuple())
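if __name__ == '__main__':
  # Illustrative sketch (an assumption: run as a script with pants importable),
  # not part of the BUILD file API itself. curry_context wraps a function that
  # takes a parse context so BUILD files can call it without one, preserving
  # the docstring as described above; merge combines two alias sets.
  def greet(ctx, name):
    """Greets a target by name."""
    return 'hello {} from {}'.format(name, ctx)
  curried = BuildFileAliases.curry_context(greet)
  print(curried.__doc__)                   # 'Greets a target by name.'
  print(curried('<parse context>')('demo'))
  merged = BuildFileAliases(objects={'answer': 42}).merge(
    BuildFileAliases(objects={'answer': 43, 'greet': curried}))
  print(merged.objects)                    # duplicate alias from `other` wins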
|
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import logging
from typing import TYPE_CHECKING, cast
import six
from azure.core.exceptions import HttpResponseError
from azure.core.tracing.decorator import distributed_trace
from . import DecryptResult, EncryptResult, SignResult, VerifyResult, UnwrapResult, WrapResult
from ._key_validity import raise_if_time_invalid
from ._providers import get_local_cryptography_provider, NoLocalCryptography
from .. import KeyOperation
from .._models import JsonWebKey, KeyVaultKey
from .._shared import KeyVaultClientBase, parse_key_vault_id
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from datetime import datetime
from typing import Any, Optional, Union
from azure.core.credentials import TokenCredential
from . import EncryptionAlgorithm, KeyWrapAlgorithm, SignatureAlgorithm
from .._shared import KeyVaultResourceId
_LOGGER = logging.getLogger(__name__)
def _validate_arguments(operation, algorithm, **kwargs):
# type: (KeyOperation, EncryptionAlgorithm, **Any) -> None
"""Validates the arguments passed to perform an operation with a provided algorithm.
:param KeyOperation operation: the type of operation being requested
:param EncryptionAlgorithm algorithm: the encryption algorithm to use for the operation
:keyword bytes iv: initialization vector
:keyword bytes authentication_tag: authentication tag returned from an encryption
:keyword bytes additional_authenticated_data: data that is authenticated but not encrypted
:raises ValueError: if parameters that are incompatible with the specified algorithm are provided.
"""
iv = kwargs.pop("iv", None)
tag = kwargs.pop("tag", None)
aad = kwargs.pop("aad", None)
if operation == KeyOperation.encrypt:
if iv and "CBC" not in algorithm:
raise ValueError(
"iv should only be provided with AES-CBC algorithms; {} does not accept an iv".format(algorithm)
)
if iv is None and "CBC" in algorithm:
raise ValueError("iv is a required parameter for encryption with AES-CBC algorithms.")
if aad and not ("CBC" in algorithm or "GCM" in algorithm):
raise ValueError(
"additional_authenticated_data should only be provided with AES algorithms; {} does not accept "
"additional authenticated data".format(algorithm)
)
if operation == KeyOperation.decrypt:
if iv and not ("CBC" in algorithm or "GCM" in algorithm):
raise ValueError(
"iv should only be provided with AES algorithms; {} does not accept an iv".format(algorithm)
)
if iv is None and ("CBC" in algorithm or "GCM" in algorithm):
raise ValueError("iv is a required parameter for decryption with AES algorithms.")
if tag and "GCM" not in algorithm:
raise ValueError(
"authentication_tag should only be provided with AES-GCM algorithms; {} does not accept a tag".format(
algorithm
)
)
if tag is None and "GCM" in algorithm:
raise ValueError("authentication_tag is a required parameter for AES-GCM decryption.")
if aad and not ("CBC" in algorithm or "GCM" in algorithm):
raise ValueError(
"additional_authenticated_data should only be provided with AES algorithms; {} does not accept "
"additional authenticated data".format(algorithm)
)
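if __name__ == "__main__":
    # Minimal illustrative sketch, an assumption rather than documented SDK
    # usage: it exercises the validation above without contacting Key Vault.
    # An iv is rejected for a non-CBC algorithm; the plain string "RSA-OAEP"
    # is enough here because only substring checks are performed.
    try:
        _validate_arguments(KeyOperation.encrypt, "RSA-OAEP", iv=b"\x00" * 16)
    except ValueError as error:
        _LOGGER.warning("rejected as expected: %s", error)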
class CryptographyClient(KeyVaultClientBase):
"""Performs cryptographic operations using Azure Key Vault keys.
    This client will perform operations locally when it's initialized with the necessary key material or is able to get
that material from Key Vault. When the required key material is unavailable, cryptographic operations are performed
by the Key Vault service.
:param key:
Either a :class:`~azure.keyvault.keys.KeyVaultKey` instance as returned by
:func:`~azure.keyvault.keys.KeyClient.get_key`, or a string.
If a string, the value must be the identifier of an Azure Key Vault key. Including a version is recommended.
:type key: str or :class:`~azure.keyvault.keys.KeyVaultKey`
:param credential: An object which can provide an access token for the vault, such as a credential from
:mod:`azure.identity`
:keyword api_version: version of the Key Vault API to use. Defaults to the most recent.
:paramtype api_version: ~azure.keyvault.keys.ApiVersion
:keyword transport: transport to use. Defaults to :class:`~azure.core.pipeline.transport.RequestsTransport`.
:paramtype transport: ~azure.core.pipeline.transport.HttpTransport
.. literalinclude:: ../tests/test_examples_crypto.py
:start-after: [START create_client]
:end-before: [END create_client]
:caption: Create a CryptographyClient
:language: python
:dedent: 8
"""
def __init__(self, key, credential, **kwargs):
# type: (Union[KeyVaultKey, str], TokenCredential, **Any) -> None
self._jwk = kwargs.pop("_jwk", False)
self._not_before = None # type: Optional[datetime]
self._expires_on = None # type: Optional[datetime]
self._key_id = None # type: Optional[KeyVaultResourceId]
if isinstance(key, KeyVaultKey):
self._key = key.key # type: Union[JsonWebKey, KeyVaultKey, str, None]
self._key_id = parse_key_vault_id(key.id)
if key.properties._attributes: # pylint:disable=protected-access
self._not_before = key.properties.not_before
self._expires_on = key.properties.expires_on
elif isinstance(key, six.string_types):
self._key = None
self._key_id = parse_key_vault_id(key)
if self._key_id.version is None:
self._key_id.version = "" # to avoid an error and get the latest version when getting the key
self._keys_get_forbidden = False
elif self._jwk:
self._key = key
else:
raise ValueError("'key' must be a KeyVaultKey instance or a key ID string")
if self._jwk:
try:
self._local_provider = get_local_cryptography_provider(cast(JsonWebKey, self._key))
self._initialized = True
except Exception as ex: # pylint:disable=broad-except
six.raise_from(ValueError("The provided jwk is not valid for local cryptography"), ex)
else:
self._local_provider = NoLocalCryptography()
self._initialized = False
self._vault_url = None if (self._jwk or self._key_id is None) else self._key_id.vault_url # type: ignore
super(CryptographyClient, self).__init__(
vault_url=self._vault_url or "vault_url", credential=credential, **kwargs
)
@property
def key_id(self):
# type: () -> Optional[str]
"""The full identifier of the client's key.
This property may be None when a client is constructed with :func:`from_jwk`.
:rtype: str or None
"""
if not self._jwk:
return self._key_id.source_id if self._key_id else None
return cast(JsonWebKey, self._key).kid # type: ignore[attr-defined]
@property
def vault_url(self): # type: ignore
# type: () -> Optional[str]
"""The base vault URL of the client's key.
This property may be None when a client is constructed with :func:`from_jwk`.
:rtype: str or None
"""
return self._vault_url
@classmethod
def from_jwk(cls, jwk):
# type: (Union[JsonWebKey, dict]) -> CryptographyClient
"""Creates a client that can only perform cryptographic operations locally.
:param jwk: the key's cryptographic material, as a JsonWebKey or dictionary.
:type jwk: JsonWebKey or dict
:rtype: CryptographyClient
"""
if not isinstance(jwk, JsonWebKey):
jwk = JsonWebKey(**jwk)
return cls(jwk, object(), _jwk=True) # type: ignore
@distributed_trace
def _initialize(self, **kwargs):
# type: (**Any) -> None
if self._initialized:
return
# try to get the key material, if we don't have it and aren't forbidden to do so
if not (self._key or self._keys_get_forbidden):
try:
key_bundle = self._client.get_key(
self._key_id.vault_url if self._key_id else None,
self._key_id.name if self._key_id else None,
self._key_id.version if self._key_id else None,
**kwargs
)
key = KeyVaultKey._from_key_bundle(key_bundle) # pylint:disable=protected-access
self._key = key.key
self._key_id = parse_key_vault_id(key.id) # update the key ID in case we didn't have the version before
except HttpResponseError as ex:
# if we got a 403, we don't have keys/get permission and won't try to get the key again
# (other errors may be transient)
self._keys_get_forbidden = ex.status_code == 403
# if we have the key material, create a local crypto provider with it
if self._key:
self._local_provider = get_local_cryptography_provider(cast(JsonWebKey, self._key))
self._initialized = True
else:
# try to get the key again next time unless we know we're forbidden to do so
self._initialized = self._keys_get_forbidden
@distributed_trace
def encrypt(self, algorithm, plaintext, **kwargs):
# type: (EncryptionAlgorithm, bytes, **Any) -> EncryptResult
"""Encrypt bytes using the client's key.
Requires the keys/encrypt permission. This method encrypts only a single block of data, whose size depends on
the key and encryption algorithm.
:param algorithm: encryption algorithm to use
:type algorithm: :class:`~azure.keyvault.keys.crypto.EncryptionAlgorithm`
:param bytes plaintext: bytes to encrypt
        :keyword bytes iv: initialization vector. Required only for AES-CBC(PAD) encryption.
:keyword bytes additional_authenticated_data: optional data that is authenticated but not encrypted. For use
with AES-GCM encryption.
:rtype: :class:`~azure.keyvault.keys.crypto.EncryptResult`
:raises ValueError: if parameters that are incompatible with the specified algorithm are provided.
.. literalinclude:: ../tests/test_examples_crypto.py
:start-after: [START encrypt]
:end-before: [END encrypt]
:caption: Encrypt bytes
:language: python
:dedent: 8
"""
iv = kwargs.pop("iv", None)
aad = kwargs.pop("additional_authenticated_data", None)
_validate_arguments(operation=KeyOperation.encrypt, algorithm=algorithm, iv=iv, aad=aad)
self._initialize(**kwargs)
if self._local_provider.supports(KeyOperation.encrypt, algorithm):
raise_if_time_invalid(self._not_before, self._expires_on)
try:
return self._local_provider.encrypt(algorithm, plaintext, iv=iv)
except Exception as ex: # pylint:disable=broad-except
_LOGGER.warning("Local encrypt operation failed: %s", ex, exc_info=_LOGGER.isEnabledFor(logging.DEBUG))
if self._jwk:
raise
elif self._jwk:
raise NotImplementedError(
'This key does not support the "encrypt" operation with algorithm "{}"'.format(algorithm)
)
operation_result = self._client.encrypt(
vault_base_url=self._key_id.vault_url if self._key_id else None,
key_name=self._key_id.name if self._key_id else None,
key_version=self._key_id.version if self._key_id else None,
parameters=self._models.KeyOperationsParameters(algorithm=algorithm, value=plaintext, iv=iv, aad=aad),
**kwargs
)
result_iv = operation_result.iv if hasattr(operation_result, "iv") else None
result_tag = operation_result.authentication_tag if hasattr(operation_result, "authentication_tag") else None
result_aad = (
operation_result.additional_authenticated_data
if hasattr(operation_result, "additional_authenticated_data")
else None
)
return EncryptResult(
key_id=self.key_id,
algorithm=algorithm,
ciphertext=operation_result.result,
iv=result_iv,
authentication_tag=result_tag,
additional_authenticated_data=result_aad,
)
@distributed_trace
def decrypt(self, algorithm, ciphertext, **kwargs):
# type: (EncryptionAlgorithm, bytes, **Any) -> DecryptResult
"""Decrypt a single block of encrypted data using the client's key.
Requires the keys/decrypt permission. This method decrypts only a single block of data, whose size depends on
the key and encryption algorithm.
:param algorithm: encryption algorithm to use
:type algorithm: :class:`~azure.keyvault.keys.crypto.EncryptionAlgorithm`
:param bytes ciphertext: encrypted bytes to decrypt
:keyword bytes iv: the initialization vector used during encryption. Required for AES decryption.
:keyword bytes authentication_tag: the authentication tag generated during encryption. Required only for AES-GCM
decryption.
:keyword bytes additional_authenticated_data: optional data that is authenticated but not encrypted. For use
with AES-GCM decryption.
:rtype: :class:`~azure.keyvault.keys.crypto.DecryptResult`
:raises ValueError: if parameters that are incompatible with the specified algorithm are provided.
.. literalinclude:: ../tests/test_examples_crypto.py
:start-after: [START decrypt]
:end-before: [END decrypt]
:caption: Decrypt bytes
:language: python
:dedent: 8
"""
iv = kwargs.pop("iv", None)
tag = kwargs.pop("authentication_tag", None)
aad = kwargs.pop("additional_authenticated_data", None)
_validate_arguments(operation=KeyOperation.decrypt, algorithm=algorithm, iv=iv, tag=tag, aad=aad)
self._initialize(**kwargs)
if self._local_provider.supports(KeyOperation.decrypt, algorithm):
try:
return self._local_provider.decrypt(algorithm, ciphertext, iv=iv)
except Exception as ex: # pylint:disable=broad-except
_LOGGER.warning("Local decrypt operation failed: %s", ex, exc_info=_LOGGER.isEnabledFor(logging.DEBUG))
if self._jwk:
raise
elif self._jwk:
raise NotImplementedError(
'This key does not support the "decrypt" operation with algorithm "{}"'.format(algorithm)
)
operation_result = self._client.decrypt(
vault_base_url=self._key_id.vault_url if self._key_id else None,
key_name=self._key_id.name if self._key_id else None,
key_version=self._key_id.version if self._key_id else None,
parameters=self._models.KeyOperationsParameters(
algorithm=algorithm, value=ciphertext, iv=iv, tag=tag, aad=aad
),
**kwargs
)
return DecryptResult(key_id=self.key_id, algorithm=algorithm, plaintext=operation_result.result)
@distributed_trace
def wrap_key(self, algorithm, key, **kwargs):
# type: (KeyWrapAlgorithm, bytes, **Any) -> WrapResult
"""Wrap a key with the client's key.
Requires the keys/wrapKey permission.
:param algorithm: wrapping algorithm to use
:type algorithm: :class:`~azure.keyvault.keys.crypto.KeyWrapAlgorithm`
:param bytes key: key to wrap
:rtype: :class:`~azure.keyvault.keys.crypto.WrapResult`
.. literalinclude:: ../tests/test_examples_crypto.py
:start-after: [START wrap_key]
:end-before: [END wrap_key]
:caption: Wrap a key
:language: python
:dedent: 8
"""
self._initialize(**kwargs)
if self._local_provider.supports(KeyOperation.wrap_key, algorithm):
raise_if_time_invalid(self._not_before, self._expires_on)
try:
return self._local_provider.wrap_key(algorithm, key)
except Exception as ex: # pylint:disable=broad-except
_LOGGER.warning("Local wrap operation failed: %s", ex, exc_info=_LOGGER.isEnabledFor(logging.DEBUG))
if self._jwk:
raise
elif self._jwk:
raise NotImplementedError(
'This key does not support the "wrapKey" operation with algorithm "{}"'.format(algorithm)
)
operation_result = self._client.wrap_key(
vault_base_url=self._key_id.vault_url if self._key_id else None,
key_name=self._key_id.name if self._key_id else None,
key_version=self._key_id.version if self._key_id else None,
parameters=self._models.KeyOperationsParameters(algorithm=algorithm, value=key),
**kwargs
)
return WrapResult(key_id=self.key_id, algorithm=algorithm, encrypted_key=operation_result.result)
@distributed_trace
def unwrap_key(self, algorithm, encrypted_key, **kwargs):
# type: (KeyWrapAlgorithm, bytes, **Any) -> UnwrapResult
"""Unwrap a key previously wrapped with the client's key.
Requires the keys/unwrapKey permission.
:param algorithm: wrapping algorithm to use
:type algorithm: :class:`~azure.keyvault.keys.crypto.KeyWrapAlgorithm`
:param bytes encrypted_key: the wrapped key
:rtype: :class:`~azure.keyvault.keys.crypto.UnwrapResult`
.. literalinclude:: ../tests/test_examples_crypto.py
:start-after: [START unwrap_key]
:end-before: [END unwrap_key]
:caption: Unwrap a key
:language: python
:dedent: 8
"""
self._initialize(**kwargs)
if self._local_provider.supports(KeyOperation.unwrap_key, algorithm):
try:
return self._local_provider.unwrap_key(algorithm, encrypted_key)
except Exception as ex: # pylint:disable=broad-except
_LOGGER.warning("Local unwrap operation failed: %s", ex, exc_info=_LOGGER.isEnabledFor(logging.DEBUG))
if self._jwk:
raise
elif self._jwk:
raise NotImplementedError(
'This key does not support the "unwrapKey" operation with algorithm "{}"'.format(algorithm)
)
operation_result = self._client.unwrap_key(
vault_base_url=self._key_id.vault_url if self._key_id else None,
key_name=self._key_id.name if self._key_id else None,
key_version=self._key_id.version if self._key_id else None,
parameters=self._models.KeyOperationsParameters(algorithm=algorithm, value=encrypted_key),
**kwargs
)
return UnwrapResult(key_id=self.key_id, algorithm=algorithm, key=operation_result.result)
@distributed_trace
def sign(self, algorithm, digest, **kwargs):
# type: (SignatureAlgorithm, bytes, **Any) -> SignResult
"""Create a signature from a digest using the client's key.
Requires the keys/sign permission.
:param algorithm: signing algorithm
:type algorithm: :class:`~azure.keyvault.keys.crypto.SignatureAlgorithm`
:param bytes digest: hashed bytes to sign
:rtype: :class:`~azure.keyvault.keys.crypto.SignResult`
.. literalinclude:: ../tests/test_examples_crypto.py
:start-after: [START sign]
:end-before: [END sign]
:caption: Sign bytes
:language: python
:dedent: 8
"""
self._initialize(**kwargs)
if self._local_provider.supports(KeyOperation.sign, algorithm):
raise_if_time_invalid(self._not_before, self._expires_on)
try:
return self._local_provider.sign(algorithm, digest)
except Exception as ex: # pylint:disable=broad-except
_LOGGER.warning("Local sign operation failed: %s", ex, exc_info=_LOGGER.isEnabledFor(logging.DEBUG))
if self._jwk:
raise
elif self._jwk:
raise NotImplementedError(
'This key does not support the "sign" operation with algorithm "{}"'.format(algorithm)
)
operation_result = self._client.sign(
vault_base_url=self._key_id.vault_url if self._key_id else None,
key_name=self._key_id.name if self._key_id else None,
key_version=self._key_id.version if self._key_id else None,
parameters=self._models.KeySignParameters(algorithm=algorithm, value=digest),
**kwargs
)
return SignResult(key_id=self.key_id, algorithm=algorithm, signature=operation_result.result)
@distributed_trace
def verify(self, algorithm, digest, signature, **kwargs):
# type: (SignatureAlgorithm, bytes, bytes, **Any) -> VerifyResult
"""Verify a signature using the client's key.
Requires the keys/verify permission.
:param algorithm: verification algorithm
:type algorithm: :class:`~azure.keyvault.keys.crypto.SignatureAlgorithm`
:param bytes digest: Pre-hashed digest corresponding to **signature**. The hash algorithm used must be
compatible with **algorithm**.
:param bytes signature: signature to verify
:rtype: :class:`~azure.keyvault.keys.crypto.VerifyResult`
.. literalinclude:: ../tests/test_examples_crypto.py
:start-after: [START verify]
:end-before: [END verify]
:caption: Verify a signature
:language: python
:dedent: 8
"""
self._initialize(**kwargs)
if self._local_provider.supports(KeyOperation.verify, algorithm):
try:
return self._local_provider.verify(algorithm, digest, signature)
except Exception as ex: # pylint:disable=broad-except
_LOGGER.warning("Local verify operation failed: %s", ex, exc_info=_LOGGER.isEnabledFor(logging.DEBUG))
if self._jwk:
raise
elif self._jwk:
raise NotImplementedError(
'This key does not support the "verify" operation with algorithm "{}"'.format(algorithm)
)
operation_result = self._client.verify(
vault_base_url=self._key_id.vault_url if self._key_id else None,
key_name=self._key_id.name if self._key_id else None,
key_version=self._key_id.version if self._key_id else None,
parameters=self._models.KeyVerifyParameters(algorithm=algorithm, digest=digest, signature=signature),
**kwargs
)
return VerifyResult(key_id=self.key_id, algorithm=algorithm, is_valid=operation_result.value)
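# A minimal usage sketch (illustrative only; the vault URL, key name, and
# credential below are assumptions, not values from this module):
#
#   from azure.identity import DefaultAzureCredential
#   from azure.keyvault.keys.crypto import CryptographyClient, EncryptionAlgorithm
#
#   credential = DefaultAzureCredential()
#   client = CryptographyClient(
#       "https://<vault-name>.vault.azure.net/keys/<key-name>", credential
#   )
#   result = client.encrypt(EncryptionAlgorithm.rsa_oaep, b"plaintext")
#   decrypted = client.decrypt(EncryptionAlgorithm.rsa_oaep, result.ciphertext)
#   assert decrypted.plaintext == b"plaintext"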
|
|
'''
Reads the contents of a git repository and write a DOT graph file to stdout.
'''
import dulwich.repo
import dulwich.index
import dulwich.objects
import pydot
import subprocess
DEFAULT_FONTNAME = 'Monaco'
DEFAULT_FONTSIZE = '8'
BLOB_CONTENT_LIMIT = 200 # show at most this many bytes of blob content
DEFAULT_FONT = dict(fontname=DEFAULT_FONTNAME, fontsize=DEFAULT_FONTSIZE)
def emit_repo_as_xdot(repo, options):
'''Emits xdot for the given repo on stdout.'''
global graph # TODO: globals are bad mmmmkay
global vertices
vertices = {}
graph = pydot.Graph(verbose=True)
graph.set_bgcolor('#00000000') # transparent background
objstore = repo.object_store
seen = set()
# walk everything in the object store. (this means orphaned nodes will show.)
for sha in objstore:
if not options.blobs and objstore[sha].type_name in ('blob', 'tree'):
continue
walk_node(objstore, seen, sha, options)
for ref in repo.refs.keys():
if ref == 'HEAD': continue # TODO: let this loop handle symbolic refs too
branch_node = add_branch_node(ref)
graph.add_edge(pydot.Edge(branch_node, repo.refs[ref], **edge_opts(style='dotted')))
# do HEAD as a special case
ref = 'HEAD'
nopts = node_opts(label=ref, shape='diamond', style='filled', fillcolor='#ff3333', fontcolor='white', tooltip='Symbolic Ref: HEAD')
head_node = pydot.Node(ref, **nopts)
graph.add_node(head_node)
symref = repo.refs.read_ref(ref)
if symref.startswith('ref: '):
symref = symref[5:]
points_to = add_branch_node(symref)
graph.add_node(points_to)
graph.add_edge(pydot.Edge(head_node, add_branch_node(symref), **edge_opts(style='dotted')))
# index
if options.index:
try:
head_tree = repo['HEAD'].tree
except KeyError:
head_tree = None
index = repo.open_index()
try:
changes = list(index.changes_from_tree(objstore, head_tree))
except TypeError:
# the official dulwich repo throws a TypeError when changes_from_tree is
# called against an empty tree (None)
if head_tree is not None: raise
changes = []
if changes:
index_node = pydot.Node('index', shape='invtriangle', style='filled', fillcolor='#33ff33', fontname=DEFAULT_FONTNAME, fontsize=DEFAULT_FONTSIZE)
graph.add_node(index_node)
for (oldpath, newpath), (oldmode, newmode), (oldsha, newsha) in changes:
graph.add_edge(pydot.Edge(index_node, vert_for_sha(objstore, newsha), label=q(' ' + newpath), fontname=DEFAULT_FONTNAME, fontsize=DEFAULT_FONTSIZE))
# invoke dot -Txdot to turn our DOT file into an xdot file, which canviz is expecting
subprocess.Popen(['dot', '-Txdot'], stdin=subprocess.PIPE).communicate(graph.to_string())
def vert_for_sha(objstore, sha, **opts):
if isinstance(sha, pydot.Node):
sha = sha.sha
vert = vertices.get(sha)
try:
obj = objstore[sha]
except KeyError:
return None
if vert is None:
vertex_opts = vertex_opts_for_obj(obj)
vert = vertices[sha] = pydot.Node(sha, **vertex_opts)
vert.sha = sha
graph.add_node(vert)
vert.obj = obj
return vert
def to_sha(vert):
if not isinstance(vert, str):
return vert.obj.sha().hexdigest()
return vert
def add_edge(a, b, **opts):
edge = pydot.Edge(to_sha(a), to_sha(b), **edge_opts(**opts))
graph.add_edge(edge)
return edge
def walk_node(objstore, seen, sha, options):
vert = vert_for_sha(objstore, sha)
if vert is None or vert in seen: return
seen.add(vert)
obj = vert.obj
# TODO: visitor pattern with polymorphism instead plz
if obj.type_name == 'tree':
if options.blobs:
for stat, filename, sha in vert.obj.entries():
child = vert_for_sha(objstore, sha)
if child is not None:
add_edge(vert, child, label=q(' ' + filename))
walk_node(objstore, seen, child, options)
elif obj.type_name == 'commit':
if options.blobs:
tree = obj.tree
tree_vert = vert_for_sha(objstore, obj.tree)
if tree_vert is not None:
walk_node(objstore, seen, tree, options)
seen.add(tree_vert)
add_edge(vert, tree_vert, weight='1')
num_parents=len(obj.parents)
for i, parent_sha in enumerate(obj.parents):
parent_vert = vert_for_sha(objstore, parent_sha)
weight = num_parents - i + 1
add_edge(vert, parent_vert, weight='%s' % weight)
walk_node(objstore, seen, parent_sha, options)
def add_branch_node(ref):
nopts = node_opts(
label=nice_ref_label(ref),
shape='diamond',
style='filled',
tooltip='Branch: %s' % nice_ref_label(ref))
node = pydot.Node(ref, **nopts)
graph.add_node(node)
return node
def node_opts(**opts):
'Display options for vertices.'
opts.update(DEFAULT_FONT)
return opts
def edge_opts(**opts):
'Display options for edges.'
opts.update(labelfontsize='11', labelfloat="False", **DEFAULT_FONT)
return opts
def q(s):
'''pydot seems to not be quoting colons in labels, even though not doing
so apparently results in invalid DOT files. quote them here.'''
return s.replace(':', r'\:')
def get_blob_content(obj):
"Return the first part of a blob's content for its the label."
blob_content = str(obj).decode('ascii', 'ignore') # TODO: does utf8 just work?
blob_content = blob_content.replace('\0', '').replace('\n', '\\n')
return blob_content[:BLOB_CONTENT_LIMIT]
def vertex_opts_for_obj(obj, **opts):
'Return pydot display options for a git repository object.'
opts = node_opts(**opts)
def shortsha():
return q(obj.sha().hexdigest()[:20])
if obj.type_name == 'commit':
opts.update(
label=q(obj.message),
style='filled',
shape='note',
fillcolor='#ccffcc',
tooltip='Commit: ' + shortsha()
)
elif obj.type_name == 'tree':
opts.update(
shape='folder',
label='tree',
fontcolor='#a0a0a0',
style='filled',
fillcolor='#ffffff',
tooltip='Tree: ' + shortsha()
)
elif obj.type_name == 'blob':
label = q(get_blob_content(obj) or 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391')
opts.update(
style='filled',
fillcolor='#ffffff',
shape='ellipse',
label=label,
tooltip='Blob: ' + shortsha()
)
else:
opts.update(
shape='ellipse',
label=q(repr(obj)),
style='filled',
fillcolor='#ffffff'
)
if 'label' in opts:
opts['label'] = opts['label'].strip()
return opts
def nice_ref_label(ref):
'Formats a ref to be more readable for the graph.'
if ref.startswith('refs/heads'):
label = ref[11:]
elif ref.startswith('refs/remotes'):
label = 'remote: ' + ref[13:]
else:
label = ref
return label
def main(repo_dir, options):
emit_repo_as_xdot(dulwich.repo.Repo(repo_dir), options)
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--no-blobs",
action="store_false", dest="blobs", default=True,
help="don't show blobs")
parser.add_option("--no-index",
action="store_false", dest="index", default=True,
help="don't show the index")
options, args = parser.parse_args()
repo_dir = args[0]
main(repo_dir, options)
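# Usage sketch, assuming this script is saved as git_graph.py (the filename is
# an assumption) and Graphviz's `dot` is on PATH. The xdot output lands on
# stdout because emit_repo_as_xdot pipes the DOT text through `dot -Txdot`:
#
#   python git_graph.py /path/to/repo > repo.xdot
#   python git_graph.py --no-blobs --no-index /path/to/repo > commits_only.xdot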
|
|
"""
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
from __future__ import unicode_literals
from ctypes import addressof, byref, c_double
from django.contrib.gis import gdal
from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.mutable_list import ListMixin
from django.contrib.gis.geos.prepared import PreparedGeometry
from django.contrib.gis.geos.prototypes.io import (
ewkb_w, wkb_r, wkb_w, wkt_r, wkt_w,
)
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
# Raise GEOSIndexError instead of plain IndexError
# (see ticket #4740 and GEOSIndexError docstring)
_IndexError = GEOSIndexError
_GEOS_CLASSES = None
ptr_type = GEOM_PTR
has_cs = False # Only Point, LineString, LinearRing have coordinate sequences
def __init__(self, geo_input, srid=None):
"""
The base constructor for GEOS geometry objects. It may take the
following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Spatial Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, bytes):
geo_input = force_text(geo_input)
if isinstance(geo_input, six.string_types):
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'):
srid = int(wkt_m.group('srid'))
g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(force_bytes(geo_input))
elif json_regex.match(geo_input):
# Handling GeoJSON input.
if not gdal.HAS_GDAL:
raise ValueError('Initializing geometry from JSON input requires GDAL.')
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
raise ValueError('String or unicode input unrecognized as WKT, EWKT, or HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
# When the input is a pointer to a geometry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, six.memoryview):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if g:
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int):
self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
if GEOSGeometry._GEOS_CLASSES is None:
# Lazy-loaded variable to avoid import conflicts with GEOSGeometry.
from .linestring import LineString, LinearRing
from .point import Point
from .polygon import Polygon
from .collections import (
GeometryCollection, MultiPoint, MultiLineString, MultiPolygon)
GEOSGeometry._GEOS_CLASSES = {
0: Point,
1: LineString,
2: LinearRing,
3: Polygon,
4: MultiPoint,
5: MultiLineString,
6: MultiPolygon,
7: GeometryCollection,
}
self.__class__ = GEOSGeometry._GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr and capi:
capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"EWKT is used for the string representation."
return self.ewkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return bytes(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(six.memoryview(wkb))
if not ptr:
raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
Equivalence testing; a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, six.string_types):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
def __ne__(self, other):
"The not equals operator."
return not (self == other)
# ### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
# #### Coordinate Sequence Routines ####
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
# #### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr).decode()
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
# #### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
are empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
@property
def valid_reason(self):
"""
Returns a string containing the reason for any invalidity.
"""
return capi.geos_isvalidreason(self.ptr).decode()
# #### Binary predicates. ####
def contains(self, other):
"Returns true if other.within(this) returns true."
return capi.geos_contains(self.ptr, other.ptr)
def crosses(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T****** (for a point and a curve, a point and an area or a line and
an area) or 0******** (for two curves).
"""
return capi.geos_crosses(self.ptr, other.ptr)
def disjoint(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FF*FF****.
"""
return capi.geos_disjoint(self.ptr, other.ptr)
def equals(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**FFF*.
"""
return capi.geos_equals(self.ptr, other.ptr)
def equals_exact(self, other, tolerance=0):
"""
Returns true if the two Geometries are exactly equal, up to a
specified tolerance.
"""
return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))
def intersects(self, other):
"Returns true if disjoint returns false."
return capi.geos_intersects(self.ptr, other.ptr)
def overlaps(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T***T** (for two points or two surfaces) or 1*T***T** (for two curves).
"""
return capi.geos_overlaps(self.ptr, other.ptr)
def relate_pattern(self, other, pattern):
"""
Returns true if the elements in the DE-9IM intersection matrix for the
two Geometries match the elements in pattern.
"""
if not isinstance(pattern, six.string_types) or len(pattern) > 9:
raise GEOSException('invalid intersection matrix pattern')
return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))
def touches(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FT*******, F**T***** or F***T****.
"""
return capi.geos_touches(self.ptr, other.ptr)
def within(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**F***.
"""
return capi.geos_within(self.ptr, other.ptr)
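# Illustrative example of the binary predicates above, assuming a working GEOS
# installation (values are what GEOS is expected to return):
#
#   a = GEOSGeometry('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))')
#   b = GEOSGeometry('POINT(1 1)')
#   a.contains(b)                     # True
#   b.within(a)                       # True
#   a.relate_pattern(b, 'T*****FF*')  # True -- the DE-9IM "contains" pattern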
# #### SRID Routines ####
def get_srid(self):
"Gets the SRID for the geometry, returns None if no SRID is set."
s = capi.geos_get_srid(self.ptr)
if s == 0:
return None
else:
return s
def set_srid(self, srid):
"Sets the SRID for the geometry."
capi.geos_set_srid(self.ptr, srid)
srid = property(get_srid, set_srid)
# #### Output Routines ####
@property
def ewkt(self):
"""
Returns the EWKT (SRID + WKT) of the Geometry. Note that Z values
are only included in this representation if GEOS >= 3.3.0.
"""
if self.get_srid():
return 'SRID=%s;%s' % (self.srid, self.wkt)
else:
return self.wkt
@property
def wkt(self):
"Returns the WKT (Well-Known Text) representation of this Geometry."
return wkt_w(3 if self.hasz else 2).write(self).decode()
@property
def hex(self):
"""
Returns the WKB of this Geometry in hexadecimal form. Please note
that the SRID is not included in this representation because it is not
a part of the OGC specification (use the `hexewkb` property instead).
"""
# A possible faster, all-python, implementation:
# str(self.wkb).encode('hex')
return wkb_w(3 if self.hasz else 2).write_hex(self)
@property
def hexewkb(self):
"""
Returns the EWKB of this Geometry in hexadecimal form. This is an
extension of the WKB specification that includes the SRID value that is
a part of this geometry.
"""
return ewkb_w(3 if self.hasz else 2).write_hex(self)
@property
def json(self):
"""
Returns GeoJSON representation of this Geometry if GDAL is installed.
"""
if gdal.HAS_GDAL:
return self.ogr.json
else:
raise GEOSException('GeoJSON output only supported when GDAL is installed.')
geojson = json
@property
def wkb(self):
"""
Returns the WKB (Well-Known Binary) representation of this Geometry
as a Python buffer. SRID and Z values are not included, use the
`ewkb` property instead.
"""
return wkb_w(3 if self.hasz else 2).write(self)
@property
def ewkb(self):
"""
Return the EWKB representation of this Geometry as a Python buffer.
This is an extension of the WKB specification that includes any SRID
value that is a part of this geometry.
"""
return ewkb_w(3 if self.hasz else 2).write(self)
@property
def kml(self):
"Returns the KML representation of this Geometry."
gtype = self.geom_type
return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)
@property
def prepared(self):
"""
Returns a PreparedGeometry corresponding to this geometry -- it is
optimized for the contains, intersects, and covers operations.
"""
return PreparedGeometry(self)
# #### GDAL-specific output routines ####
@property
def ogr(self):
"Returns the OGR Geometry for this Geometry."
if not gdal.HAS_GDAL:
raise GEOSException('GDAL required to convert to an OGRGeometry.')
if self.srid:
try:
return gdal.OGRGeometry(self.wkb, self.srid)
except gdal.SRSException:
pass
return gdal.OGRGeometry(self.wkb)
@property
def srs(self):
"Returns the OSR SpatialReference for SRID of this Geometry."
if not gdal.HAS_GDAL:
raise GEOSException('GDAL required to return a SpatialReference object.')
if self.srid:
try:
return gdal.SpatialReference(self.srid)
except gdal.SRSException:
pass
return None
@property
def crs(self):
"Alias for `srs` property."
return self.srs
def transform(self, ct, clone=False):
"""
Requires GDAL. Transforms the geometry according to the given
transformation object, which may be an integer SRID, a WKT string, or a
PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead.
"""
srid = self.srid
if ct == srid:
# short-circuit where source & dest SRIDs match
if clone:
return self.clone()
else:
return
if (srid is None) or (srid < 0):
raise GEOSException("Calling transform() with no SRID set is not supported")
if not gdal.HAS_GDAL:
raise GEOSException("GDAL library is not available to transform() geometry.")
# Creating an OGR Geometry, which is then transformed.
g = self.ogr
g.transform(ct)
# Getting a new GEOS pointer
ptr = wkb_r().read(g.wkb)
if clone:
# User wants a cloned transformed geometry returned.
return GEOSGeometry(ptr, srid=g.srid)
if ptr:
# Reassigning pointer, and performing post-initialization setup
# again due to the reassignment.
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(g.srid)
else:
raise GEOSException('Transformed WKB was invalid.')
# #### Topology Routines ####
def _topology(self, gptr):
"Helper routine to return Geometry from the given pointer."
return GEOSGeometry(gptr, srid=self.srid)
@property
def boundary(self):
"Returns the boundary as a newly allocated Geometry object."
return self._topology(capi.geos_boundary(self.ptr))
def buffer(self, width, quadsegs=8):
"""
Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry. The optional third parameter sets
the number of segments used to approximate a quarter circle (defaults to 8).
(Text from PostGIS documentation at ch. 6.1.3)
"""
return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))
@property
def centroid(self):
"""
The centroid is equal to the centroid of the set of component Geometries
of highest dimension (since the lower-dimension geometries contribute zero
"weight" to the centroid).
"""
return self._topology(capi.geos_centroid(self.ptr))
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points
in the Geometry.
"""
return self._topology(capi.geos_convexhull(self.ptr))
def difference(self, other):
"""
Returns a Geometry representing the points making up this Geometry
that do not make up other.
"""
return self._topology(capi.geos_difference(self.ptr, other.ptr))
@property
def envelope(self):
"Return the envelope for this geometry (a polygon)."
return self._topology(capi.geos_envelope(self.ptr))
def intersection(self, other):
"Returns a Geometry representing the points shared by this Geometry and other."
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
@property
def point_on_surface(self):
"Computes an interior point of this Geometry."
return self._topology(capi.geos_pointonsurface(self.ptr))
def relate(self, other):
"Returns the DE-9IM intersection matrix for this Geometry and the other."
return capi.geos_relate(self.ptr, other.ptr).decode()
def simplify(self, tolerance=0.0, preserve_topology=False):
"""
Returns the Geometry, simplified using the Douglas-Peucker algorithm
to the specified tolerance (higher tolerance => fewer points). If no
tolerance is provided, it defaults to 0.
By default, this function does not preserve topology - e.g. polygons can
be split, collapse to lines or disappear. Holes can be created or
disappear, and lines can cross. By specifying preserve_topology=True,
the result will have the same dimension and number of components as the
input. This is significantly slower.
"""
if preserve_topology:
return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
else:
return self._topology(capi.geos_simplify(self.ptr, tolerance))
def sym_difference(self, other):
"""
Returns a set combining the points in this Geometry not in other,
and the points in other not in this Geometry.
"""
return self._topology(capi.geos_symdifference(self.ptr, other.ptr))
def union(self, other):
"Returns a Geometry representing all the points in this Geometry and other."
return self._topology(capi.geos_union(self.ptr, other.ptr))
# #### Other Routines ####
@property
def area(self):
"Returns the area of the Geometry."
return capi.geos_area(self.ptr, byref(c_double()))
def distance(self, other):
"""
Returns the distance between the closest points on this Geometry
and the other. Units will be in those of the coordinate system of
the Geometry.
"""
if not isinstance(other, GEOSGeometry):
raise TypeError('distance() works only on other GEOS Geometries.')
return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
@property
def extent(self):
"""
Returns the extent of this geometry as a 4-tuple, consisting of
(xmin, ymin, xmax, ymax).
"""
from .point import Point
env = self.envelope
if isinstance(env, Point):
xmin, ymin = env.tuple
xmax, ymax = xmin, ymin
else:
xmin, ymin = env[0][0]
xmax, ymax = env[0][2]
return (xmin, ymin, xmax, ymax)
@property
def length(self):
"""
Returns the length of this Geometry (e.g., 0 for point, or the
circumference of a Polygon).
"""
return capi.geos_length(self.ptr, byref(c_double()))
def clone(self):
"Clones this Geometry."
return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
class ProjectInterpolateMixin(object):
"""
Used for LineString and MultiLineString.
"""
def interpolate(self, distance):
return self._topology(capi.geos_interpolate(self.ptr, distance))
def interpolate_normalized(self, distance):
return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))
def project(self, point):
from .point import Point
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
return capi.geos_project(self.ptr, point.ptr)
def project_normalized(self, point):
from .point import Point
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
return capi.geos_project_normalized(self.ptr, point.ptr)
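# A brief usage sketch (not part of this module), assuming GEOS and Django's
# GIS stack are available:
#
#   from django.contrib.gis.geos import GEOSGeometry
#
#   poly = GEOSGeometry('SRID=4326;POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
#   line = GEOSGeometry('LINESTRING(-1 0.5, 2 0.5)', srid=4326)
#   poly.intersects(line)    # True
#   clipped = poly & line    # same as poly.intersection(line)
#   clipped.geom_type        # 'LineString'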
|
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for visualization component."""
import os
import json
import tempfile
from unittest.mock import patch
import mock
from pytorch_kfp_components.components.visualization.component import Visualization
from pytorch_kfp_components.components.visualization.executor import Executor
import pytest
metdata_dir = tempfile.mkdtemp()
@pytest.fixture(scope="class")
def viz_params():
"""Setting visualization parameters.
Returns:
viz_param : dict of visualization parameters.
"""
markdown_params = {
"storage": "dummy-storage",
"source": {
"dummy_key": "dummy_value"
},
}
viz_param = {
"mlpipeline_ui_metadata":
os.path.join(metdata_dir, "mlpipeline_ui_metadata.json"),
"mlpipeline_metrics":
os.path.join(metdata_dir, "mlpipeline_metrics"),
"confusion_matrix_dict": {},
"test_accuracy":
99.05,
"markdown":
markdown_params,
}
return viz_param
@pytest.fixture(scope="class")
def confusion_matrix_params():
"""Setting the confusion matrix parameters.
Returns:
confusion_matrix_param : dict of confusion matrix params
"""
confusion_matrix_param = {
"actuals": ["1", "2", "3", "4"],
"preds": ["2", "3", "4", "0"],
"classes": ["dummy", "dummy"],
"url": "minio://dummy_bucket/folder_name",
}
return confusion_matrix_param
def generate_visualization(viz_params: dict): #pylint: disable=redefined-outer-name
"""Generates the visualization object.
Returns:
output_dict : output dict of the visualization object.
"""
viz_obj = Visualization(
mlpipeline_ui_metadata=viz_params["mlpipeline_ui_metadata"],
mlpipeline_metrics=viz_params["mlpipeline_metrics"],
confusion_matrix_dict=viz_params["confusion_matrix_dict"],
test_accuracy=viz_params["test_accuracy"],
markdown=viz_params["markdown"],
)
return viz_obj.output_dict
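# Context for the assertions below: the Visualization component writes the
# standard Kubeflow Pipelines artifacts -- markdown and confusion-matrix
# entries are appended to the mlpipeline_ui_metadata JSON file, and the test
# accuracy is written to the mlpipeline_metrics JSON file -- so the tests read
# those files back to verify the output.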
@pytest.mark.parametrize(
"viz_key",
[
"confusion_matrix_dict",
"test_accuracy",
"markdown",
],
)
def test_invalid_type_viz_params(viz_params, viz_key): #pylint: disable=redefined-outer-name
"""Test visualization for invalid parameter type."""
viz_params[viz_key] = "dummy"
if viz_key == "test_accuracy":
expected_type = "<class 'float'>"
else:
expected_type = "<class 'dict'>"
expected_exception_msg = f"{viz_key} must be of type {expected_type} but" \
f" received as {type(viz_params[viz_key])}"
with pytest.raises(TypeError, match=expected_exception_msg):
generate_visualization(viz_params)
@pytest.mark.parametrize(
"viz_key",
[
"mlpipeline_ui_metadata",
"mlpipeline_metrics",
],
)
def test_invalid_type_metadata_path(viz_params, viz_key): #pylint: disable=redefined-outer-name
"""Test visualization with invalid metadata path."""
viz_params[viz_key] = ["dummy"]
expected_exception_msg = f"{viz_key} must be of type <class 'str'> " \
f"but received as {type(viz_params[viz_key])}"
with pytest.raises(TypeError, match=expected_exception_msg):
generate_visualization(viz_params)
@pytest.mark.parametrize(
"viz_key",
[
"mlpipeline_ui_metadata",
"mlpipeline_metrics",
],
)
def test_default_metadata_path(viz_params, viz_key): #pylint: disable=redefined-outer-name
"""Test visualization with default metadata path."""
viz_params[viz_key] = None
expected_output = {
"mlpipeline_ui_metadata": "/mlpipeline-ui-metadata.json",
"mlpipeline_metrics": "/mlpipeline-metrics.json",
}
with patch(
"test_visualization.generate_visualization",
return_value=expected_output,
):
output_dict = generate_visualization(viz_params)
assert output_dict == expected_output
def test_custom_metadata_path(viz_params, tmpdir): #pylint: disable=redefined-outer-name
"""Test visualization with custom metadata path."""
metadata_ui_path = os.path.join(str(tmpdir), "mlpipeline_ui_metadata.json")
metadata_metrics_path = os.path.join(str(tmpdir),
"mlpipeline_metrics.json")
viz_params["mlpipeline_ui_metadata"] = metadata_ui_path
viz_params["mlpipeline_metrics"] = metadata_metrics_path
output_dict = generate_visualization(viz_params)
assert output_dict is not None
assert output_dict["mlpipeline_ui_metadata"] == metadata_ui_path
assert output_dict["mlpipeline_metrics"] == metadata_metrics_path
assert os.path.exists(metadata_ui_path)
assert os.path.exists(metadata_metrics_path)
def test_setting_all_keys_to_none(viz_params): #pylint: disable=redefined-outer-name
"""Test visialization with all parameters set to None tyoe."""
for key in viz_params.keys():
viz_params[key] = None
expected_exception_msg = r"Any one of these keys should be set -" \
r" confusion_matrix_dict, test_accuracy, markdown"
with pytest.raises(ValueError, match=expected_exception_msg):
generate_visualization(viz_params)
def test_accuracy_metric(viz_params): #pylint: disable=redefined-outer-name
"""Test for getting proper accuracy metric."""
output_dict = generate_visualization(viz_params)
assert output_dict is not None
metadata_metric_file = viz_params["mlpipeline_metrics"]
assert os.path.exists(metadata_metric_file)
with open(metadata_metric_file) as file:
data = json.load(file)
assert data["metrics"][0]["numberValue"] == viz_params["test_accuracy"]
def test_markdown_storage_invalid_datatype(viz_params): #pylint: disable=redefined-outer-name
"""Test for passing invalid markdown storage datatype."""
viz_params["markdown"]["storage"] = ["test"]
expected_exception_msg = (
r"storage must be of type <class 'str'> but received as {}".format(
type(viz_params["markdown"]["storage"])))
with pytest.raises(TypeError, match=expected_exception_msg):
generate_visualization(viz_params)
def test_markdown_source_invalid_datatype(viz_params): #pylint: disable=redefined-outer-name
"""Test for passing invalid markdown source datatype."""
viz_params["markdown"]["source"] = "test"
expected_exception_msg = (
r"source must be of type <class 'dict'> but received as {}".format(
type(viz_params["markdown"]["source"])))
with pytest.raises(TypeError, match=expected_exception_msg):
generate_visualization(viz_params)
@pytest.mark.parametrize(
"markdown_key",
[
"source",
"storage",
],
)
def test_markdown_source_missing_key(viz_params, markdown_key): #pylint: disable=redefined-outer-name
"""Test with markdown source missing keys."""
del viz_params["markdown"][markdown_key]
expected_exception_msg = r"Missing mandatory key - {}".format(markdown_key)
with pytest.raises(ValueError, match=expected_exception_msg):
generate_visualization(viz_params)
def test_markdown_success(viz_params): #pylint: disable=redefined-outer-name
"""Test for successful markdown generation."""
output_dict = generate_visualization(viz_params)
assert output_dict is not None
assert "mlpipeline_ui_metadata" in output_dict
assert os.path.exists(output_dict["mlpipeline_ui_metadata"])
with open(output_dict["mlpipeline_ui_metadata"]) as file:
data = file.read()
assert "dummy_key" in data
assert "dummy_value" in data
def test_different_storage_value(viz_params): #pylint: disable=redefined-outer-name
"""Test for different storgae values for markdown."""
viz_params["markdown"]["storage"] = "inline"
output_dict = generate_visualization(viz_params)
assert output_dict is not None
assert "mlpipeline_ui_metadata" in output_dict
assert os.path.exists(output_dict["mlpipeline_ui_metadata"])
with open(output_dict["mlpipeline_ui_metadata"]) as file:
data = file.read()
assert "inline" in data
def test_multiple_metadata_appends(viz_params): #pylint: disable=redefined-outer-name
"""Test for multiple metadata append."""
if os.path.exists(viz_params["mlpipeline_ui_metadata"]):
os.remove(viz_params["mlpipeline_ui_metadata"])
if os.path.exists(viz_params["mlpipeline_metrics"]):
os.remove(viz_params["mlpipeline_metrics"])
generate_visualization(viz_params)
generate_visualization(viz_params)
output_dict = generate_visualization(viz_params)
assert output_dict is not None
assert "mlpipeline_ui_metadata" in output_dict
assert os.path.exists(output_dict["mlpipeline_ui_metadata"])
with open(output_dict["mlpipeline_ui_metadata"]) as file:
data = json.load(file)
assert len(data["outputs"]) == 3
@pytest.mark.parametrize(
"cm_key",
["actuals", "preds", "classes", "url"],
)
def test_confusion_matrix_invalid_types(
viz_params, #pylint: disable=redefined-outer-name
confusion_matrix_params, #pylint: disable=redefined-outer-name
cm_key):
"""Test for invalid type keys for confusion matrix."""
confusion_matrix_params[cm_key] = {"test": "dummy"}
viz_params["confusion_matrix_dict"] = confusion_matrix_params
with pytest.raises(TypeError):
generate_visualization(viz_params)
@pytest.mark.parametrize(
"cm_key",
["actuals", "preds", "classes", "url"],
)
def test_confusion_matrix_optional_check(
viz_params, #pylint: disable=redefined-outer-name
confusion_matrix_params, #pylint: disable=redefined-outer-name
cm_key):
"""Tests for passing confusion matrix keys as optional."""
confusion_matrix_params[cm_key] = {}
viz_params["confusion_matrix_dict"] = confusion_matrix_params
expected_error_msg = f"{cm_key} is not optional. " \
f"Received value: {confusion_matrix_params[cm_key]}"
with pytest.raises(ValueError, match=expected_error_msg):
generate_visualization(viz_params)
@pytest.mark.parametrize(
"cm_key",
["actuals", "preds", "classes", "url"],
)
def test_confusion_matrix_missing_check(
viz_params, #pylint: disable=redefined-outer-name
confusion_matrix_params, #pylint: disable=redefined-outer-name
cm_key):
"""Tests for missing confusion matrix keys."""
del confusion_matrix_params[cm_key]
viz_params["confusion_matrix_dict"] = confusion_matrix_params
expected_error_msg = f"Missing mandatory key - {cm_key}"
with pytest.raises(ValueError, match=expected_error_msg):
generate_visualization(viz_params)
def test_confusion_matrix_success(viz_params, confusion_matrix_params): #pylint: disable=redefined-outer-name
"""Test for successful confusion matrix generation."""
if os.path.exists(viz_params["mlpipeline_ui_metadata"]):
os.remove(viz_params["mlpipeline_ui_metadata"])
viz_params["confusion_matrix_dict"] = confusion_matrix_params
with mock.patch.object(Executor, "_upload_confusion_matrix_to_minio"):
output_dict = generate_visualization(viz_params)
assert output_dict is not None
assert "mlpipeline_ui_metadata" in output_dict
assert os.path.exists(output_dict["mlpipeline_ui_metadata"])
with open(output_dict["mlpipeline_ui_metadata"]) as file:
data = file.read()
assert "confusion_matrix" in data
|
|
# Copyright 2014 Nervana Systems Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pycuda.driver as drv
from pycuda.tools import context_dependent_memoize
from operator import mul
from math import ceil
import sys
if sys.version_info >= (3, 0):
from functools import reduce
class Layer(object):
def __init__(self, lib, dtype, N):
self.N = N
self.dtype = dtype
self.lib = lib
self.fprop_in = None
self.fprop_out = None
self.bprop_in = None
self.bprop_out = None
self.weights = None
self.updates = None
self.velocity = None
self.dimF2 = None
self.flops = 0
self.sizeO = 0
self.sizeF = 0
def init_activations(self):
self.fprop_out = self.lib.empty(self.dimO2, dtype=self.dtype)
self.act_stats1 = self.lib.empty((self.dimO2[0],1), dtype=np.float32)
self.act_stats2 = self.act_stats1[0:1,0:1]
def init_deltas(self, shared=None):
if shared is None:
self.bprop_in = self.lib.empty(self.dimO2, dtype=self.dtype)
else:
self.bprop_in = shared.share(self.dimO2)
def init_weights(self, loc=0.0, scale=0.1, shared=None):
if self.dimF2 is not None:
weights = np.random.normal(loc, scale, self.dimF2)
self.weights = self.lib.array(weights, dtype=self.dtype)
self.velocity = self.lib.zeros(self.dimF2, dtype=self.dtype)
if shared is None:
self.updates = self.lib.empty(self.dimF2, dtype=self.dtype)
else:
self.updates = shared.share(self.dimF2)
self.weight_stats1 = self.lib.empty((self.dimF2[0],1), dtype=np.float32)
self.weight_stats2 = self.weight_stats1[0:1,0:1]
def connect(self, prev_layer):
if prev_layer is not None:
self.fprop_in = prev_layer.fprop_out
self.bprop_out = prev_layer.bprop_in
def reduction_factor(self):
return 1.0
def fprop(self): pass
def bprop(self): pass
def update(self, momentum, learning_rate): pass
# fprop relu happens inside of the conv and gemm kernels
def bprop_relu(self):
self.bprop_in *= self.fprop_out > 0
def grad_descent_momentum(self, momentum, learning_rate):
self.velocity[:] = self.velocity*momentum - self.updates*learning_rate
self.weights += self.velocity
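# The update above is classic SGD with momentum:
#   velocity <- momentum * velocity - learning_rate * update
#   weights  <- weights + velocity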
def get_activation_mean(self):
return self._get_mean(self.fprop_out, self.act_stats1, self.act_stats2)
def get_delta_mean(self, mean=False):
return self._get_mean(self.bprop_in, self.act_stats1, self.act_stats2)
def get_update_mean(self, mean=False):
if self.dimF2 is not None:
return self._get_mean(self.updates, self.weight_stats1, self.weight_stats2)
return self._get_mean(self.bprop_in, self.act_stats1, self.act_stats2)
def get_weight_mean(self, mean=False):
if self.dimF2 is not None:
return self._get_mean(self.weights, self.weight_stats1, self.weight_stats2)
def get_activation_max(self):
return self._get_max(self.fprop_out, self.act_stats1, self.act_stats2)
def get_delta_max(self, mean=False):
return self._get_max(self.bprop_in, self.act_stats1, self.act_stats2)
def get_update_max(self, mean=False):
if self.dimF2 is not None:
return self._get_max(self.updates, self.weight_stats1, self.weight_stats2)
def get_weight_max(self, mean=False):
if self.dimF2 is not None:
return self._get_max(self.weights, self.weight_stats1, self.weight_stats2)
def _get_mean(self, ary, buf1, buf2):
return float(self.lib.mean(abs(ary), partial=buf1, out=buf2).get()[0,0])
def _get_max(self, ary, buf1, buf2):
return float(self.lib.max(abs(ary), partial=buf1, out=buf2).get()[0,0])
class DataLayer(Layer):
def __init__(self, lib, dtype, N, C, D=1, H=1, W=1):
super(DataLayer, self).__init__(lib, dtype, N)
self.C = C
self.K = C
self.M = D
self.P = H
self.Q = W
self.DHW = (D,H,W)
self.dimO2 = (C*D*H*W,N)
def init_data(self, ary):
self.fprop_out.set(ary)
def init_deltas(self, shared=None): pass
def init_weights(self, loc=0.0, scale=0.1, shared=None): pass
def __str__(self):
return "DataLayer: NCK: (%d, %d, %d) DHW:%s" % (self.N, self.C, self.K, self.DHW)
class FullLayer(Layer):
def __init__(self, lib, dtype, N, nIn, nOut, fprop_size=None, bprop_size=None):
super(FullLayer, self).__init__(lib, dtype, N)
self.nIn = nIn
self.nOut = nOut
self.flops = N * nIn * nOut * 2.0
self.dimF2 = (nOut, nIn)
self.dimO2 = (nOut, N)
self.sizeO = nOut * N
self.sizeF = nIn * nOut
self.fprop_size = fprop_size
self.bprop_size = bprop_size
def fprop(self):
self.lib.dot(self.weights, self.fprop_in, self.fprop_out, relu=True, size=self.fprop_size)
def bprop(self):
self.bprop_relu()
self.lib.dot(self.weights.T, self.bprop_in, self.bprop_out, size=self.bprop_size)
def update(self, momentum, learning_rate):
self.lib.dot(self.bprop_in, self.fprop_in.T, self.updates)
self.grad_descent_momentum(momentum, learning_rate)
def __str__(self):
return "FullLayer: N, nIn, nOut: (%d, %d, %d)" % (self.N, self.nIn, self.nOut)
class ConvLayer(Layer):
def __init__(self, lib, dtype,
N, C, K,
D=1, H=1, W=1,
T=1, R=1, S=1,
pad_d=0, pad_h=0, pad_w=0,
str_d=1, str_h=1, str_w=1,
grid_P=0, grid_Q=0, update_size=None):
super(ConvLayer, self).__init__(lib, dtype, N)
assert N % 8 == 0, "N dim must be multiple of 8"
assert K % 8 == 0, "K dim must be multiple of 8"
# Compute the output spatial dimensions
M = int(ceil(float(D - T + 1 + 2*pad_d) / str_d))
#if not P:
P = int(ceil(float(H - R + 1 + 2*pad_h) / str_h))
#if not Q:
Q = int(ceil(float(W - S + 1 + 2*pad_w) / str_w))
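# Worked example of the output-size formula (illustrative numbers): with
# H = W = 224, R = S = 3, pad_h = pad_w = 1, str_h = str_w = 1 this gives
# P = Q = ceil((224 - 3 + 1 + 2) / 1) = 224, i.e. "same"-style padding.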
self.C = C
self.K = K
self.M = M
self.P = P
self.Q = Q
self.NCK = (N,C,K)
self.TRS = (T,R,S)
self.DHW = (D,H,W)
self.MPQ = (M,P,Q)
self.padding = (pad_d, pad_h, pad_w)
self.strides = (str_d, str_h, str_w)
self.dimI = (C,D,H,W,N)
self.dimF = (C,T,R,S,K)
self.dimO = (K,M,P,Q,N)
self.dimI2 = (C*D*H*W,N)
self.dimF2 = (C*T*R*S,K)
self.dimO2 = (K*M*P*Q,N)
self.sizeI = reduce(mul, self.dimI, 1)
self.sizeF = reduce(mul, self.dimF, 1)
self.sizeO = reduce(mul, self.dimO, 1)
self.nOut = reduce(mul, self.MPQ, 1) * K
# precompute some multiplications for fast constant memory access
WN = W*N
HWN = H*WN
DHWN = D*HWN
RS = R*S
RST = T*RS
CRST = C*RST
PQ = P*Q
PM = P*M
PQM = M*PQ
QN = Q*N
PQN = P*QN
MPQN = M*PQN
# I can easily get the kernels working with larger values here..
# But this is what version 1 is coded to support.
assert PQM < 2**16, "Integer division is faster with 16bit numerators"
# Kernels can be recoded to support 32bit numerators at
# some performance loss.
assert CRST+8 < 2**16, "Integer division is faster with 16bit numerators"
# precompute grid dimensions
grid_N64 = N // 64 + (N % 64 != 0)
grid_K64 = K // 64 + (K % 64 != 0)
grid_C64 = CRST // 64 + (CRST % 64 != 0)
grid_N128 = N // 128 + (N % 128 != 0)
grid_K128 = K // 128 + (K % 128 != 0)
grid_C128 = CRST // 128 + (CRST % 128 != 0)
#TODO: add more 128x128 kernels for better performance at fp32.
self.fprop_grid = (PQM, grid_K64, grid_N64)
self.bprop_grid = (PQM, grid_C128, grid_N64)
self.fprop_block = (64, 1, 1)
self.bprop_block = (128, 1, 1)
self.fprop_size = "K64_N64"
self.bprop_size = "C128_N64"
#TODO: tune this further
if (update_size is None or update_size == "C64_K64" or update_size == "C128_K64") and \
(CRST <= 64 or K <= 64 or (K % 64 == 0 and K % 128 != 0)):
if self.dtype is np.float32:
self.updat_size = "C128_K64"
updat_grid = [0, grid_C128, grid_K64]
updat_block = 128
else:
self.updat_size = "C64_K64"
updat_grid = [0, grid_C64, grid_K64]
updat_block = 64
else:
self.updat_size = "C128_K128"
updat_grid = [0, grid_C128, grid_K128]
updat_block = 256
if grid_P == 0 or grid_Q == 0:
# Performance seems good with at least 4096 total threads per SM
# More threads might be faster but accuracy starts dropping off.
# Cap grid_P*grid_Q at 64 for fp16.
# TODO: explore L2 utilization here:
if self.dtype is np.float16:
inc_P = False
grid_P = 1
grid_Q = 1
grid_O = updat_grid[1] * updat_grid[2] * M * updat_block
thresh = _get_sm_count() * 4096
while grid_O * grid_P * grid_Q < thresh and \
grid_P <= P and grid_Q <= Q and \
grid_P * grid_Q < 64:
if inc_P:
grid_P += 1
else:
grid_Q += 1
inc_P = not inc_P
# When not concerned about accumulation accuracy just unroll things a bit
# but maximize the distribution. This has the effect of better utilizing the L2.
else:
grid_P = P
grid_Q = Q // 4
# TitanX optimization: make grid multiple of 24 for small grids
# TODO: explore L2 utilization here:
# TODO: add 980, 750, etc optimizations
if _get_sm_count() == 24:
grid_PQ = grid_P * grid_Q
if grid_PQ < 30:
grid_P = 6
grid_Q = 4
elif grid_PQ < 54:
grid_P = 8
grid_Q = 6
elif grid_PQ < 78:
grid_P = 9
grid_Q = 8
elif grid_PQ <= 108:
grid_P = 12
grid_Q = 8
if grid_P >= P: grid_P = P
if grid_Q >= Q: grid_Q = Q
grid_PQ = grid_P * grid_Q
grid_PQM = updat_grid[0] = grid_PQ * M
self.updat_grid = tuple(updat_grid)
self.updat_block = (updat_block,1,1)
# precompute the magic numbers and shift amounts for integer division
magic_RST = _magic32(CRST+8, RST)
magic_RS = _magic32(RST+32, RS)
magic_S = _magic32(RS+32, S)
magic_PQ = _magic32(PQM, PQ)
magic_Q = _magic32(PQ, Q)
magic_PQu = _magic32(grid_PQM, grid_PQ)
magic_Qu = _magic32(grid_PQ, grid_Q)
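# These (magic, shift) pairs let the kernels evaluate "n // d" as
# "(n * magic) >> shift" with a multiply and a shift instead of an integer
# divide. Small illustrative case: for divisor 3 with numerators up to 255,
# magic=171 and shift=9 work, e.g. (255 * 171) >> 9 == 85 == 255 // 3.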
# generate the convolution kernel args for fprop and bprop
self.kernel_args = _flatten([
N, K, D, H, W, WN, HWN, DHWN,
C, CRST, RST, magic_RST, RS, magic_RS, S, magic_S,
pad_d, pad_h, pad_w, str_d, str_h, str_w,
P, Q, PQ, QN, PQN, MPQN, magic_Q, magic_PQ,
grid_P, grid_Q, grid_PQ])
# update uses slightly different args
self.update_args = _flatten([
N, K, D, H, W, WN, HWN, DHWN,
C, CRST, RST, magic_RST, RS, magic_RS, S, magic_S,
pad_d, pad_h, pad_w, str_d, str_h, str_w,
P, Q, PQ, QN, PQN, MPQN, magic_Qu, magic_PQu,
grid_P, grid_Q, grid_PQ])
# shared lookup table size
self.lut_size = (RST // 32 + (RST % 32 != 0)) * 32 * 4
# flop count for benchmarking
self.flops = PQM * K * N * CRST * 2.0
def fprop(self):
self.lib.fprop_conv(self, self.fprop_in, self.weights, self.fprop_out, relu=True)
def bprop(self):
self.bprop_relu()
if self.bprop_out is not None:
self.lib.bprop_conv(self, self.weights, self.bprop_in, self.bprop_out)
def update(self, momentum, learning_rate):
self.lib.update_conv(self, self.fprop_in, self.bprop_in, self.updates)
self.grad_descent_momentum(momentum, learning_rate)
def __str__(self):
return "ConvLayer: NCK: (%d, %d, %d) DHW:%s TRS:%s MPQ:%s" % \
(self.N, self.C, self.K, self.DHW, self.TRS, self.MPQ)
# Add Deconv class
class DeconvLayer(Layer):
def __init__(self, lib, dtype,
N, C, K,
P, Q,
R=1, S=1,
pad_d=0, pad_h=0, pad_w=0,
str_d=1, str_h=1, str_w=1,
grid_P=0, grid_Q=0, update_size=None):
super(DeconvLayer, self).__init__(lib, dtype, N)
assert N % 8 == 0, "N dim must be multiple of 8"
assert K % 8 == 0, "K dim must be multiple of 8"
# Set T, M and D to be consts.
T = 1
M = 1
D = 1
# H and W cannot be recovered exactly from P and Q (the inverse mapping is not unique).
H = (P-1) * str_h - 2 * pad_h + R
W = (Q-1) * str_w - 2 * pad_w + S
# Track H and W on the layer
self.H = H
self.W = W
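# Illustrative check of the inverse mapping: with P = Q = 112, R = S = 4,
# pad_h = pad_w = 1 and str_h = str_w = 2, this recovers
# H = W = (112 - 1) * 2 - 2 * 1 + 4 = 224.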
self.C = C
self.K = K
self.M = M
self.P = P
self.Q = Q
self.NCK = (N,C,K)
self.TRS = (T,R,S)
self.DHW = (D,H,W)
self.MPQ = (M,P,Q)
self.padding = (pad_d, pad_h, pad_w)
self.strides = (str_d, str_h, str_w)
# Did not change the names of dimI, dimO, etc. even though dimI is now technically the
# dimension of the output
self.dimI = (C,D,H,W,N)
self.dimF = (C,T,R,S,K)
self.dimO = (K,M,P,Q,N)
self.dimI2 = (C*D*H*W,N)
self.dimF2 = (C*T*R*S,K)
self.dimO2 = (K*M*P*Q,N)
self.sizeI = reduce(mul, self.dimI, 1)
self.sizeF = reduce(mul, self.dimF, 1)
self.sizeO = reduce(mul, self.dimO, 1)
# nOut has to change because P and Q are now the inputs
self.nOut = reduce(mul, self.DHW, 1) * C
# precompute some multiplications for fast constant memory access
WN = W*N
HWN = H*WN
DHWN = D*HWN
RS = R*S
RST = T*RS
CRST = C*RST
PQ = P*Q
PM = P*M
PQM = M*PQ
QN = Q*N
PQN = P*QN
MPQN = M*PQN
# I can easily get the kernels working with larger values here..
# But this is what version 1 is coded to support.
assert PQM < 2**16, "Integer division is faster with 16bit numerators"
# Kernels can be recoded to support 32bit numerators at
# some performance loss.
assert CRST+8 < 2**16, "Integer division is faster with 16bit numerators"
# precompute grid dimensions
grid_N64 = N // 64 + (N % 64 != 0)
grid_K64 = K // 64 + (K % 64 != 0)
grid_C64 = CRST // 64 + (CRST % 64 != 0)
grid_N128 = N // 128 + (N % 128 != 0)
grid_K128 = K // 128 + (K % 128 != 0)
grid_C128 = CRST // 128 + (CRST % 128 != 0)
#TODO: add more 128x128 kernels for better performance at fp32.
self.fprop_grid = (PQM, grid_K64, grid_N64)
self.bprop_grid = (PQM, grid_C128, grid_N64)
self.fprop_block = (64, 1, 1)
self.bprop_block = (128, 1, 1)
self.fprop_size = "K64_N64"
self.bprop_size = "C128_N64"
#TODO: tune this further
if (update_size is None or update_size == "C64_K64" or update_size == "C128_K64") and \
(CRST <= 64 or K <= 64 or (K % 64 == 0 and K % 128 != 0)):
if self.dtype is np.float32:
self.updat_size = "C128_K64"
updat_grid = [0, grid_C128, grid_K64]
updat_block = 128
else:
self.updat_size = "C64_K64"
updat_grid = [0, grid_C64, grid_K64]
updat_block = 64
else:
self.updat_size = "C128_K128"
updat_grid = [0, grid_C128, grid_K128]
updat_block = 256
if grid_P == 0 or grid_Q == 0:
# Performance seems good with at least 4096 total threads per SM
# More threads might be faster but accuracy starts dropping off.
# Cap grid_P*grid_Q at 64 for fp16.
# TODO: explore L2 utilization here:
if self.dtype is np.float16:
inc_P = False
grid_P = 1
grid_Q = 1
grid_O = updat_grid[1] * updat_grid[2] * M * updat_block
thresh = _get_sm_count() * 4096
while grid_O * grid_P * grid_Q < thresh and \
grid_P <= P and grid_Q <= Q and \
grid_P * grid_Q < 64:
if inc_P:
grid_P += 1
else:
grid_Q += 1
inc_P = not inc_P
# When accumulation accuracy is not a concern, unroll a little but maximize
# the distribution; this makes better use of the L2 cache.
else:
grid_P = P
grid_Q = Q // 4
# TitanX optimization: make grid multiple of 24 for small grids
# TODO: explore L2 utilization here:
# TODO: add 980, 750, etc optimizations
if _get_sm_count() == 24:
grid_PQ = grid_P * grid_Q
if grid_PQ < 30:
grid_P = 6
grid_Q = 4
elif grid_PQ < 54:
grid_P = 8
grid_Q = 6
elif grid_PQ < 78:
grid_P = 9
grid_Q = 8
elif grid_PQ <= 108:
grid_P = 12
grid_Q = 8
if grid_P >= P: grid_P = P
if grid_Q >= Q: grid_Q = Q
grid_PQ = grid_P * grid_Q
grid_PQM = updat_grid[0] = grid_PQ * M
self.updat_grid = tuple(updat_grid)
self.updat_block = (updat_block,1,1)
# precompute the magic numbers and shift amounts for integer division
magic_RST = _magic32(CRST+8, RST)
magic_RS = _magic32(RST+32, RS)
magic_S = _magic32(RS+32, S)
magic_PQ = _magic32(PQM, PQ)
magic_Q = _magic32(PQ, Q)
magic_PQu = _magic32(grid_PQM, grid_PQ)
magic_Qu = _magic32(grid_PQ, grid_Q)
# generate the convolution kernel args for fprop and bprop
self.kernel_args = _flatten([
N, K, D, H, W, WN, HWN, DHWN,
C, CRST, RST, magic_RST, RS, magic_RS, S, magic_S,
pad_d, pad_h, pad_w, str_d, str_h, str_w,
P, Q, PQ, QN, PQN, MPQN, magic_Q, magic_PQ,
grid_P, grid_Q, grid_PQ])
# update uses slightly different args
self.update_args = _flatten([
N, K, D, H, W, WN, HWN, DHWN,
C, CRST, RST, magic_RST, RS, magic_RS, S, magic_S,
pad_d, pad_h, pad_w, str_d, str_h, str_w,
P, Q, PQ, QN, PQN, MPQN, magic_Qu, magic_PQu,
grid_P, grid_Q, grid_PQ])
# shared lookup table size
self.lut_size = (RST // 32 + (RST % 32 != 0)) * 32 * 4
# flop count for benchmarking
self.flops = PQM * K * N * CRST * 2.0
def fprop(self):
self.lib.fprop_conv(self, self.fprop_in, self.weights, self.fprop_out, relu=True)
def bprop(self):
self.bprop_relu()
if self.bprop_out is not None:
self.lib.bprop_conv(self, self.weights, self.bprop_in, self.bprop_out)
def update(self, momentum, learning_rate):
self.lib.update_conv(self, self.fprop_in, self.bprop_in, self.updates)
self.grad_descent_momentum(momentum, learning_rate)
def __str__(self):
return "DeconvLayer: NCK: (%d, %d, %d) DHW:%s TRS:%s MPQ:%s" % \
(self.N, self.C, self.K, self.DHW, self.TRS, self.MPQ)
class PoolLayer(Layer):
def __init__(self, lib, dtype,
op, N, C,
D=1, H=1, W=1,
J=1, T=1, R=1, S=1,
pad_j=0, pad_d=0, pad_h=0, pad_w=0,
str_j=None, str_d=None, str_h=None, str_w=None):
super(PoolLayer, self).__init__(lib, dtype, N)
# default to non-overlapping
if str_j is None: str_j = J
if str_d is None: str_d = T
if str_h is None: str_h = R
if str_w is None: str_w = S
if str_j < J or str_d < T or str_h < R or str_w < S:
self.overlap = ceil(float(J)/str_j) * \
ceil(float(T)/str_d) * \
ceil(float(R)/str_h) * \
ceil(float(S)/str_w)
else:
self.overlap = 0.0
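# e.g. J = T = 1, R = S = 3, str_h = str_w = 2 gives overlap = ceil(3/2)**2 = 4,
# i.e. each input element is read by up to 4 pooling windows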
# Compute the output dimensions
K = int(ceil(float(C - J + 1 + 2*pad_j) / str_j))
M = int(ceil(float(D - T + 1 + 2*pad_d) / str_d))
P = int(ceil(float(H - R + 1 + 2*pad_h) / str_h))
Q = int(ceil(float(W - S + 1 + 2*pad_w) / str_w))
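# e.g. H = W = 24, R = S = 2, str_h = str_w = 2 and no padding give
# P = Q = ceil(23/2) = 12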
self.op = op
self.C = C
self.K = K
self.M = M
self.P = P
self.Q = Q
self.JTRS = (J,T,R,S)
self.DHW = (D,H,W)
self.MPQ = (M,P,Q)
self.padding = (pad_j, pad_d, pad_h, pad_w)
self.strides = (str_j, str_d, str_h, str_w)
self.dimI = (C,D,H,W,N)
self.dimO = (K,M,P,Q,N)
self.dimF2 = None
self.dimI2 = (C*D*H*W,N)
self.dimO2 = (K*M*P*Q,N)
self.sizeI = reduce(mul, self.dimI, 1)
self.sizeO = reduce(mul, self.dimO, 1)
self.nOut = reduce(mul, self.MPQ, 1) * K
# precompute some multiplications for fast constant memory access
WN = W*N
HWN = H*WN
DHWN = D*HWN
RS = R*S
RST = T*RS
JRST = J*RST
QN = Q*N
PM = P*M
PQN = P*QN
MPQN = M*PQN
assert JRST <= N or N >= 32, "Edge case not currently implemented"
assert JRST+32 < 2**16, "Integer division is faster with 16bit numerators"
# precompute the magic numbers and shift amounts for integer division
magic_RST = _magic32(JRST+32, RST)
magic_RS = _magic32( RST+32, RS)
magic_S = _magic32( RS+32, S)
magic_P = _magic32(PM, P)
# generate the convolution kernel args for all three operations
self.kernel_args = _flatten([
N, W, H, D, C, WN, HWN, DHWN,
P, magic_P, QN, PQN, MPQN,
pad_j, pad_d, pad_h, pad_w,
str_j, str_d, str_h, str_w,
S, RS, RST, JRST, magic_S, magic_RS, magic_RST, self.overlap])
# precompute grid dimensions
self.grid = (Q, PM, K)
self.block = (N, 1, 1)
# shared lookup table size
self.lut_size = (JRST // 32 + (JRST % 32 != 0)) * 32 * 4
def fprop(self):
self.lib.fprop_pool(self, self.fprop_in, self.fprop_out)
def bprop(self):
self.lib.bprop_pool(self, self.fprop_in, self.bprop_in, self.bprop_out)
def reduction_factor(self):
return float(self.dimI2[0]) / float(self.dimO2[0])
def __str__(self):
return "PoolLayer: NCK: (%d, %d, %d) DHW:%s JTRS:%s MPQ:%s op: %s " % \
(self.N, self.C, self.K, self.DHW, self.JTRS, self.MPQ, self.op)
# Magic numbers and shift amounts for integer division
# Suitable for when nmax*magic fits in 32 bits
# Shamelessly pulled directly from:
# http://www.hackersdelight.org/hdcodetxt/magicgu.py.txt
def _magic32(nmax, d):
nc = ((nmax + 1)//d)*d - 1
nbits = len(bin(nmax)) - 2
for p in range(0, 2*nbits + 1):
if 2**p > nc*(d - 1 - (2**p - 1)%d):
m = (2**p + d - 1 - (2**p - 1)%d)//d
return (m, p)
raise ValueError("Can't find magic number for division")
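# A hypothetical self-check (not part of the original module): the returned pair
# satisfies (n * magic) >> shift == n // d for every 0 <= n <= nmax, which is what
# lets the kernels replace an integer division by a multiply and a shift, e.g.:
#   magic, shift = _magic32(1000, 5)
#   assert all((n * magic) >> shift == n // 5 for n in range(1001))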
# flatten a nested list of lists or values
def _flatten(lst):
return sum( ([x] if not isinstance(x, (list,tuple))
else _flatten(x) for x in lst), [] )
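# e.g. _flatten([1, [2, (3, 4)], 5]) == [1, 2, 3, 4, 5]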
@context_dependent_memoize
def _get_sm_count():
attributes = drv.Context.get_device().get_attributes()
return attributes[drv.device_attribute.MULTIPROCESSOR_COUNT]
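# e.g. a GM200 TITAN X reports 24 multiprocessors, which is what the
# "multiple of 24" grid rounding above keys off of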
|
|
import glob
import h5py
import mahotas as mh
import matplotlib.pyplot as plt
import numpy as np
import partition_comparison
import os
from scipy import ndimage as nd
import skimage.measure
import tifffile as tif
class Util(object):
@staticmethod
def read_cremi_section(DATADIR, z, verbose=True):
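'''Load image, probability, gold and rhoana data for CREMI section z and relabel the segmentations.'''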
image = sorted(glob.glob(DATADIR+'image/*'))
probs = sorted(glob.glob(DATADIR+'prob/*'))
gold = sorted(glob.glob(DATADIR+'gold/*'))
rhoana = sorted(glob.glob(DATADIR+'rhoana/*'))
image = tif.imread(image[z])
prob = tif.imread(probs[z])
gold = tif.imread(gold[z])
rhoana = mh.imread(rhoana[z])
#convert ids from rgb to single channel
rhoana_single = np.zeros((rhoana.shape[0], rhoana.shape[1]), dtype=np.uint64)
rhoana_single[:, :] = rhoana[:,:,0]*256*256 + rhoana[:,:,1]*256 + rhoana[:,:,2]
# relabel the segmentations
gold = Util.relabel(gold)
rhoana_single = Util.relabel(rhoana_single)
return image, prob, gold, rhoana_single
@staticmethod
def read_section(path, z, verbose=True):
'''
Load the image, probability map, mask, gold and rhoana segmentations for
section z, convert RGB ids to single-channel labels, relabel them and mask
the rhoana output.
'''
image = sorted(glob.glob(os.path.join(path, 'image', '*'+str(z)+'.png')))
mask = sorted(glob.glob(os.path.join(path, 'mask', '*'+str(z)+'.png')))
gold = sorted(glob.glob(os.path.join(path, 'gold', '*'+str(z)+'.png')))
rhoana = sorted(glob.glob(os.path.join(path, 'rhoana', '*'+str(z)+'.png')))
prob = sorted(glob.glob(os.path.join(path, 'prob', '*'+str(z)+'.tif')))
if verbose:
print 'Loading', os.path.basename(image[0])
image = mh.imread(image[0])
mask = mh.imread(mask[0]).astype(np.bool)
gold = mh.imread(gold[0])
rhoana = mh.imread(rhoana[0])
prob = tif.imread(prob[0])
#convert ids from rgb to single channel
rhoana_single = np.zeros((rhoana.shape[0], rhoana.shape[1]), dtype=np.uint64)
rhoana_single[:, :] = rhoana[:,:,0]*256*256 + rhoana[:,:,1]*256 + rhoana[:,:,2]
gold_single = np.zeros((gold.shape[0], gold.shape[1]), dtype=np.uint64)
gold_single[:, :] = gold[:,:,0]*256*256 + gold[:,:,1]*256 + gold[:,:,2]
# relabel the segmentations
gold_single = Util.relabel(gold_single)
rhoana_single = Util.relabel(rhoana_single)
#mask the rhoana output
rhoana_single[mask==0] = 0
return image, prob, mask, gold_single, rhoana_single
@staticmethod
def get_histogram(array):
'''Return the full label histogram of an array.'''
return mh.fullhistogram(array.astype(np.uint64))
@staticmethod
def get_largest_label(array, ignore_zero=False):
'''Return the most frequent label in an array, optionally ignoring label 0.'''
hist = Util.get_histogram(array)
if ignore_zero:
hist[0] = 0
return np.argmax(hist)
@staticmethod
def normalize_labels(array):
'''Relabel an array to consecutive label ids.'''
return mh.labeled.relabel(array)
@staticmethod
def relabel(array):
relabeled_array = np.array(array)
relabeled_array = skimage.measure.label(array).astype(np.uint64)
# relabeled_array[relabeled_array==0] = relabeled_array.max()
return Util.normalize_labels(relabeled_array)[0]
@staticmethod
def load_colormap(f):
'''Load the first dataset of an HDF5 colormap file.'''
hdf5_file = h5py.File(f, 'r')
list_of_names = []
hdf5_file.visit(list_of_names.append)
return hdf5_file[list_of_names[0]].value
@staticmethod
def colorize(segmentation):
'''Map segment ids to RGB colors using the stored colormap.'''
cm_path = os.path.expanduser('~/data/colorMap.hdf5')
cm = Util.load_colormap(cm_path)
segmentation = cm[segmentation % len(cm)]
return segmentation
@staticmethod
def view(array,color=True,large=False,crop=False, text=None, no_axis=True, file=''):
if large:
figsize = (10,10)
else:
figsize = (3,3)
fig = plt.figure(figsize=figsize)
if crop:
array = mh.croptobbox(array)
if text:
text = '\n\n\n'+str(text)
fig.text(0,1,text)
if no_axis:
plt.axis('off')
if color:
plt.imshow(Util.colorize(array), picker=True)
iii = Util.colorize(array)
else:
plt.imshow(array, cmap='gray', picker=True)
iii = array
if file!='':
mh.imsave(file, iii.astype(np.uint8))
#plt.savefig(file)
@staticmethod
def view_rgba(patch, text=''):
'''Show up to four channels of a patch (image, probability, binary, border) side by side.'''
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
text = ''+str(text)
fig.text(0,0.5,text)
image = patch[0]
prob = patch[1]
if patch.shape[0] > 2:
binary = patch[2]
if patch.shape[0] > 3:
border = patch[3]
ax1.axis('off')
ax1.imshow(image, cmap='gray')
ax2.axis('off')
ax2.imshow(prob, cmap='gray')
if patch.shape[0] > 2:
ax3.axis('off')
ax3.imshow(binary, cmap='gray')
if patch.shape[0] > 3:
ax4.axis('off')
ax4.imshow(border, cmap='gray')
@staticmethod
def view_labels(array, labels, crop=True, large=True, return_it=False):
if not isinstance(labels, list):
labels = [labels]
out = np.zeros(array.shape)
for l in labels:
l_arr = Util.threshold(array, l)
out[l_arr == 1] = out.max()+1
if crop:
out = mh.croptobbox(out)
if large:
figsize = (10,10)
else:
figsize = (3,3)
if return_it:
return out
fig = plt.figure(figsize=figsize)
plt.imshow(out)
@staticmethod
def propagate_max_overlap(rhoana, gold):
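'''Assign to every rhoana segment the gold label it overlaps most.'''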
out = np.array(rhoana)
rhoana_labels = Util.get_histogram(rhoana.astype(np.uint64))
for l,k in enumerate(rhoana_labels):
if l == 0 or k==0:
# ignore 0 since rhoana does not have it
continue
values = gold[rhoana == l]
largest_label = Util.get_largest_label(values.astype(np.uint64))
out[rhoana == l] = largest_label # set the largest label from gold here
return out
@staticmethod
def grab_neighbors(array, label):
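'''Return the ids of the segments adjacent to the given label.'''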
thresholded_array = Util.threshold(array, label)
thresholded_array_dilated = mh.dilate(thresholded_array.astype(np.uint64))
copy = np.array(array)
copy[thresholded_array_dilated != thresholded_array_dilated.max()] = 0
copy[thresholded_array == 1] = 0
copy_hist = Util.get_histogram(copy.astype(np.uint64))
copy_hist[0] = 0 # ignore zeros
# copy_hist[label] = 0 # ignore ourselves
return np.where(copy_hist>0)[0]
@staticmethod
def threshold(array, value):
'''Return a binary mask that is 1 where array equals value.'''
output_array = np.zeros(array.shape)
output_array[array == value] = 1
return output_array
@staticmethod
def frame_image(image, shape=(75,75)):
framed = np.array(image)
framed[:shape[0]//2+1] = 0
framed[-shape[0]//2+1:] = 0
framed[:,0:shape[1]//2+1] = 0
framed[:,-shape[1]//2+1:] = 0
return framed
@staticmethod
def fill(data, invalid=None):
"""
Replace the value of invalid 'data' cells (indicated by 'invalid')
by the value of the nearest valid data cell
Input:
data: numpy array of any dimension
invalid: a binary array of same shape as 'data'.
data value are replaced where invalid is True
If None (default), use: invalid = np.isnan(data)
Output:
Return a filled array.
"""
if invalid is None: invalid = np.isnan(data)
ind = nd.distance_transform_edt(invalid,
return_distances=False,
return_indices=True)
return data[tuple(ind)]
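# Example (hypothetical): fill zero-valued pixels with their nearest non-zero
# neighbor:
#   filled = Util.fill(prob, invalid=(prob == 0))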
@staticmethod
def crop_by_bbox(array, bbox, offset=0):
return array[bbox[0]-offset:bbox[1]+offset, bbox[2]-offset:bbox[3]+offset]
@staticmethod
def vi(array1, array2):
'''Return the variation of information between two label images.'''
return partition_comparison.variation_of_information(array1.ravel(), array2.ravel())
|
|
import aes.utils as U
import argparse
import keras.backend as K
import logging
import numpy as np
import os
import pickle as pk
from aes.models import Models
import keras.optimizers as opt
from keras.preprocessing import sequence
from keras.utils.vis_utils import plot_model
from time import time
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
logger = logging.getLogger(__name__)
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument(
'-tr',
'--train',
dest='train_path',
type=str,
metavar='<str>',
required=True,
help='The path to the training set')
parser.add_argument(
'-tu',
'--tune',
dest='dev_path',
type=str,
metavar='<str>',
required=True,
help='The path to the development set')
parser.add_argument(
'-ts',
'--test',
dest='test_path',
type=str,
metavar='<str>',
required=True,
help='The path to the test set')
parser.add_argument(
'-o',
'--out-dir',
dest='out_dir',
type=str,
metavar='<str>',
required=True,
help='The path to the output directory')
parser.add_argument(
'-p',
'--prompt',
dest='prompt_id',
type=int,
metavar='<int>',
required=False,
help='Prompt ID for ASAP dataset. 0 means all prompts.')
parser.add_argument(
'-m',
'--model-type',
dest='model_type',
type=str,
metavar='<str>',
default='gate-matrix',
help='Model type (gate-positional|gate-matrix|gate-vector|concat|' +
'char-cnn|char-lstm|char-gru|char-rnn|' +
'word-cnn|word-lstm|word-gru|word-rnn)' + '(default=gate-matrix)')
parser.add_argument(
'--emb',
dest='emb_path',
type=str,
metavar='<str>',
help='The path to the word embeddings file (Word2Vec format)')
parser.add_argument(
'-v',
'--vocab-word-size',
dest='vocab_word_size',
type=int,
metavar='<int>',
default=4000,
help='Word vocab size (default=4000)')
parser.add_argument(
'--emb-dim',
dest='emb_dim',
type=int,
metavar='<int>',
default=50,
help='Embeddings dimension (default=50)')
parser.add_argument(
'-b',
'--batch-size',
dest='batch_size',
type=int,
metavar='<int>',
default=32,
help='Batch size (default=32)')
parser.add_argument(
'-e',
'--epochs',
dest='epochs',
type=int,
metavar='<int>',
default=50,
help='Number of training epochs (default=50)')
parser.add_argument(
'-cpw',
'--char-per-word',
dest='char_per_word',
type=int,
metavar='<int>',
default=7,
help='Characters per word. (default=7)')
parser.add_argument(
'-ccnn',
'--char-cnn-kernel',
dest='char_cnn_kernel',
type=int,
metavar='<int>',
default=3,
help='Character CNN kernel size. (default=3)')
parser.add_argument(
'-cnn',
'--cnn-kernel',
dest='cnn_kernel',
type=int,
metavar='<int>',
default=3,
help='CNN kernel size. (default=3)')
# Optional arguments
parser.add_argument(
'-cvp',
'--vocab-char-path',
dest='vocab_char_path',
type=str,
metavar='<str>',
help='(Optional) The path to the existing char vocab file (*.pkl)')
parser.add_argument(
'-vp',
'--vocab-path',
dest='vocab_word_path',
type=str,
metavar='<str>',
help='(Optional) The path to the existing vocab file (*.pkl)')
# Get all arguments
args = parser.parse_args()
train_path, dev_path, test_path, out_dir, \
prompt_id, model_type, emb_path, vocab_word_size, \
emb_dim, batch_size, epochs, \
char_cnn_kernel, cnn_kernel, \
char_per_word, vocab_char_path, vocab_word_path = \
args.train_path, args.dev_path, args.test_path, args.out_dir, \
args.prompt_id, args.model_type, args.emb_path, args.vocab_word_size, \
args.emb_dim, args.batch_size, args.epochs, \
args.char_cnn_kernel, args.cnn_kernel, \
args.char_per_word, args.vocab_char_path, args.vocab_word_path
if prompt_id == 2 and model_type in ['word-cnn', 'concat']:
np.random.seed(11)
elif prompt_id == 7 and model_type in ['word-cnn', 'concat']:
np.random.seed(113)
else:
np.random.seed(1234)
assert model_type in {
'gate-positional', 'gate-matrix', 'gate-vector', 'concat', 'char-cnn',
'char-lstm', 'char-gru', 'char-rnn', 'word-cnn', 'word-lstm', 'word-gru',
'word-rnn'
}
U.mkdir_p(out_dir + '/preds')
U.set_logger(out_dir)
U.print_args(args)
if 'word' in model_type:
char_per_word = 0
logger.info('char_per_word: ' + str(char_per_word))
if prompt_id:
from aes.evaluator import Evaluator
import aes.reader as dataset
else:
raise NotImplementedError
# Get data
(train_x_char, train_x, train_y, train_pmt), \
(dev_x_char, dev_x, dev_y, dev_pmt), \
(test_x_char, test_x, test_y, test_pmt), \
vocab_char, vocab_char_size, char_maxlen, \
vocab_word, vocab_word_size, word_maxlen, \
num_outputs = \
dataset.get_data(
paths=(train_path, dev_path, test_path),
prompt_id=prompt_id,
char_per_word=char_per_word,
vocab_char_path=vocab_char_path,
vocab_word_size=vocab_word_size,
vocab_word_path=vocab_word_path)
# Dump vocab
with open(out_dir + '/vocab_word.pkl', 'wb') as vocab_word_file:
pk.dump(vocab_word, vocab_word_file)
with open(out_dir + '/vocab_char.pkl', 'wb') as vocab_char_file:
pk.dump(vocab_char, vocab_char_file)
# Pad sequences for mini-batch processing for word level
logger.info('Processing word data')
train_x = sequence.pad_sequences(
train_x, maxlen=word_maxlen, padding='post', truncating='post')
dev_x = sequence.pad_sequences(
dev_x, maxlen=word_maxlen, padding='post', truncating='post')
test_x = sequence.pad_sequences(
test_x, maxlen=word_maxlen, padding='post', truncating='post')
# Pad sequences for mini-batch processing for char level
if 'word' not in model_type:
logger.info('Processing character data')
train_x_char = sequence.pad_sequences(
train_x_char, maxlen=char_maxlen, padding='post', truncating='post')
train_x_char = np.reshape(train_x_char, (len(train_x_char), -1))
dev_x_char = sequence.pad_sequences(
dev_x_char, maxlen=char_maxlen, padding='post', truncating='post')
dev_x_char = np.reshape(dev_x_char, (len(dev_x_char), -1))
test_x_char = sequence.pad_sequences(
test_x_char, maxlen=char_maxlen, padding='post', truncating='post')
test_x_char = np.reshape(test_x_char, (len(test_x_char), -1))
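# The reshapes above flatten the per-word character ids into a single vector per
# sample, so char_maxlen is updated below to the flattened length.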
char_maxlen = len(test_x_char[0])
# Some statistics
train_y = np.array(train_y, dtype=K.floatx())
dev_y = np.array(dev_y, dtype=K.floatx())
test_y = np.array(test_y, dtype=K.floatx())
if prompt_id:
train_pmt = np.array(train_pmt, dtype='int32')
dev_pmt = np.array(dev_pmt, dtype='int32')
test_pmt = np.array(test_pmt, dtype='int32')
bincounts, mfs_list = U.bincounts(train_y)
with open('%s/bincounts.txt' % out_dir, 'w') as output_file:
for bincount in bincounts:
output_file.write(str(bincount) + '\n')
train_mean = train_y.mean(axis=0)
train_std = train_y.std(axis=0)
dev_mean = dev_y.mean(axis=0)
dev_std = dev_y.std(axis=0)
test_mean = test_y.mean(axis=0)
test_std = test_y.std(axis=0)
logger.info('Statistics:')
logger.info(' train_x shape: ' + str(np.array(train_x).shape))
logger.info(' dev_x shape: ' + str(np.array(dev_x).shape))
logger.info(' test_x shape: ' + str(np.array(test_x).shape))
if 'word' not in model_type:
logger.info(' train_x_char shape: ' + str(np.array(train_x_char).shape))
logger.info(' dev_x_char shape: ' + str(np.array(dev_x_char).shape))
logger.info(' test_x_char shape: ' + str(np.array(test_x_char).shape))
logger.info(' train_y shape: ' + str(train_y.shape))
logger.info(' dev_y shape: ' + str(dev_y.shape))
logger.info(' test_y shape: ' + str(test_y.shape))
logger.info(' train_y mean: %s, stdev: %s, MFC: %s' %
(str(train_mean), str(train_std), str(mfs_list)))
# Dev and test sets need to be in the original scale for evaluation
dev_y_org = dev_y.astype(dataset.get_ref_dtype())
test_y_org = test_y.astype(dataset.get_ref_dtype())
# Convert scores to the [0, 1] range for training and evaluation
# (loss calculation)
train_y = dataset.get_model_friendly_scores(train_y, train_pmt)
dev_y = dataset.get_model_friendly_scores(dev_y, dev_pmt)
test_y = dataset.get_model_friendly_scores(test_y, test_pmt)
# Building model
models = Models(prompt_id=prompt_id, initial_mean_value=train_y.mean(axis=0))
if model_type == 'gate-positional':
model = models.create_gate_positional_model(
char_cnn_kernel=char_cnn_kernel,
cnn_kernel=cnn_kernel,
emb_dim=emb_dim,
emb_path=emb_path,
vocab_word=vocab_word,
vocab_word_size=vocab_word_size,
word_maxlen=word_maxlen,
vocab_char_size=vocab_char_size,
char_maxlen=char_maxlen)
elif model_type == 'gate-matrix':
model = models.create_gate_matrix_model(
char_cnn_kernel=char_cnn_kernel,
cnn_kernel=cnn_kernel,
emb_dim=emb_dim,
emb_path=emb_path,
vocab_word=vocab_word,
vocab_word_size=vocab_word_size,
word_maxlen=word_maxlen,
vocab_char_size=vocab_char_size,
char_maxlen=char_maxlen)
elif model_type == 'gate-vector':
model = models.create_gate_vector_model(
char_cnn_kernel=char_cnn_kernel,
cnn_kernel=cnn_kernel,
emb_dim=emb_dim,
emb_path=emb_path,
vocab_word=vocab_word,
vocab_word_size=vocab_word_size,
word_maxlen=word_maxlen,
vocab_char_size=vocab_char_size,
char_maxlen=char_maxlen)
elif model_type == 'concat':
model = models.create_concat_model(
emb_dim=emb_dim,
emb_path=emb_path,
vocab_word=vocab_word,
vocab_word_size=vocab_word_size,
word_maxlen=word_maxlen,
vocab_char_size=vocab_char_size,
char_maxlen=char_maxlen)
elif model_type == 'char-cnn':
model = models.create_char_cnn_model(
emb_dim=emb_dim,
word_maxlen=word_maxlen,
vocab_char_size=vocab_char_size,
char_maxlen=char_maxlen)
elif model_type == 'char-lstm':
model = models.create_char_lstm_model(
emb_dim=emb_dim,
word_maxlen=word_maxlen,
vocab_char_size=vocab_char_size,
char_maxlen=char_maxlen)
elif model_type == 'char-gru':
model = models.create_char_gru_model(
emb_dim=emb_dim,
word_maxlen=word_maxlen,
vocab_char_size=vocab_char_size,
char_maxlen=char_maxlen)
elif model_type == 'char-rnn':
model = models.create_char_rnn_model(
emb_dim=emb_dim,
word_maxlen=word_maxlen,
vocab_char_size=vocab_char_size,
char_maxlen=char_maxlen)
elif model_type == 'word-cnn':
model = models.create_word_cnn_model(
emb_dim=emb_dim,
emb_path=emb_path,
vocab_word=vocab_word,
vocab_word_size=vocab_word_size,
word_maxlen=word_maxlen)
elif model_type == 'word-lstm':
model = models.create_word_lstm_model(
emb_dim=emb_dim,
emb_path=emb_path,
vocab_word=vocab_word,
vocab_word_size=vocab_word_size,
word_maxlen=word_maxlen)
elif model_type == 'word-gru':
model = models.create_word_gru_model(
emb_dim=emb_dim,
emb_path=emb_path,
vocab_word=vocab_word,
vocab_word_size=vocab_word_size,
word_maxlen=word_maxlen)
elif model_type == 'word-rnn':
model = models.create_word_rnn_model(
emb_dim=emb_dim,
emb_path=emb_path,
vocab_word=vocab_word,
vocab_word_size=vocab_word_size,
word_maxlen=word_maxlen)
model.compile(
loss='mean_squared_error',
optimizer=opt.RMSprop(
lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=10, clipvalue=0),
metrics=['mean_absolute_error'])
logger.info(model.summary())
plot_model(model, to_file=out_dir + '/model.png') # Plotting model
# Save model architecture
logger.info('Saving model architecture')
with open(out_dir + '/model_arch.json', 'w') as arch:
arch.write(model.to_json(indent=2))
logger.info(' Done')
# Evaluator
if model_type in ['gate-positional', 'gate-matrix', 'gate-vector', 'concat']:
evaluator = Evaluator(
model_type=model_type,
batch_size=batch_size,
dataset=dataset,
prompt_id=prompt_id,
out_dir=out_dir,
dev_x=[dev_x_char, dev_x],
test_x=[test_x_char, test_x],
dev_y=dev_y,
test_y=test_y,
dev_y_org=dev_y_org,
test_y_org=test_y_org)
elif 'char' in model_type:
evaluator = Evaluator(
model_type=model_type,
batch_size=batch_size,
dataset=dataset,
prompt_id=prompt_id,
out_dir=out_dir,
dev_x=dev_x_char,
test_x=test_x_char,
dev_y=dev_y,
test_y=test_y,
dev_y_org=dev_y_org,
test_y_org=test_y_org)
else:
evaluator = Evaluator(
model_type=model_type,
batch_size=batch_size,
dataset=dataset,
prompt_id=prompt_id,
out_dir=out_dir,
dev_x=dev_x,
test_x=test_x,
dev_y=dev_y,
test_y=test_y,
dev_y_org=dev_y_org,
test_y_org=test_y_org)
logger.info(
'-------------------------------------------------------------------------'
)
logger.info('Initial Evaluation:')
evaluator.evaluate(model=model, epoch=-1, print_info=True)
total_train_time = 0
total_eval_time = 0
for ii in range(epochs):
# Training
t0 = time()
if model_type in ['gate-positional', 'gate-matrix', 'gate-vector',
'concat']:
train_history = model.fit(
[train_x_char, train_x],
train_y,
batch_size=batch_size,
epochs=1,
verbose=0)
elif 'char' in model_type:
train_history = model.fit(
train_x_char, train_y, batch_size=batch_size, epochs=1, verbose=0)
else:
train_history = model.fit(
train_x, train_y, batch_size=batch_size, epochs=1, verbose=0)
tr_time = time() - t0
total_train_time += tr_time
# Evaluate
t0 = time()
evaluator.evaluate(model=model, epoch=ii)
evaluator_time = time() - t0
total_eval_time += evaluator_time
# Print information
train_loss = train_history.history['loss'][0]
train_metric = train_history.history['mean_absolute_error'][0]
logger.info('Epoch %d, train: %is, evaluation: %is' % (ii, tr_time,
evaluator_time))
logger.info('[Train] loss: %.4f, metric: %.4f' % (train_loss,
train_metric))
evaluator.print_info()
# Summary of the results
logger.info('Training: %i seconds in total' % total_train_time)
logger.info('Evaluation: %i seconds in total' % total_eval_time)
evaluator.print_final_info()
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import datetime
from oslo_config import cfg
import six
from st2common.constants.keyvalue import FULL_SYSTEM_SCOPE, FULL_USER_SCOPE, ALLOWED_SCOPES
from st2common.constants.keyvalue import SYSTEM_SCOPE, USER_SCOPE
from st2common.exceptions.keyvalue import CryptoKeyNotSetupException, InvalidScopeException
from st2common.log import logging
from st2common.util import isotime
from st2common.util import date as date_utils
from st2common.util.crypto import read_crypto_key, symmetric_encrypt, symmetric_decrypt
from st2common.models.api.base import BaseAPI
from st2common.models.system.keyvalue import UserKeyReference
from st2common.models.db.keyvalue import KeyValuePairDB
__all__ = [
'KeyValuePairAPI',
'KeyValuePairSetAPI'
]
LOG = logging.getLogger(__name__)
class KeyValuePairAPI(BaseAPI):
crypto_setup = False
model = KeyValuePairDB
schema = {
'type': 'object',
'properties': {
'id': {
'type': 'string'
},
"uid": {
"type": "string"
},
'name': {
'type': 'string'
},
'description': {
'type': 'string'
},
'value': {
'type': 'string',
'required': True
},
'secret': {
'type': 'boolean',
'required': False,
'default': False
},
'encrypted': {
'type': 'boolean',
'required': False,
'default': False
},
'scope': {
'type': 'string',
'required': False,
'default': FULL_SYSTEM_SCOPE
},
'expire_timestamp': {
'type': 'string',
'pattern': isotime.ISO8601_UTC_REGEX
},
# Note: These values are only used for input
# TODO: Improve
'ttl': {
'type': 'integer'
}
},
'additionalProperties': False
}
@staticmethod
def _setup_crypto():
if KeyValuePairAPI.crypto_setup:
# Crypto already set up
return
LOG.info('Checking if encryption is enabled for key-value store.')
KeyValuePairAPI.is_encryption_enabled = cfg.CONF.keyvalue.enable_encryption
LOG.debug('Encryption enabled? : %s', KeyValuePairAPI.is_encryption_enabled)
if KeyValuePairAPI.is_encryption_enabled:
KeyValuePairAPI.crypto_key_path = cfg.CONF.keyvalue.encryption_key_path
LOG.info('Encryption enabled. Looking for key in path %s',
KeyValuePairAPI.crypto_key_path)
if not os.path.exists(KeyValuePairAPI.crypto_key_path):
msg = ('Encryption key file does not exist in path %s.' %
KeyValuePairAPI.crypto_key_path)
LOG.exception(msg)
LOG.info('All API requests that ask to store secrets in the key value ' +
'store will now return BAD_REQUEST.')
KeyValuePairAPI.crypto_key = None
else:
KeyValuePairAPI.crypto_key = read_crypto_key(
key_path=KeyValuePairAPI.crypto_key_path
)
KeyValuePairAPI.crypto_setup = True
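# The options read above live in the [keyvalue] section of the st2 config; a
# minimal sketch (the key path below is only an example):
#   [keyvalue]
#   enable_encryption = True
#   encryption_key_path = /etc/st2/keys/datastore_key.json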
@classmethod
def from_model(cls, model, mask_secrets=True):
if not KeyValuePairAPI.crypto_setup:
KeyValuePairAPI._setup_crypto()
doc = cls._from_model(model, mask_secrets=mask_secrets)
if getattr(model, 'expire_timestamp', None) and model.expire_timestamp:
doc['expire_timestamp'] = isotime.format(model.expire_timestamp, offset=False)
encrypted = False
secret = getattr(model, 'secret', False)
if secret:
encrypted = True
if not mask_secrets and secret:
doc['value'] = symmetric_decrypt(KeyValuePairAPI.crypto_key, model.value)
encrypted = False
scope = getattr(model, 'scope', SYSTEM_SCOPE)
if scope:
doc['scope'] = scope
key = doc.get('name', None)
if (scope == USER_SCOPE or scope == FULL_USER_SCOPE) and key:
doc['user'] = UserKeyReference.get_user(key)
doc['name'] = UserKeyReference.get_name(key)
doc['encrypted'] = encrypted
attrs = {attr: value for attr, value in six.iteritems(doc) if value is not None}
return cls(**attrs)
@classmethod
def to_model(cls, kvp):
if not KeyValuePairAPI.crypto_setup:
KeyValuePairAPI._setup_crypto()
kvp_id = getattr(kvp, 'id', None)
name = getattr(kvp, 'name', None)
description = getattr(kvp, 'description', None)
value = kvp.value
secret = False
if getattr(kvp, 'ttl', None):
expire_timestamp = (date_utils.get_datetime_utc_now() +
datetime.timedelta(seconds=kvp.ttl))
else:
expire_timestamp = None
secret = getattr(kvp, 'secret', False)
if secret:
if not KeyValuePairAPI.crypto_key:
msg = ('Crypto key not found in %s. Unable to encrypt value for key %s.' %
(KeyValuePairAPI.crypto_key_path, name))
raise CryptoKeyNotSetupException(msg)
value = symmetric_encrypt(KeyValuePairAPI.crypto_key, value)
scope = getattr(kvp, 'scope', FULL_SYSTEM_SCOPE)
if scope not in ALLOWED_SCOPES:
raise InvalidScopeException('Invalid scope "%s"! Allowed scopes are %s.' % (
scope, ALLOWED_SCOPES)
)
model = cls.model(id=kvp_id, name=name, description=description, value=value,
secret=secret, scope=scope,
expire_timestamp=expire_timestamp)
return model
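# A minimal usage sketch (hypothetical, not part of the original module) of the
# secret handling above: to_model() encrypts the value with the configured crypto
# key when secret is True, while from_model() leaves it masked unless
# mask_secrets=False is passed:
#   api = KeyValuePairAPI(name='token', value='s3cret', secret=True)
#   kvp_db = KeyValuePairAPI.to_model(api)                   # value stored encrypted
#   KeyValuePairAPI.from_model(kvp_db)                       # value still masked
#   KeyValuePairAPI.from_model(kvp_db, mask_secrets=False)   # value decrypted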
class KeyValuePairSetAPI(KeyValuePairAPI):
"""
API model for key value set operations.
"""
schema = copy.deepcopy(KeyValuePairAPI.schema)
schema['properties']['ttl'] = {
'description': 'Item TTL in seconds',
'type': 'integer'
}
schema['properties']['user'] = {
'description': ('User to whom the value should be scoped. Only applicable when '
'scope == user'),
'type': 'string',
'default': None
}
|
|
"""
Core MozTrap models (Product).
"""
import uuid
from django.core.exceptions import ValidationError
from django.db import models
from pkg_resources import parse_version
from preferences.models import Preferences
from ..environments.models import HasEnvironmentsModel
from ..mtmodel import MTModel, MTManager, TeamModel
from ..auth.models import Role, User
class Product(MTModel, TeamModel):
name = models.CharField(db_index=True, max_length=100)
description = models.TextField(blank=True)
def __unicode__(self):
return self.name
class Meta:
permissions = [
("manage_products", "Can add/edit/delete products."),
("manage_users", "Can add/edit/delete user accounts."),
]
ordering = ["name"]
def clone(self, *args, **kwargs):
"""
Clone Product, with team.
"""
kwargs.setdefault("cascade", ["team"])
overrides = kwargs.setdefault("overrides", {})
overrides.setdefault("name", "Cloned: {0}".format(self.name))
return super(Product, self).clone(*args, **kwargs)
def reorder_versions(self, update_instance=None):
"""
Reorder versions of this product, saving new order in db.
If an ``update_instance`` is given, update it with new order and
``latest`` flag.
"""
ordered = sorted(self.versions.all(), key=by_version)
for i, version in enumerate(ordered, 1):
version.order = i
version.latest = (i == len(ordered))
version.save(force_update=True, skip_reorder=True, notrack=True)
if version == update_instance:
update_instance.order = version.order
update_instance.latest = version.latest
update_instance.cc_version += 1
# now we have to update latest caseversions too, @@@ too slow?
for case in self.cases.all():
case.set_latest_version()
class ProductVersion(MTModel, TeamModel, HasEnvironmentsModel):
product = models.ForeignKey(Product, related_name="versions")
version = models.CharField(max_length=100)
codename = models.CharField(max_length=100, blank=True)
order = models.IntegerField(default=0, editable=False)
# denormalized for querying
latest = models.BooleanField(default=False, editable=False)
@property
def name(self):
"""A ProductVersion's name is its product name and version."""
return u"%s %s" % (self.product, self.version)
def __unicode__(self):
"""A ProductVersion's unicode representation is its name."""
return self.name
class Meta:
ordering = ["product", "order"]
def save(self, *args, **kwargs):
"""Save productversion, updating latest version."""
skip_reorder = kwargs.pop("skip_reorder", False)
super(ProductVersion, self).save(*args, **kwargs)
if not skip_reorder:
self.product.reorder_versions(update_instance=self)
def delete(self, *args, **kwargs):
"""Delete productversion, updating latest version."""
super(ProductVersion, self).delete(*args, **kwargs)
self.product.reorder_versions()
def undelete(self, *args, **kwargs):
"""Undelete productversion, updating latest version."""
super(ProductVersion, self).undelete(*args, **kwargs)
self.product.reorder_versions()
def clean(self):
"""
Validate uniqueness of product/version combo.
Can't use actual unique constraint due to soft-deletion; if we don't
include deleted-on in the constraint, deleted objects can cause
integrity errors; if we include deleted-on in the constraint it
nullifies the constraint entirely, since NULL != NULL in SQL.
"""
try:
dupes = ProductVersion.objects.filter(
product=self.product, version=self.version)
except Product.DoesNotExist:
# product is not set or is invalid; dupes are not an issue.
return
if self.pk is not None:
dupes = dupes.exclude(pk=self.pk)
if dupes.exists():
raise ValidationError(
"Product version '{0}' for '{1}' already exists.".format(
self.version, self.product)
)
@property
def parent(self):
return self.product
@classmethod
def cascade_envs_to(cls, objs, adding):
Run = cls.runs.related.model
CaseVersion = cls.caseversions.related.model
runs = Run.objects.filter(productversion__in=objs)
caseversions = CaseVersion.objects.filter(productversion__in=objs)
if adding:
runs = runs.filter(status=Run.STATUS.draft)
caseversions = caseversions.filter(envs_narrowed=False)
return {Run: runs, CaseVersion: caseversions}
def clone(self, *args, **kwargs):
"""
Clone ProductVersion, with ".next" version and "Cloned:" codename.
"""
overrides = kwargs.setdefault("overrides", {})
overrides["version"] = "%s.next" % self.version
overrides["codename"] = "Cloned: %s" % self.codename
kwargs.setdefault("cascade", ["environments", "team"])
return super(ProductVersion, self).clone(*args, **kwargs)
def by_version(productversion):
"""
Given a ProductVersion, return a version tuple that will order correctly.
Uses pkg_resources' ``parse_version`` function.
This function is intended to be passed to the ``key`` argument of the
``sorted`` builtin.
"""
return parse_version(productversion.version)
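# e.g. parse_version orders "1.9" before "1.10", which a plain string sort would not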
class CorePreferences(Preferences):
__module__ = "preferences.models"
default_new_user_role = models.ForeignKey(Role, blank=True, null=True)
class Meta:
verbose_name_plural = "core preferences"
class ApiKeyManager(MTManager):
use_for_related_fields = True
def active(self):
return self.get_query_set().filter(active=True)
class ApiKey(MTModel):
owner = models.ForeignKey(User, related_name="api_keys")
key = models.CharField(max_length=36, unique=True)
active = models.BooleanField(default=True, db_index=True)
objects = ApiKeyManager()
def __unicode__(self):
return self.key
@classmethod
def generate(cls, owner, user=None):
"""
Generate, save and return a new API key.
``owner`` is the owner of the new key, ``user`` is the creating user.
"""
if user is None:
user = owner
return cls.objects.create(
owner=owner, user=user, key=unicode(uuid.uuid4()))
|
|
import operator
import re
import sqlalchemy as sa
from .. import config
from .. import engines
from .. import eq_
from .. import expect_warnings
from .. import fixtures
from .. import is_
from ..provision import temp_table_keyword_args
from ..schema import Column
from ..schema import Table
from ... import event
from ... import ForeignKey
from ... import inspect
from ... import Integer
from ... import MetaData
from ... import String
from ... import testing
from ... import types as sql_types
from ...schema import DDL
from ...schema import Index
from ...sql.elements import quoted_name
from ...testing import is_false
from ...testing import is_true
metadata, users = None, None
class HasTableTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
if testing.requires.schemas.enabled:
Table(
"test_table_s",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
schema=config.test_schema,
)
def test_has_table(self):
with config.db.begin() as conn:
is_true(config.db.dialect.has_table(conn, "test_table"))
is_false(config.db.dialect.has_table(conn, "test_table_s"))
is_false(config.db.dialect.has_table(conn, "nonexistent_table"))
@testing.requires.schemas
def test_has_table_schema(self):
with config.db.begin() as conn:
is_false(
config.db.dialect.has_table(
conn, "test_table", schema=config.test_schema
)
)
is_true(
config.db.dialect.has_table(
conn, "test_table_s", schema=config.test_schema
)
)
is_false(
config.db.dialect.has_table(
conn, "nonexistent_table", schema=config.test_schema
)
)
class HasIndexTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
tt = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
Index("my_idx", tt.c.data)
if testing.requires.schemas.enabled:
tt = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
schema=config.test_schema,
)
Index("my_idx_s", tt.c.data)
def test_has_index(self):
with config.db.begin() as conn:
assert config.db.dialect.has_index(conn, "test_table", "my_idx")
assert not config.db.dialect.has_index(
conn, "test_table", "my_idx_s"
)
assert not config.db.dialect.has_index(
conn, "nonexistent_table", "my_idx"
)
assert not config.db.dialect.has_index(
conn, "test_table", "nonexistent_idx"
)
@testing.requires.schemas
def test_has_index_schema(self):
with config.db.begin() as conn:
assert config.db.dialect.has_index(
conn, "test_table", "my_idx_s", schema=config.test_schema
)
assert not config.db.dialect.has_index(
conn, "test_table", "my_idx", schema=config.test_schema
)
assert not config.db.dialect.has_index(
conn,
"nonexistent_table",
"my_idx_s",
schema=config.test_schema,
)
assert not config.db.dialect.has_index(
conn,
"test_table",
"nonexistent_idx_s",
schema=config.test_schema,
)
class ComponentReflectionTest(fixtures.TablesTest):
run_inserts = run_deletes = None
__backend__ = True
@classmethod
def setup_bind(cls):
if config.requirements.independent_connections.enabled:
from sqlalchemy import pool
return engines.testing_engine(
options=dict(poolclass=pool.StaticPool)
)
else:
return config.db
@classmethod
def define_tables(cls, metadata):
cls.define_reflected_tables(metadata, None)
if testing.requires.schemas.enabled:
cls.define_reflected_tables(metadata, testing.config.test_schema)
@classmethod
def define_reflected_tables(cls, metadata, schema):
if schema:
schema_prefix = schema + "."
else:
schema_prefix = ""
if testing.requires.self_referential_foreign_keys.enabled:
users = Table(
"users",
metadata,
Column("user_id", sa.INT, primary_key=True),
Column("test1", sa.CHAR(5), nullable=False),
Column("test2", sa.Float(5), nullable=False),
Column(
"parent_user_id",
sa.Integer,
sa.ForeignKey(
"%susers.user_id" % schema_prefix, name="user_id_fk"
),
),
schema=schema,
test_needs_fk=True,
)
else:
users = Table(
"users",
metadata,
Column("user_id", sa.INT, primary_key=True),
Column("test1", sa.CHAR(5), nullable=False),
Column("test2", sa.Float(5), nullable=False),
schema=schema,
test_needs_fk=True,
)
Table(
"dingalings",
metadata,
Column("dingaling_id", sa.Integer, primary_key=True),
Column(
"address_id",
sa.Integer,
sa.ForeignKey("%semail_addresses.address_id" % schema_prefix),
),
Column("data", sa.String(30)),
schema=schema,
test_needs_fk=True,
)
Table(
"email_addresses",
metadata,
Column("address_id", sa.Integer),
Column(
"remote_user_id", sa.Integer, sa.ForeignKey(users.c.user_id)
),
Column("email_address", sa.String(20)),
sa.PrimaryKeyConstraint("address_id", name="email_ad_pk"),
schema=schema,
test_needs_fk=True,
)
Table(
"comment_test",
metadata,
Column("id", sa.Integer, primary_key=True, comment="id comment"),
Column("data", sa.String(20), comment="data % comment"),
Column(
"d2",
sa.String(20),
comment=r"""Comment types type speedily ' " \ '' Fun!""",
),
schema=schema,
comment=r"""the test % ' " \ table comment""",
)
if testing.requires.cross_schema_fk_reflection.enabled:
if schema is None:
Table(
"local_table",
metadata,
Column("id", sa.Integer, primary_key=True),
Column("data", sa.String(20)),
Column(
"remote_id",
ForeignKey(
"%s.remote_table_2.id" % testing.config.test_schema
),
),
test_needs_fk=True,
schema=config.db.dialect.default_schema_name,
)
else:
Table(
"remote_table",
metadata,
Column("id", sa.Integer, primary_key=True),
Column(
"local_id",
ForeignKey(
"%s.local_table.id"
% config.db.dialect.default_schema_name
),
),
Column("data", sa.String(20)),
schema=schema,
test_needs_fk=True,
)
Table(
"remote_table_2",
metadata,
Column("id", sa.Integer, primary_key=True),
Column("data", sa.String(20)),
schema=schema,
test_needs_fk=True,
)
if testing.requires.index_reflection.enabled:
cls.define_index(metadata, users)
if not schema:
# test_needs_fk is used at the moment to force MySQL InnoDB
noncol_idx_test_nopk = Table(
"noncol_idx_test_nopk",
metadata,
Column("q", sa.String(5)),
test_needs_fk=True,
)
noncol_idx_test_pk = Table(
"noncol_idx_test_pk",
metadata,
Column("id", sa.Integer, primary_key=True),
Column("q", sa.String(5)),
test_needs_fk=True,
)
if testing.requires.indexes_with_ascdesc.enabled:
Index("noncol_idx_nopk", noncol_idx_test_nopk.c.q.desc())
Index("noncol_idx_pk", noncol_idx_test_pk.c.q.desc())
if testing.requires.view_column_reflection.enabled:
cls.define_views(metadata, schema)
if not schema and testing.requires.temp_table_reflection.enabled:
cls.define_temp_tables(metadata)
@classmethod
def define_temp_tables(cls, metadata):
kw = temp_table_keyword_args(config, config.db)
user_tmp = Table(
"user_tmp",
metadata,
Column("id", sa.INT, primary_key=True),
Column("name", sa.VARCHAR(50)),
Column("foo", sa.INT),
sa.UniqueConstraint("name", name="user_tmp_uq"),
sa.Index("user_tmp_ix", "foo"),
**kw
)
if (
testing.requires.view_reflection.enabled
and testing.requires.temporary_views.enabled
):
event.listen(
user_tmp,
"after_create",
DDL(
"create temporary view user_tmp_v as "
"select * from user_tmp"
),
)
event.listen(user_tmp, "before_drop", DDL("drop view user_tmp_v"))
@classmethod
def define_index(cls, metadata, users):
Index("users_t_idx", users.c.test1, users.c.test2)
Index("users_all_idx", users.c.user_id, users.c.test2, users.c.test1)
@classmethod
def define_views(cls, metadata, schema):
for table_name in ("users", "email_addresses"):
fullname = table_name
if schema:
fullname = "%s.%s" % (schema, table_name)
view_name = fullname + "_v"
query = "CREATE VIEW %s AS SELECT * FROM %s" % (
view_name,
fullname,
)
event.listen(metadata, "after_create", DDL(query))
event.listen(
metadata, "before_drop", DDL("DROP VIEW %s" % view_name)
)
@testing.requires.schema_reflection
def test_get_schema_names(self):
insp = inspect(testing.db)
self.assert_(testing.config.test_schema in insp.get_schema_names())
@testing.requires.schema_reflection
def test_dialect_initialize(self):
engine = engines.testing_engine()
inspect(engine)
assert hasattr(engine.dialect, "default_schema_name")
@testing.requires.schema_reflection
def test_get_default_schema_name(self):
insp = inspect(testing.db)
eq_(insp.default_schema_name, testing.db.dialect.default_schema_name)
@testing.provide_metadata
def _test_get_table_names(
self, schema=None, table_type="table", order_by=None
):
_ignore_tables = [
"comment_test",
"noncol_idx_test_pk",
"noncol_idx_test_nopk",
"local_table",
"remote_table",
"remote_table_2",
]
meta = self.metadata
insp = inspect(meta.bind)
if table_type == "view":
table_names = insp.get_view_names(schema)
table_names.sort()
answer = ["email_addresses_v", "users_v"]
eq_(sorted(table_names), answer)
else:
if order_by:
tables = [
rec[0]
for rec in insp.get_sorted_table_and_fkc_names(schema)
if rec[0]
]
else:
tables = insp.get_table_names(schema)
table_names = [t for t in tables if t not in _ignore_tables]
if order_by == "foreign_key":
answer = ["users", "email_addresses", "dingalings"]
eq_(table_names, answer)
else:
answer = ["dingalings", "email_addresses", "users"]
eq_(sorted(table_names), answer)
@testing.requires.temp_table_names
def test_get_temp_table_names(self):
insp = inspect(self.bind)
temp_table_names = insp.get_temp_table_names()
eq_(sorted(temp_table_names), ["user_tmp"])
@testing.requires.view_reflection
@testing.requires.temp_table_names
@testing.requires.temporary_views
def test_get_temp_view_names(self):
insp = inspect(self.bind)
temp_table_names = insp.get_temp_view_names()
eq_(sorted(temp_table_names), ["user_tmp_v"])
@testing.requires.table_reflection
def test_get_table_names(self):
self._test_get_table_names()
@testing.requires.table_reflection
@testing.requires.foreign_key_constraint_reflection
def test_get_table_names_fks(self):
self._test_get_table_names(order_by="foreign_key")
@testing.requires.comment_reflection
def test_get_comments(self):
self._test_get_comments()
@testing.requires.comment_reflection
@testing.requires.schemas
def test_get_comments_with_schema(self):
self._test_get_comments(testing.config.test_schema)
def _test_get_comments(self, schema=None):
insp = inspect(testing.db)
eq_(
insp.get_table_comment("comment_test", schema=schema),
{"text": r"""the test % ' " \ table comment"""},
)
eq_(insp.get_table_comment("users", schema=schema), {"text": None})
eq_(
[
{"name": rec["name"], "comment": rec["comment"]}
for rec in insp.get_columns("comment_test", schema=schema)
],
[
{"comment": "id comment", "name": "id"},
{"comment": "data % comment", "name": "data"},
{
"comment": (
r"""Comment types type speedily ' " \ '' Fun!"""
),
"name": "d2",
},
],
)
@testing.requires.table_reflection
@testing.requires.schemas
def test_get_table_names_with_schema(self):
self._test_get_table_names(testing.config.test_schema)
@testing.requires.view_column_reflection
def test_get_view_names(self):
self._test_get_table_names(table_type="view")
@testing.requires.view_column_reflection
@testing.requires.schemas
def test_get_view_names_with_schema(self):
self._test_get_table_names(
testing.config.test_schema, table_type="view"
)
@testing.requires.table_reflection
@testing.requires.view_column_reflection
def test_get_tables_and_views(self):
self._test_get_table_names()
self._test_get_table_names(table_type="view")
def _test_get_columns(self, schema=None, table_type="table"):
meta = MetaData(testing.db)
users, addresses = (self.tables.users, self.tables.email_addresses)
table_names = ["users", "email_addresses"]
if table_type == "view":
table_names = ["users_v", "email_addresses_v"]
insp = inspect(meta.bind)
for table_name, table in zip(table_names, (users, addresses)):
schema_name = schema
cols = insp.get_columns(table_name, schema=schema_name)
self.assert_(len(cols) > 0, len(cols))
# should be in order
for i, col in enumerate(table.columns):
eq_(col.name, cols[i]["name"])
ctype = cols[i]["type"].__class__
ctype_def = col.type
if isinstance(ctype_def, sa.types.TypeEngine):
ctype_def = ctype_def.__class__
# Oracle returns Date for DateTime.
if testing.against("oracle") and ctype_def in (
sql_types.Date,
sql_types.DateTime,
):
ctype_def = sql_types.Date
# assert that the desired type and return type share
# a base within one of the generic types.
self.assert_(
len(
set(ctype.__mro__)
.intersection(ctype_def.__mro__)
.intersection(
[
sql_types.Integer,
sql_types.Numeric,
sql_types.DateTime,
sql_types.Date,
sql_types.Time,
sql_types.String,
sql_types._Binary,
]
)
)
> 0,
"%s(%s), %s(%s)"
% (col.name, col.type, cols[i]["name"], ctype),
)
if not col.primary_key:
assert cols[i]["default"] is None
@testing.requires.table_reflection
def test_get_columns(self):
self._test_get_columns()
@testing.provide_metadata
def _type_round_trip(self, *types):
t = Table(
"t",
self.metadata,
*[Column("t%d" % i, type_) for i, type_ in enumerate(types)]
)
t.create()
return [
c["type"] for c in inspect(self.metadata.bind).get_columns("t")
]
@testing.requires.table_reflection
def test_numeric_reflection(self):
for typ in self._type_round_trip(sql_types.Numeric(18, 5)):
assert isinstance(typ, sql_types.Numeric)
eq_(typ.precision, 18)
eq_(typ.scale, 5)
@testing.requires.table_reflection
def test_varchar_reflection(self):
typ = self._type_round_trip(sql_types.String(52))[0]
assert isinstance(typ, sql_types.String)
eq_(typ.length, 52)
@testing.requires.table_reflection
@testing.provide_metadata
def test_nullable_reflection(self):
t = Table(
"t",
self.metadata,
Column("a", Integer, nullable=True),
Column("b", Integer, nullable=False),
)
t.create()
eq_(
dict(
(col["name"], col["nullable"])
for col in inspect(self.metadata.bind).get_columns("t")
),
{"a": True, "b": False},
)
@testing.requires.table_reflection
@testing.requires.schemas
def test_get_columns_with_schema(self):
self._test_get_columns(schema=testing.config.test_schema)
@testing.requires.temp_table_reflection
def test_get_temp_table_columns(self):
meta = MetaData(self.bind)
user_tmp = self.tables.user_tmp
insp = inspect(meta.bind)
cols = insp.get_columns("user_tmp")
self.assert_(len(cols) > 0, len(cols))
for i, col in enumerate(user_tmp.columns):
eq_(col.name, cols[i]["name"])
@testing.requires.temp_table_reflection
@testing.requires.view_column_reflection
@testing.requires.temporary_views
def test_get_temp_view_columns(self):
insp = inspect(self.bind)
cols = insp.get_columns("user_tmp_v")
eq_([col["name"] for col in cols], ["id", "name", "foo"])
@testing.requires.view_column_reflection
def test_get_view_columns(self):
self._test_get_columns(table_type="view")
@testing.requires.view_column_reflection
@testing.requires.schemas
def test_get_view_columns_with_schema(self):
self._test_get_columns(
schema=testing.config.test_schema, table_type="view"
)
@testing.provide_metadata
def _test_get_pk_constraint(self, schema=None):
meta = self.metadata
users, addresses = self.tables.users, self.tables.email_addresses
insp = inspect(meta.bind)
users_cons = insp.get_pk_constraint(users.name, schema=schema)
users_pkeys = users_cons["constrained_columns"]
eq_(users_pkeys, ["user_id"])
addr_cons = insp.get_pk_constraint(addresses.name, schema=schema)
addr_pkeys = addr_cons["constrained_columns"]
eq_(addr_pkeys, ["address_id"])
with testing.requires.reflects_pk_names.fail_if():
eq_(addr_cons["name"], "email_ad_pk")
@testing.requires.primary_key_constraint_reflection
def test_get_pk_constraint(self):
self._test_get_pk_constraint()
@testing.requires.table_reflection
@testing.requires.primary_key_constraint_reflection
@testing.requires.schemas
def test_get_pk_constraint_with_schema(self):
self._test_get_pk_constraint(schema=testing.config.test_schema)
@testing.provide_metadata
def _test_get_foreign_keys(self, schema=None):
meta = self.metadata
users, addresses = (self.tables.users, self.tables.email_addresses)
insp = inspect(meta.bind)
expected_schema = schema
# users
if testing.requires.self_referential_foreign_keys.enabled:
users_fkeys = insp.get_foreign_keys(users.name, schema=schema)
fkey1 = users_fkeys[0]
with testing.requires.named_constraints.fail_if():
eq_(fkey1["name"], "user_id_fk")
eq_(fkey1["referred_schema"], expected_schema)
eq_(fkey1["referred_table"], users.name)
eq_(fkey1["referred_columns"], ["user_id"])
if testing.requires.self_referential_foreign_keys.enabled:
eq_(fkey1["constrained_columns"], ["parent_user_id"])
# addresses
addr_fkeys = insp.get_foreign_keys(addresses.name, schema=schema)
fkey1 = addr_fkeys[0]
with testing.requires.implicitly_named_constraints.fail_if():
self.assert_(fkey1["name"] is not None)
eq_(fkey1["referred_schema"], expected_schema)
eq_(fkey1["referred_table"], users.name)
eq_(fkey1["referred_columns"], ["user_id"])
eq_(fkey1["constrained_columns"], ["remote_user_id"])
@testing.requires.foreign_key_constraint_reflection
def test_get_foreign_keys(self):
self._test_get_foreign_keys()
@testing.requires.foreign_key_constraint_reflection
@testing.requires.schemas
def test_get_foreign_keys_with_schema(self):
self._test_get_foreign_keys(schema=testing.config.test_schema)
@testing.requires.cross_schema_fk_reflection
@testing.requires.schemas
def test_get_inter_schema_foreign_keys(self):
local_table, remote_table, remote_table_2 = self.tables(
"%s.local_table" % testing.db.dialect.default_schema_name,
"%s.remote_table" % testing.config.test_schema,
"%s.remote_table_2" % testing.config.test_schema,
)
insp = inspect(config.db)
local_fkeys = insp.get_foreign_keys(local_table.name)
eq_(len(local_fkeys), 1)
fkey1 = local_fkeys[0]
eq_(fkey1["referred_schema"], testing.config.test_schema)
eq_(fkey1["referred_table"], remote_table_2.name)
eq_(fkey1["referred_columns"], ["id"])
eq_(fkey1["constrained_columns"], ["remote_id"])
remote_fkeys = insp.get_foreign_keys(
remote_table.name, schema=testing.config.test_schema
)
eq_(len(remote_fkeys), 1)
fkey2 = remote_fkeys[0]
assert fkey2["referred_schema"] in (
None,
testing.db.dialect.default_schema_name,
)
eq_(fkey2["referred_table"], local_table.name)
eq_(fkey2["referred_columns"], ["id"])
eq_(fkey2["constrained_columns"], ["local_id"])
@testing.requires.foreign_key_constraint_option_reflection_ondelete
def test_get_foreign_key_options_ondelete(self):
self._test_get_foreign_key_options(ondelete="CASCADE")
@testing.requires.foreign_key_constraint_option_reflection_onupdate
def test_get_foreign_key_options_onupdate(self):
self._test_get_foreign_key_options(onupdate="SET NULL")
@testing.requires.foreign_key_constraint_option_reflection_onupdate
def test_get_foreign_key_options_onupdate_noaction(self):
self._test_get_foreign_key_options(onupdate="NO ACTION", expected={})
@testing.requires.fk_constraint_option_reflection_ondelete_noaction
def test_get_foreign_key_options_ondelete_noaction(self):
self._test_get_foreign_key_options(ondelete="NO ACTION", expected={})
@testing.requires.fk_constraint_option_reflection_onupdate_restrict
def test_get_foreign_key_options_onupdate_restrict(self):
self._test_get_foreign_key_options(onupdate="RESTRICT")
@testing.requires.fk_constraint_option_reflection_ondelete_restrict
def test_get_foreign_key_options_ondelete_restrict(self):
self._test_get_foreign_key_options(ondelete="RESTRICT")
@testing.provide_metadata
def _test_get_foreign_key_options(self, expected=None, **options):
meta = self.metadata
if expected is None:
expected = options
Table(
"x",
meta,
Column("id", Integer, primary_key=True),
test_needs_fk=True,
)
Table(
"table",
meta,
Column("id", Integer, primary_key=True),
Column("x_id", Integer, sa.ForeignKey("x.id", name="xid")),
Column("test", String(10)),
test_needs_fk=True,
)
Table(
"user",
meta,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("tid", Integer),
sa.ForeignKeyConstraint(
["tid"], ["table.id"], name="myfk", **options
),
test_needs_fk=True,
)
meta.create_all()
insp = inspect(meta.bind)
# test 'options' is always present for a backend
# that can reflect these, since alembic looks for this
opts = insp.get_foreign_keys("table")[0]["options"]
eq_(dict((k, opts[k]) for k in opts if opts[k]), {})
opts = insp.get_foreign_keys("user")[0]["options"]
eq_(opts, expected)
# eq_(dict((k, opts[k]) for k in opts if opts[k]), expected)
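    # Illustrative shape (assumption, not asserted above) of one reflected FK
    # entry for the "user" table when options were given:
    #     {"name": "myfk", "constrained_columns": ["tid"],
    #      "referred_table": "table", "referred_columns": ["id"],
    #      "options": {"ondelete": "CASCADE"}}
    # i.e. "options" carries only the non-default settings the backend reports.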
def _assert_insp_indexes(self, indexes, expected_indexes):
index_names = [d["name"] for d in indexes]
for e_index in expected_indexes:
assert e_index["name"] in index_names
index = indexes[index_names.index(e_index["name"])]
for key in e_index:
eq_(e_index[key], index[key])
@testing.provide_metadata
def _test_get_indexes(self, schema=None):
meta = self.metadata
# The database may decide to create indexes for foreign keys, etc.
# so there may be more indexes than expected.
insp = inspect(meta.bind)
indexes = insp.get_indexes("users", schema=schema)
expected_indexes = [
{
"unique": False,
"column_names": ["test1", "test2"],
"name": "users_t_idx",
},
{
"unique": False,
"column_names": ["user_id", "test2", "test1"],
"name": "users_all_idx",
},
]
self._assert_insp_indexes(indexes, expected_indexes)
@testing.requires.index_reflection
def test_get_indexes(self):
self._test_get_indexes()
@testing.requires.index_reflection
@testing.requires.schemas
def test_get_indexes_with_schema(self):
self._test_get_indexes(schema=testing.config.test_schema)
@testing.provide_metadata
def _test_get_noncol_index(self, tname, ixname):
meta = self.metadata
insp = inspect(meta.bind)
indexes = insp.get_indexes(tname)
# reflecting an index that has "x DESC" in it as the column.
# the DB may or may not give us "x", but make sure we get the index
# back, it has a name, it's connected to the table.
expected_indexes = [{"unique": False, "name": ixname}]
self._assert_insp_indexes(indexes, expected_indexes)
t = Table(tname, meta, autoload_with=meta.bind)
eq_(len(t.indexes), 1)
is_(list(t.indexes)[0].table, t)
eq_(list(t.indexes)[0].name, ixname)
@testing.requires.index_reflection
@testing.requires.indexes_with_ascdesc
def test_get_noncol_index_no_pk(self):
self._test_get_noncol_index("noncol_idx_test_nopk", "noncol_idx_nopk")
@testing.requires.index_reflection
@testing.requires.indexes_with_ascdesc
def test_get_noncol_index_pk(self):
self._test_get_noncol_index("noncol_idx_test_pk", "noncol_idx_pk")
@testing.requires.indexes_with_expressions
@testing.provide_metadata
def test_reflect_expression_based_indexes(self):
Table(
"t",
self.metadata,
Column("x", String(30)),
Column("y", String(30)),
)
event.listen(
self.metadata,
"after_create",
DDL("CREATE INDEX t_idx ON t(lower(x), lower(y))"),
)
event.listen(
self.metadata, "after_create", DDL("CREATE INDEX t_idx_2 ON t(x)")
)
self.metadata.create_all()
insp = inspect(self.metadata.bind)
with expect_warnings(
"Skipped unsupported reflection of expression-based index t_idx"
):
eq_(
insp.get_indexes("t"),
[{"name": "t_idx_2", "column_names": ["x"], "unique": 0}],
)
@testing.requires.unique_constraint_reflection
def test_get_unique_constraints(self):
self._test_get_unique_constraints()
@testing.requires.temp_table_reflection
@testing.requires.unique_constraint_reflection
def test_get_temp_table_unique_constraints(self):
insp = inspect(self.bind)
reflected = insp.get_unique_constraints("user_tmp")
for refl in reflected:
# Different dialects handle duplicate index and constraints
# differently, so ignore this flag
refl.pop("duplicates_index", None)
eq_(reflected, [{"column_names": ["name"], "name": "user_tmp_uq"}])
@testing.requires.temp_table_reflection
def test_get_temp_table_indexes(self):
insp = inspect(self.bind)
indexes = insp.get_indexes("user_tmp")
for ind in indexes:
ind.pop("dialect_options", None)
eq_(
# TODO: we need to add better filtering for indexes/uq constraints
# that are doubled up
[idx for idx in indexes if idx["name"] == "user_tmp_ix"],
[
{
"unique": False,
"column_names": ["foo"],
"name": "user_tmp_ix",
}
],
)
@testing.requires.unique_constraint_reflection
@testing.requires.schemas
def test_get_unique_constraints_with_schema(self):
self._test_get_unique_constraints(schema=testing.config.test_schema)
@testing.provide_metadata
def _test_get_unique_constraints(self, schema=None):
# SQLite dialect needs to parse the names of the constraints
# separately from what it gets from PRAGMA index_list(), and
        # then matches them up, so the same set of column_names in two
        # constraints will confuse it. Perhaps we should no longer
# bother with index_list() here since we have the whole
# CREATE TABLE?
uniques = sorted(
[
{"name": "unique_a", "column_names": ["a"]},
{"name": "unique_a_b_c", "column_names": ["a", "b", "c"]},
{"name": "unique_c_a_b", "column_names": ["c", "a", "b"]},
{"name": "unique_asc_key", "column_names": ["asc", "key"]},
{"name": "i.have.dots", "column_names": ["b"]},
{"name": "i have spaces", "column_names": ["c"]},
],
key=operator.itemgetter("name"),
)
orig_meta = self.metadata
table = Table(
"testtbl",
orig_meta,
Column("a", sa.String(20)),
Column("b", sa.String(30)),
Column("c", sa.Integer),
# reserved identifiers
Column("asc", sa.String(30)),
Column("key", sa.String(30)),
schema=schema,
)
for uc in uniques:
table.append_constraint(
sa.UniqueConstraint(*uc["column_names"], name=uc["name"])
)
orig_meta.create_all()
inspector = inspect(orig_meta.bind)
reflected = sorted(
inspector.get_unique_constraints("testtbl", schema=schema),
key=operator.itemgetter("name"),
)
names_that_duplicate_index = set()
for orig, refl in zip(uniques, reflected):
# Different dialects handle duplicate index and constraints
# differently, so ignore this flag
dupe = refl.pop("duplicates_index", None)
if dupe:
names_that_duplicate_index.add(dupe)
eq_(orig, refl)
reflected_metadata = MetaData()
reflected = Table(
"testtbl",
reflected_metadata,
autoload_with=orig_meta.bind,
schema=schema,
)
# test "deduplicates for index" logic. MySQL and Oracle
# "unique constraints" are actually unique indexes (with possible
# exception of a unique that is a dupe of another one in the case
        # of Oracle). make sure they aren't duplicated.
idx_names = set([idx.name for idx in reflected.indexes])
uq_names = set(
[
uq.name
for uq in reflected.constraints
if isinstance(uq, sa.UniqueConstraint)
]
).difference(["unique_c_a_b"])
assert not idx_names.intersection(uq_names)
if names_that_duplicate_index:
eq_(names_that_duplicate_index, idx_names)
eq_(uq_names, set())
@testing.requires.check_constraint_reflection
def test_get_check_constraints(self):
self._test_get_check_constraints()
@testing.requires.check_constraint_reflection
@testing.requires.schemas
def test_get_check_constraints_schema(self):
self._test_get_check_constraints(schema=testing.config.test_schema)
@testing.provide_metadata
def _test_get_check_constraints(self, schema=None):
orig_meta = self.metadata
Table(
"sa_cc",
orig_meta,
Column("a", Integer()),
sa.CheckConstraint("a > 1 AND a < 5", name="cc1"),
sa.CheckConstraint("a = 1 OR (a > 2 AND a < 5)", name="cc2"),
schema=schema,
)
orig_meta.create_all()
inspector = inspect(orig_meta.bind)
reflected = sorted(
inspector.get_check_constraints("sa_cc", schema=schema),
key=operator.itemgetter("name"),
)
# trying to minimize effect of quoting, parenthesis, etc.
# may need to add more to this as new dialects get CHECK
# constraint reflection support
def normalize(sqltext):
return " ".join(
re.findall(r"and|\d|=|a|or|<|>", sqltext.lower(), re.I)
)
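        # For example (illustrative): normalize("((a > 1) AND (a < 5))")
        # returns "a > 1 and a < 5", which is the form asserted below.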
reflected = [
{"name": item["name"], "sqltext": normalize(item["sqltext"])}
for item in reflected
]
eq_(
reflected,
[
{"name": "cc1", "sqltext": "a > 1 and a < 5"},
{"name": "cc2", "sqltext": "a = 1 or a > 2 and a < 5"},
],
)
@testing.provide_metadata
def _test_get_view_definition(self, schema=None):
meta = self.metadata
view_name1 = "users_v"
view_name2 = "email_addresses_v"
insp = inspect(meta.bind)
v1 = insp.get_view_definition(view_name1, schema=schema)
self.assert_(v1)
v2 = insp.get_view_definition(view_name2, schema=schema)
self.assert_(v2)
@testing.requires.view_reflection
def test_get_view_definition(self):
self._test_get_view_definition()
@testing.requires.view_reflection
@testing.requires.schemas
def test_get_view_definition_with_schema(self):
self._test_get_view_definition(schema=testing.config.test_schema)
@testing.only_on("postgresql", "PG specific feature")
@testing.provide_metadata
def _test_get_table_oid(self, table_name, schema=None):
meta = self.metadata
insp = inspect(meta.bind)
oid = insp.get_table_oid(table_name, schema)
self.assert_(isinstance(oid, int))
def test_get_table_oid(self):
self._test_get_table_oid("users")
@testing.requires.schemas
def test_get_table_oid_with_schema(self):
self._test_get_table_oid("users", schema=testing.config.test_schema)
@testing.requires.table_reflection
@testing.provide_metadata
def test_autoincrement_col(self):
"""test that 'autoincrement' is reflected according to sqla's policy.
        Don't mark this test as unsupported for any backend!
(technically it fails with MySQL InnoDB since "id" comes before "id2")
A backend is better off not returning "autoincrement" at all,
instead of potentially returning "False" for an auto-incrementing
primary key column.
"""
meta = self.metadata
insp = inspect(meta.bind)
for tname, cname in [
("users", "user_id"),
("email_addresses", "address_id"),
("dingalings", "dingaling_id"),
]:
cols = insp.get_columns(tname)
id_ = {c["name"]: c for c in cols}[cname]
assert id_.get("autoincrement", True)
class NormalizedNameTest(fixtures.TablesTest):
__requires__ = ("denormalized_names",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
quoted_name("t1", quote=True),
metadata,
Column("id", Integer, primary_key=True),
)
Table(
quoted_name("t2", quote=True),
metadata,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.id")),
)
def test_reflect_lowercase_forced_tables(self):
m2 = MetaData(testing.db)
t2_ref = Table(quoted_name("t2", quote=True), m2, autoload=True)
t1_ref = m2.tables["t1"]
assert t2_ref.c.t1id.references(t1_ref.c.id)
m3 = MetaData(testing.db)
m3.reflect(only=lambda name, m: name.lower() in ("t1", "t2"))
assert m3.tables["t2"].c.t1id.references(m3.tables["t1"].c.id)
def test_get_table_names(self):
tablenames = [
t
for t in inspect(testing.db).get_table_names()
if t.lower() in ("t1", "t2")
]
eq_(tablenames[0].upper(), tablenames[0].lower())
eq_(tablenames[1].upper(), tablenames[1].lower())
class ComputedReflectionTest(fixtures.ComputedReflectionFixtureTest):
def test_computed_col_default_not_set(self):
insp = inspect(config.db)
cols = insp.get_columns("computed_column_table")
for col in cols:
if col["name"] == "with_default":
is_true("42" in col["default"])
elif not col["autoincrement"]:
is_(col["default"], None)
def test_get_column_returns_computed(self):
insp = inspect(config.db)
cols = insp.get_columns("computed_default_table")
data = {c["name"]: c for c in cols}
for key in ("id", "normal", "with_default"):
is_true("computed" not in data[key])
compData = data["computed_col"]
is_true("computed" in compData)
is_true("sqltext" in compData["computed"])
eq_(self.normalize(compData["computed"]["sqltext"]), "normal+42")
eq_(
"persisted" in compData["computed"],
testing.requires.computed_columns_reflect_persisted.enabled,
)
if testing.requires.computed_columns_reflect_persisted.enabled:
eq_(
compData["computed"]["persisted"],
testing.requires.computed_columns_default_persisted.enabled,
)
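    # Illustrative shape (assumption) of a reflected computed column entry:
    #     {"name": "computed_col", ...,
    #      "computed": {"sqltext": "normal + 42", "persisted": True}}
    # where "persisted" appears only on backends that reflect that flag.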
def check_column(self, data, column, sqltext, persisted):
is_true("computed" in data[column])
compData = data[column]["computed"]
eq_(self.normalize(compData["sqltext"]), sqltext)
if testing.requires.computed_columns_reflect_persisted.enabled:
is_true("persisted" in compData)
is_(compData["persisted"], persisted)
def test_get_column_returns_persisted(self):
insp = inspect(config.db)
cols = insp.get_columns("computed_column_table")
data = {c["name"]: c for c in cols}
self.check_column(
data,
"computed_no_flag",
"normal+42",
testing.requires.computed_columns_default_persisted.enabled,
)
if testing.requires.computed_columns_virtual.enabled:
self.check_column(
data, "computed_virtual", "normal+2", False,
)
if testing.requires.computed_columns_stored.enabled:
self.check_column(
data, "computed_stored", "normal-42", True,
)
@testing.requires.schemas
def test_get_column_returns_persisted_with_schema(self):
insp = inspect(config.db)
cols = insp.get_columns(
"computed_column_table", schema=config.test_schema
)
data = {c["name"]: c for c in cols}
self.check_column(
data,
"computed_no_flag",
"normal/42",
testing.requires.computed_columns_default_persisted.enabled,
)
if testing.requires.computed_columns_virtual.enabled:
self.check_column(
data, "computed_virtual", "normal/2", False,
)
if testing.requires.computed_columns_stored.enabled:
self.check_column(
data, "computed_stored", "normal*42", True,
)
__all__ = (
"ComponentReflectionTest",
"HasTableTest",
"HasIndexTest",
"NormalizedNameTest",
"ComputedReflectionTest",
)
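# Illustrative usage sketch (not part of the suite): how the Inspector API
# exercised above is typically consumed. Assumes an in-memory SQLite engine;
# the "widget" table is made up for the example.
def _example_inspector_usage():
    import sqlalchemy as _sa

    engine = _sa.create_engine("sqlite://")
    meta = _sa.MetaData()
    _sa.Table(
        "widget",
        meta,
        _sa.Column("id", _sa.Integer, primary_key=True),
        _sa.Column("name", _sa.String(50), unique=True),
    )
    meta.create_all(engine)
    insp = _sa.inspect(engine)
    # Each accessor returns plain dicts/lists, mirroring the assertions above.
    return {
        "columns": insp.get_columns("widget"),
        "pk": insp.get_pk_constraint("widget"),
        "uniques": insp.get_unique_constraints("widget"),
        "indexes": insp.get_indexes("widget"),
    }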
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=True
"""Worker operations executor."""
import sys
from apache_beam.internal import util
from apache_beam.metrics.execution import ScopedMetricsContainer
from apache_beam.pvalue import SideOutputValue
from apache_beam.transforms import core
from apache_beam.transforms.window import TimestampedValue
from apache_beam.transforms.window import WindowFn
from apache_beam.transforms.window import GlobalWindow
from apache_beam.utils.windowed_value import WindowedValue
class LoggingContext(object):
def enter(self):
pass
def exit(self):
pass
class Receiver(object):
"""An object that consumes a WindowedValue.
This class can be efficiently used to pass values between the
  SDK and worker harnesses.
"""
def receive(self, windowed_value):
raise NotImplementedError
class DoFnRunner(Receiver):
"""A helper class for executing ParDo operations.
"""
def __init__(self,
fn,
args,
kwargs,
side_inputs,
windowing,
context=None,
tagged_receivers=None,
logger=None,
step_name=None,
# Preferred alternative to logger
# TODO(robertwb): Remove once all runners are updated.
logging_context=None,
# Preferred alternative to context
# TODO(robertwb): Remove once all runners are updated.
state=None,
scoped_metrics_container=None):
"""Initializes a DoFnRunner.
Args:
fn: user DoFn to invoke
args: positional side input arguments (static and placeholder), if any
kwargs: keyword side input arguments (static and placeholder), if any
side_inputs: list of sideinput.SideInputMaps for deferred side inputs
windowing: windowing properties of the output PCollection(s)
context: a DoFnContext to use (deprecated)
tagged_receivers: a dict of tag name to Receiver objects
logger: a logging module (deprecated)
step_name: the name of this step
logging_context: a LoggingContext object
state: handle for accessing DoFn state
scoped_metrics_container: Context switcher for metrics container
"""
self.step_name = step_name
self.window_fn = windowing.windowfn
self.tagged_receivers = tagged_receivers
self.scoped_metrics_container = (scoped_metrics_container
or ScopedMetricsContainer())
global_window = GlobalWindow()
# Need to support multiple iterations.
side_inputs = list(side_inputs)
if logging_context:
self.logging_context = logging_context
else:
self.logging_context = get_logging_context(logger, step_name=step_name)
# Optimize for the common case.
self.main_receivers = as_receiver(tagged_receivers[None])
# TODO(sourabh): Deprecate the use of context
if state:
assert context is None
self.context = DoFnContext(self.step_name, state=state)
else:
assert context is not None
self.context = context
class ArgPlaceholder(object):
def __init__(self, placeholder):
self.placeholder = placeholder
# Stash values for use in dofn_process.
self.side_inputs = side_inputs
self.has_windowed_inputs = not all(
si.is_globally_windowed() for si in self.side_inputs)
self.args = args if args else []
self.kwargs = kwargs if kwargs else {}
self.dofn = fn
self.dofn_process = fn.process
arguments, _, _, defaults = self.dofn.get_function_arguments('process')
defaults = defaults if defaults else []
self_in_args = int(self.dofn.is_process_bounded())
self.use_simple_invoker = (
not side_inputs and not args and not kwargs and not defaults)
if self.use_simple_invoker:
# As we're using the simple invoker we don't need to compute placeholders
return
self.has_windowed_inputs = (self.has_windowed_inputs or
core.DoFn.WindowParam in defaults)
# Try to prepare all the arguments that can just be filled in
    # without any additional work in the process function.
# Also cache all the placeholders needed in the process function.
# Fill in sideInputs if they are globally windowed
if not self.has_windowed_inputs:
self.args, self.kwargs = util.insert_values_in_args(
args, kwargs, [si[global_window] for si in side_inputs])
# Create placeholder for element parameter
if core.DoFn.ElementParam not in defaults:
args_to_pick = len(arguments) - len(defaults) - 1 - self_in_args
final_args = [ArgPlaceholder(core.DoFn.ElementParam)] + \
self.args[:args_to_pick]
else:
args_to_pick = len(arguments) - len(defaults) - self_in_args
final_args = self.args[:args_to_pick]
# Fill the OtherPlaceholders for context, window or timestamp
args = iter(self.args[args_to_pick:])
for a, d in zip(arguments[-len(defaults):], defaults):
if d == core.DoFn.ElementParam:
final_args.append(ArgPlaceholder(d))
elif d == core.DoFn.ContextParam:
final_args.append(ArgPlaceholder(d))
elif d == core.DoFn.WindowParam:
final_args.append(ArgPlaceholder(d))
elif d == core.DoFn.TimestampParam:
final_args.append(ArgPlaceholder(d))
elif d == core.DoFn.SideInputParam:
# If no more args are present then the value must be passed via kwarg
try:
final_args.append(args.next())
except StopIteration:
if a not in self.kwargs:
raise ValueError("Value for sideinput %s not provided" % a)
else:
# If no more args are present then the value must be passed via kwarg
try:
final_args.append(args.next())
except StopIteration:
pass
final_args.extend(list(args))
self.args = final_args
# Stash the list of placeholder positions for performance
self.placeholders = [(i, x.placeholder) for (i, x) in enumerate(self.args)
if isinstance(x, ArgPlaceholder)]
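  # Sketch of the placeholder bookkeeping above (illustrative assumption): for
  # a DoFn whose signature is
  #     def process(self, element, side_value, window=DoFn.WindowParam)
  # called with one static positional arg, self.args ends up as
  #     [ArgPlaceholder(ElementParam), side_value, ArgPlaceholder(WindowParam)]
  # and self.placeholders records [(0, ElementParam), (2, WindowParam)], which
  # is what _dofn_per_window_invoker patches per element/window below.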
def receive(self, windowed_value):
self.process(windowed_value)
def _dofn_simple_invoker(self, element):
self._process_outputs(element, self.dofn_process(element.value))
def _dofn_per_window_invoker(self, element):
if self.has_windowed_inputs:
window, = element.windows
args, kwargs = util.insert_values_in_args(
self.args, self.kwargs, [si[window] for si in self.side_inputs])
else:
args, kwargs = self.args, self.kwargs
# TODO(sourabhbajaj): Investigate why we can't use `is` instead of ==
for i, p in self.placeholders:
if p == core.DoFn.ElementParam:
args[i] = element.value
elif p == core.DoFn.ContextParam:
args[i] = self.context
elif p == core.DoFn.WindowParam:
args[i] = window
elif p == core.DoFn.TimestampParam:
args[i] = element.timestamp
if not kwargs:
self._process_outputs(element, self.dofn_process(*args))
else:
self._process_outputs(element, self.dofn_process(*args, **kwargs))
def _dofn_invoker(self, element):
self.context.set_element(element)
    # Call the process function once per window if the DoFn has windowed side
    # inputs or accesses the window parameter; otherwise a single call is
    # enough, since none of the arguments change across windows.
if self.has_windowed_inputs and len(element.windows) != 1:
for w in element.windows:
self._dofn_per_window_invoker(
WindowedValue(element.value, element.timestamp, (w,)))
else:
self._dofn_per_window_invoker(element)
def _invoke_bundle_method(self, method):
try:
self.logging_context.enter()
self.scoped_metrics_container.enter()
self.context.set_element(None)
f = getattr(self.dofn, method)
_, _, _, defaults = self.dofn.get_function_arguments(method)
defaults = defaults if defaults else []
args = [self.context if d == core.DoFn.ContextParam else d
for d in defaults]
self._process_outputs(None, f(*args))
except BaseException as exn:
self.reraise_augmented(exn)
finally:
self.scoped_metrics_container.exit()
self.logging_context.exit()
def start(self):
self._invoke_bundle_method('start_bundle')
def finish(self):
self._invoke_bundle_method('finish_bundle')
def process(self, element):
try:
self.logging_context.enter()
self.scoped_metrics_container.enter()
if self.use_simple_invoker:
self._dofn_simple_invoker(element)
else:
self._dofn_invoker(element)
except BaseException as exn:
self.reraise_augmented(exn)
finally:
self.scoped_metrics_container.exit()
self.logging_context.exit()
def reraise_augmented(self, exn):
if getattr(exn, '_tagged_with_step', False) or not self.step_name:
raise
args = exn.args
if args and isinstance(args[0], str):
args = (args[0] + " [while running '%s']" % self.step_name,) + args[1:]
# Poor man's exception chaining.
raise type(exn), args, sys.exc_info()[2]
else:
raise
def _process_outputs(self, windowed_input_element, results):
"""Dispatch the result of computation to the appropriate receivers.
A value wrapped in a SideOutputValue object will be unwrapped and
then dispatched to the appropriate indexed output.
"""
if results is None:
return
for result in results:
tag = None
if isinstance(result, SideOutputValue):
tag = result.tag
if not isinstance(tag, basestring):
raise TypeError('In %s, tag %s is not a string' % (self, tag))
result = result.value
if isinstance(result, WindowedValue):
windowed_value = result
if (windowed_input_element is not None
and len(windowed_input_element.windows) != 1):
windowed_value.windows *= len(windowed_input_element.windows)
elif windowed_input_element is None:
# Start and finish have no element from which to grab context,
# but may emit elements.
if isinstance(result, TimestampedValue):
value = result.value
timestamp = result.timestamp
assign_context = NoContext(value, timestamp)
else:
value = result
timestamp = -1
assign_context = NoContext(value)
windowed_value = WindowedValue(
value, timestamp, self.window_fn.assign(assign_context))
elif isinstance(result, TimestampedValue):
assign_context = WindowFn.AssignContext(result.timestamp, result.value)
windowed_value = WindowedValue(
result.value, result.timestamp,
self.window_fn.assign(assign_context))
if len(windowed_input_element.windows) != 1:
windowed_value.windows *= len(windowed_input_element.windows)
else:
windowed_value = windowed_input_element.with_value(result)
if tag is None:
self.main_receivers.receive(windowed_value)
else:
self.tagged_receivers[tag].output(windowed_value)
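  # Dispatch summary for _process_outputs (descriptive only): a SideOutputValue
  # merely selects the tagged receiver and is unwrapped first; a WindowedValue
  # passes through; a TimestampedValue is re-windowed via window_fn.assign; any
  # other value inherits the input element's timestamp and windows.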
class NoContext(WindowFn.AssignContext):
"""An uninspectable WindowFn.AssignContext."""
NO_VALUE = object()
def __init__(self, value, timestamp=NO_VALUE):
self.value = value
self._timestamp = timestamp
@property
def timestamp(self):
if self._timestamp is self.NO_VALUE:
raise ValueError('No timestamp in this context.')
else:
return self._timestamp
@property
def existing_windows(self):
raise ValueError('No existing_windows in this context.')
class DoFnState(object):
"""Keeps track of state that DoFns want, currently, user counters.
"""
def __init__(self, counter_factory):
self.step_name = ''
self._counter_factory = counter_factory
def counter_for(self, aggregator):
"""Looks up the counter for this aggregator, creating one if necessary."""
return self._counter_factory.get_aggregator_counter(
self.step_name, aggregator)
# TODO(robertwb): Replace core.DoFnContext with this.
class DoFnContext(object):
def __init__(self, label, element=None, state=None):
self.label = label
self.state = state
if element is not None:
self.set_element(element)
def set_element(self, windowed_value):
self.windowed_value = windowed_value
@property
def element(self):
if self.windowed_value is None:
raise AttributeError('element not accessible in this context')
else:
return self.windowed_value.value
@property
def timestamp(self):
if self.windowed_value is None:
raise AttributeError('timestamp not accessible in this context')
else:
return self.windowed_value.timestamp
@property
def windows(self):
if self.windowed_value is None:
raise AttributeError('windows not accessible in this context')
else:
return self.windowed_value.windows
# TODO(robertwb): Remove all these adapters once the service is updated.
class _LoggingContextAdapter(LoggingContext):
def __init__(self, underlying):
self.underlying = underlying
def enter(self):
self.underlying.enter()
def exit(self):
self.underlying.exit()
def get_logging_context(maybe_logger, **kwargs):
if maybe_logger:
maybe_context = maybe_logger.PerThreadLoggingContext(**kwargs)
if isinstance(maybe_context, LoggingContext):
return maybe_context
else:
return _LoggingContextAdapter(maybe_context)
else:
return LoggingContext()
class _ReceiverAdapter(Receiver):
def __init__(self, underlying):
self.underlying = underlying
def receive(self, windowed_value):
self.underlying.output(windowed_value)
def as_receiver(maybe_receiver):
if isinstance(maybe_receiver, Receiver):
return maybe_receiver
else:
return _ReceiverAdapter(maybe_receiver)
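# Minimal sketch (assumption, for illustration only): how the adapters above
# normalize a legacy object exposing ``output`` into the Receiver interface.
def _example_as_receiver():
  class _LegacyEmitter(object):
    def __init__(self):
      self.emitted = []

    def output(self, windowed_value):
      self.emitted.append(windowed_value)

  legacy = _LegacyEmitter()
  receiver = as_receiver(legacy)  # wrapped in a _ReceiverAdapter
  receiver.receive('windowed-value')
  return legacy.emitted  # ['windowed-value']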
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import custom_gradient
from tensorflow.python.eager import tape
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import training
class BackpropTest(test.TestCase):
def testAggregateGradients(self):
def fn(x):
ind1 = constant_op.constant(np.array([0, 1]))
ind2 = constant_op.constant(np.array([2, 3]))
ind3 = constant_op.constant(np.array([1, 3]))
# A mixture of IndexedSlices and dense tensor to aggregate.
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = embedding_ops.embedding_lookup(x, ind2)
g3 = embedding_ops.embedding_lookup(x, ind3)
g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
return g1 * g2 * g3 * g4
var_np = np.random.rand(4, 2).astype(np.float32)
var = constant_op.constant(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
grad = ops.convert_to_tensor(grad).numpy()
with context.graph_mode(), self.test_session():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_ind2 = array_ops.constant([2, 3])
tf_ind3 = array_ops.constant([1, 3])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
tf_dense_grad = math_ops.unsorted_segment_sum(
tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])
self.assertAllClose(grad, tf_dense_grad.eval())
def testImplicitGradWithResourceVariable(self):
x = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(1.0), name='x')
def fn():
tape.watch_variable(x)
b = constant_op.constant(2.0)
c = math_ops.add(x.value(), b)
return math_ops.add(c, constant_op.constant(3.0))
grads_and_vars = backprop.implicit_grad(fn)()
self.assertAllEqual(grads_and_vars[0][0], 1.0)
self.assertAllEqual(id(grads_and_vars[0][1]), id(x))
def testDy(self):
def f(x):
return x
grad_fn = backprop.gradients_function(f)
self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
def testErrors(self):
@custom_gradient.custom_gradient
def f(x):
def grad(_):
raise RuntimeError('x')
return x, grad
# TODO(apassos) raise the right error here
with self.assertRaises(errors_impl.InternalError):
backprop.gradients_function(f)(constant_op.constant(1.0))
def testImplicitGradOverEmbeddingLookup(self):
batch_size = 8
embedding_size = 512
vocab_size = 1000
lrn_rate = 0.1
random_init = random_ops.random_uniform([vocab_size, embedding_size])
x = array_ops.ones((batch_size), dtypes.int64)
embedding = resource_variable_ops.ResourceVariable(
initial_value=random_init, dtype=dtypes.float32, name='embedding')
def f():
tape.watch_variable(embedding)
embedded_x = embedding_ops.embedding_lookup(embedding, x)
return constant_op.constant(1.0, dtypes.float32) - embedded_x
grad = backprop.implicit_grad(f)()[0][0]
opt = training.GradientDescentOptimizer(lrn_rate)
with context.graph_mode(), self.test_session():
tf_x = array_ops.ones((batch_size), dtypes.int64)
# TODO(ashankar,apassos): Change to ResourceVariable.
tf_embedding = variables.Variable(
random_init.numpy(), name='tf_embedding')
tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x)
tf_y = 1.0 - tf_embedded_x
tf_grad = gradients.gradients(tf_y, [tf_embedding])[0]
tf_opt = training.GradientDescentOptimizer(0.1)
tf_embedding.initializer.run()
self.assertAllClose(tf_grad.indices.eval(), grad.indices)
self.assertAllClose(tf_grad.values.eval(), grad.values)
tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()
expected = tf_embedding.eval()
opt.apply_gradients([(grad, embedding)])
self.assertAllClose(expected, embedding.read_value())
def testGradientNone(self):
def loss(x, l):
return math_ops.reduce_mean(
nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l),
constant_op.constant([0]))
logits = constant_op.constant([[0.0, 0.0]])
labels = constant_op.constant([[1.0, 0.0]])
# softmax_cross_entropy_with_logits returns two outputs and in this case the
# gradient wrt the second is None.
g, = backprop.gradients_function(loss, [0])(logits, labels)
self.assertAllEqual(g.numpy(), [[-0.5, 0.5]])
def testSecondGrad(self):
def first(x):
l = constant_op.constant([[0.0]])
x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x)
x = math_ops.reduce_sum(x, constant_op.constant([0]))
return x
def second(x):
grad = backprop.gradients_function(first, [0])(x)[0]
return math_ops.reduce_sum(grad, constant_op.constant([0]))
f = constant_op.constant([[0.1]])
grad = backprop.gradients_function(second, [0])(f)[0]
self.assertAllEqual([[0.0]], grad)
def testMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f)
result, vjp = wrapped_fn(constant_op.constant(3.0))
self.assertAllEqual(result, 9.0)
self.assertAllEqual(vjp(2.0)[0], 12.0)
def testGradGrad(self):
def sq(x):
return x * x
def grad(x):
value = backprop.gradients_function(sq, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(3.0))[0], 2.0)
def testGradGradExp(self):
def grad(x):
value = backprop.gradients_function(math_ops.exp, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(0.0))[0], 1.0)
def testGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
def fn(x):
with context.device('/gpu:0'):
b = constant_op.constant(2.0)
c = math_ops.add(x.gpu(), b)
        # TODO(apassos): remove cpu below by making TensorVSpace aware
# of devices.
return math_ops.add(c, constant_op.constant(3.0)).cpu()
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
def testGPUImplicitGrad(self):
if not context.context().num_gpus():
self.skipTest('No GPU found')
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(
constant_op.constant(1.0), name='v')
def f():
with context.device('gpu:0'):
tape.watch_variable(v)
return v.read_value()
self.assertEqual(
backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0)
def testCPU(self):
def fn(x):
b = constant_op.constant(2.0)
c = math_ops.add(x, b)
return math_ops.add(c, constant_op.constant(3.0))
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
def testTensorCopyGPU2CPU2GPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
def f(a, b):
return a.cpu() + b.cpu()
with context.device('/gpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
def testEmptyParams(self):
def fn(a, b):
return a * b
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
dx, dy = backprop.gradients_function(fn)(x, y)
self.assertAllEqual(dx, y.numpy())
self.assertAllEqual(dy, x.numpy())
def testUnconnectedNone(self):
v = resource_variable_ops.ResourceVariable(
1.0, name='testUnconnectedNone')
def f():
v.read_value()
return constant_op.constant(1.0)
self.assertEqual(backprop.implicit_grad(f)()[0][0], None)
def testGradientTape(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
with backprop.GradientTape() as gg:
gg.watch(y)
z = 2 * y
inner_grad = gg.gradient(z, [y])[0]
self.assertEqual(inner_grad.numpy(), 2.0)
y += inner_grad
grad = g.gradient(y, [x])[0]
self.assertEqual(grad.numpy(), 6.0)
def testGradientTapeGradientCalledMultipleTimes(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
z = y * y
g.gradient(z, [x])
with self.assertRaisesRegexp(
RuntimeError, 'GradientTape.gradient can only be called once'):
g.gradient(y, [x])
def testGradientTapeVariable(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
with backprop.GradientTape() as g:
y = v * v
grad = g.gradient(y, [v])[0]
self.assertAllEqual(grad, 2.0)
def testEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grads_fn = backprop.val_and_grad_function(fn)
x = 2.0
y = 3.0
val, (dx, dy) = val_and_grads_fn(x, y)
self.assertAllClose(val, x * y)
self.assertAllEqual(dx, y)
self.assertAllEqual(dy, x)
def testNonEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grad_fn = backprop.val_and_grad_function(fn, params=[1])
x = 2.0
y = 3.0
val, grads = val_and_grad_fn(x, y)
self.assertAllClose(val, x * y)
self.assertEqual(1, len(grads))
self.assertAllEqual(grads[0], x)
def testTensorCopyCPU2GPU2CPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu)
# back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu)
def f(a, b):
with context.device('/gpu:0'):
c = math_ops.add(a.gpu(0), b.gpu(0))
return math_ops.add(c.cpu(), constant_op.constant(3.0))
with context.device('/cpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
def testGetAttrType(self):
typ = backprop.op_attr_type('Add', 'T')
self.assertEqual(typ, pywrap_tensorflow.TF_ATTR_TYPE)
def testGetAttrList(self):
typ = backprop.op_attr_type('MaxPool', 'ksize')
self.assertEqual(typ, [pywrap_tensorflow.TF_ATTR_INT])
def testMakeAttrType(self):
self.assertEqual(dtypes.float32,
backprop.make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1))
def testMakeAttrTypeList(self):
self.assertEqual([dtypes.float32],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1]))
def testMulType(self):
def mul(x):
return math_ops._mul_dispatch(x, x) # pylint: disable=protected-access
self.assertAllEqual(
backprop.gradients_function(mul)(3.0)[0].numpy(),
6.0)
def testMakeAttrShape(self):
for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]):
expected = tensor_shape.TensorShape(s).as_proto()
actual = backprop.make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, s)
self.assertEqual(
expected,
actual,
msg=('For shape %r, expected %r != %r actual' % (s, expected,
actual)))
def testMakeAttrShapeList(self):
shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]]
self.assertEqual(
[tensor_shape.TensorShape(s).as_proto() for s in shape_list],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_SHAPE], shape_list))
def testArgsGradientFunction(self):
def f(*args):
return args[0] * args[0]
grad = backprop.gradients_function(f)
self.assertAllEqual(grad(1.0)[0], 2.0)
def testPartial(self):
def f(x, y):
return x * y
part = functools.partial(f, constant_op.constant(2.0))
self.assertAllEqual(
backprop.gradients_function(part)(constant_op.constant(1.0))[0],
2.0)
def testReturnSameThing(self):
def f(x):
return x, 2 * x
self.assertAllEqual(backprop.gradients_function(f)(1.0)[0], 3.0)
def testExceptionSafety(self):
def f(unused_x):
raise ValueError()
try:
backprop.gradients_function(f)(1.0)
except ValueError:
pass
def real_f(x):
return x * x
self.assertAllEqual(backprop.gradients_function(real_f)(1.0)[0], 2.0)
def testMultiValueConvertToTensor(self):
x = resource_variable_ops.ResourceVariable(
initial_value=array_ops.constant([1.0]), name='x')
def fn():
tape.watch_variable(x)
a = math_ops.add(x.value(), 1.0)
# Make sure convert_to_tensor works correctly with list of TensorNodes.
b = array_ops.stack([a, a], axis=0)
return math_ops.reduce_mean(b)
grad = backprop.implicit_grad(fn)()[0][0]
self.assertAllEqual([1.0], grad)
def testOutput(self):
def multiout(x):
return x + 2, x * x
x = constant_op.constant([0.0, 1.0, 2.0])
grad = backprop.gradients_function(multiout)(x)[0]
self.assertAllEqual([1.0, 3.0, 5.0], grad)
def testMultiValuePreservesIfNotDiffedAgainst(self):
def tfe_conv2d(timage, tkernel, conv2dstrides):
return nn_ops.conv2d(timage, tkernel, conv2dstrides, 'SAME')
i = constant_op.constant([[[[1.0]]]])
k = constant_op.constant([[[[2.0]]]])
s = [1, 1, 1, 1]
grad = backprop.gradients_function(tfe_conv2d, params=(0,))(i, k, s)[0]
self.assertAllEqual([[[[2.0]]]], grad)
def testSameObjectForMultipleArguments(self):
def f(x, y):
return math_ops.multiply(x, y)
g = backprop.gradients_function(f)
def np_g(x, y):
dx, dy = g(x, y)
return [dx.numpy(), dy.numpy()]
x = constant_op.constant(1.)
self.assertAllEqual([1., 1.], np_g(x, x))
x = 1.
self.assertAllEqual([1., 1.], np_g(x, x))
x = constant_op.constant([[1.]])
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
x = [[1.]]
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
v = resource_variable_ops.ResourceVariable(
initial_value=1., name='testSameObjectForMultipleArguments.Variable')
self.assertAllEqual([1., 1.], np_g(v, v))
def testImplicitGradientsCustomGradientAndCachedVariableValue(self):
@custom_gradient.custom_gradient
def my_square(x):
result = math_ops.square(x)
def grad(dr):
return 2 * dr * x + 1
return result, grad
x = resource_variable_ops.ResourceVariable(
initial_value=3, name='X.' + self.id())
def f():
return my_square(x)
g = backprop.implicit_grad(f)
grads_and_vars = g()
self.assertEqual(1, len(grads_and_vars))
grad, var = grads_and_vars[0]
self.assertAllEqual(7, grad)
self.assertAllEqual(x, var)
def testCustomGradient(self):
@custom_gradient.custom_gradient
def my_mul(x, y):
result = x*y
def grad(dr):
return [dr*y, dr*x]
return result, grad
lr = 0.25
x = resource_variable_ops.ResourceVariable(2., name='x')
def loss(x):
return my_mul(2., x.read_value())
loss_grads_fn = backprop.implicit_val_and_grad(loss)
losses = []
for _ in range(5):
loss, grads_and_vars = loss_grads_fn(x)
losses.append(loss.numpy())
for (grad, var) in grads_and_vars:
var.assign_sub(lr*grad)
self.assertAllEqual(losses, [4.0, 3., 2., 1., 0.])
def testCustomGradientIdentity(self):
@custom_gradient.custom_gradient
def my_identity(x):
def grad(dresult):
return [2 * dresult]
return x, grad
self.assertAllEqual(backprop.gradients_function(my_identity)(1.0)[0], 2.0)
def testDifferentiatingFunctionThatReturnsNone(self):
def fn(x, y):
result = x*y # pylint: disable=unused-variable
x = constant_op.constant(1)
y = constant_op.constant(2)
loss_grads_fn = backprop.implicit_val_and_grad(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
loss_grads_fn(x, y)
val_and_grads_fn = backprop.val_and_grad_function(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
val_and_grads_fn(x, y)
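# Illustrative sketch (not part of the test suite): the two gradient APIs the
# tests above exercise, shown on f(x) = x**2. Assumes eager execution.
def _example_gradient_apis():
  x = constant_op.constant(3.0)
  # Functional style: build df/dx for argument 0 and call it.
  dydx = backprop.gradients_function(lambda t: t * t, [0])(x)[0]
  # Tape style: record the forward computation, then ask for the gradient.
  with backprop.GradientTape() as g:
    g.watch(x)
    y = x * x
  dydx_tape = g.gradient(y, [x])[0]
  return dydx, dydx_tape  # both evaluate to 6.0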
if __name__ == '__main__':
test.main()
"""The tests for the MQTT statestream component."""
from unittest.mock import ANY, call, patch
from homeassistant.setup import setup_component
import homeassistant.components.mqtt_statestream as statestream
from homeassistant.core import State
from tests.common import (
get_test_home_assistant,
mock_mqtt_component,
mock_state_change_event
)
class TestMqttStateStream:
"""Test the MQTT statestream module."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.mock_mqtt = mock_mqtt_component(self.hass)
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def add_statestream(self, base_topic=None, publish_attributes=None,
publish_timestamps=None, publish_include=None,
publish_exclude=None):
"""Add a mqtt_statestream component."""
config = {}
if base_topic:
config['base_topic'] = base_topic
if publish_attributes:
config['publish_attributes'] = publish_attributes
if publish_timestamps:
config['publish_timestamps'] = publish_timestamps
if publish_include:
config['include'] = publish_include
if publish_exclude:
config['exclude'] = publish_exclude
return setup_component(self.hass, statestream.DOMAIN, {
statestream.DOMAIN: config})
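    # Illustrative example (not used by the tests directly): calling
    # add_statestream(base_topic='pub', publish_timestamps=True) is equivalent
    # to configuring
    #
    #   mqtt_statestream:
    #     base_topic: pub
    #     publish_timestamps: true
    #
    # which is what gets passed to setup_component above.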
def test_fails_with_no_base(self):
"""Setup should fail if no base_topic is set."""
assert self.add_statestream() is False
def test_setup_succeeds_without_attributes(self):
"""Test the success of the setup with a valid base_topic."""
assert self.add_statestream(base_topic='pub')
def test_setup_succeeds_with_attributes(self):
"""Test setup with a valid base_topic and publish_attributes."""
assert self.add_statestream(base_topic='pub', publish_attributes=True)
@patch('homeassistant.components.mqtt.async_publish')
@patch('homeassistant.core.dt_util.utcnow')
def test_state_changed_event_sends_message(self, mock_utcnow, mock_pub):
"""Test the sending of a new message if event changed."""
e_id = 'fake.entity'
base_topic = 'pub'
# Add the statestream component for publishing state updates
assert self.add_statestream(base_topic=base_topic)
self.hass.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mock_pub.reset_mock()
# Set a state of an entity
mock_state_change_event(self.hass, State(e_id, 'on'))
self.hass.block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mock_pub.assert_called_with(self.hass, 'pub/fake/entity/state', 'on',
1, True)
assert mock_pub.called
@patch('homeassistant.components.mqtt.async_publish')
@patch('homeassistant.core.dt_util.utcnow')
def test_state_changed_event_sends_message_and_timestamp(
self,
mock_utcnow,
mock_pub):
"""Test the sending of a message and timestamps if event changed."""
e_id = 'another.entity'
base_topic = 'pub'
# Add the statestream component for publishing state updates
assert self.add_statestream(base_topic=base_topic,
publish_attributes=None,
publish_timestamps=True)
self.hass.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mock_pub.reset_mock()
# Set a state of an entity
mock_state_change_event(self.hass, State(e_id, 'on'))
self.hass.block_till_done()
        # Make sure 'on' was published to pub/another/entity/state
calls = [
call.async_publish(self.hass, 'pub/another/entity/state', 'on', 1,
True),
call.async_publish(self.hass, 'pub/another/entity/last_changed',
ANY, 1, True),
call.async_publish(self.hass, 'pub/another/entity/last_updated',
ANY, 1, True),
]
mock_pub.assert_has_calls(calls, any_order=True)
assert mock_pub.called
@patch('homeassistant.components.mqtt.async_publish')
@patch('homeassistant.core.dt_util.utcnow')
def test_state_changed_attr_sends_message(self, mock_utcnow, mock_pub):
"""Test the sending of a new message if attribute changed."""
e_id = 'fake.entity'
base_topic = 'pub'
# Add the statestream component for publishing state updates
assert self.add_statestream(base_topic=base_topic,
publish_attributes=True)
self.hass.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mock_pub.reset_mock()
test_attributes = {
"testing": "YES",
"list": ["a", "b", "c"],
"bool": False
}
# Set a state of an entity
mock_state_change_event(self.hass, State(e_id, 'off',
attributes=test_attributes))
self.hass.block_till_done()
        # Make sure 'off' and its attributes were published under pub/fake/entity/
calls = [
call.async_publish(self.hass, 'pub/fake/entity/state', 'off', 1,
True),
call.async_publish(self.hass, 'pub/fake/entity/testing', '"YES"',
1, True),
call.async_publish(self.hass, 'pub/fake/entity/list',
'["a", "b", "c"]', 1, True),
call.async_publish(self.hass, 'pub/fake/entity/bool', "false",
1, True)
]
mock_pub.assert_has_calls(calls, any_order=True)
assert mock_pub.called
@patch('homeassistant.components.mqtt.async_publish')
@patch('homeassistant.core.dt_util.utcnow')
def test_state_changed_event_include_domain(self, mock_utcnow, mock_pub):
"""Test that filtering on included domain works as expected."""
base_topic = 'pub'
incl = {
'domains': ['fake']
}
excl = {}
# Add the statestream component for publishing state updates
# Set the filter to allow fake.* items
assert self.add_statestream(base_topic=base_topic,
publish_include=incl,
publish_exclude=excl)
self.hass.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mock_pub.reset_mock()
# Set a state of an entity
mock_state_change_event(self.hass, State('fake.entity', 'on'))
self.hass.block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mock_pub.assert_called_with(self.hass, 'pub/fake/entity/state', 'on',
1, True)
assert mock_pub.called
mock_pub.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(self.hass, State('fake2.entity', 'on'))
self.hass.block_till_done()
assert not mock_pub.called
@patch('homeassistant.components.mqtt.async_publish')
@patch('homeassistant.core.dt_util.utcnow')
def test_state_changed_event_include_entity(self, mock_utcnow, mock_pub):
"""Test that filtering on included entity works as expected."""
base_topic = 'pub'
incl = {
'entities': ['fake.entity']
}
excl = {}
# Add the statestream component for publishing state updates
# Set the filter to allow fake.* items
assert self.add_statestream(base_topic=base_topic,
publish_include=incl,
publish_exclude=excl)
self.hass.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mock_pub.reset_mock()
# Set a state of an entity
mock_state_change_event(self.hass, State('fake.entity', 'on'))
self.hass.block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mock_pub.assert_called_with(self.hass, 'pub/fake/entity/state', 'on',
1, True)
assert mock_pub.called
mock_pub.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(self.hass, State('fake.entity2', 'on'))
self.hass.block_till_done()
assert not mock_pub.called
@patch('homeassistant.components.mqtt.async_publish')
@patch('homeassistant.core.dt_util.utcnow')
def test_state_changed_event_exclude_domain(self, mock_utcnow, mock_pub):
"""Test that filtering on excluded domain works as expected."""
base_topic = 'pub'
incl = {}
excl = {
'domains': ['fake2']
}
# Add the statestream component for publishing state updates
# Set the filter to allow fake.* items
assert self.add_statestream(base_topic=base_topic,
publish_include=incl,
publish_exclude=excl)
self.hass.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mock_pub.reset_mock()
# Set a state of an entity
mock_state_change_event(self.hass, State('fake.entity', 'on'))
self.hass.block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mock_pub.assert_called_with(self.hass, 'pub/fake/entity/state', 'on',
1, True)
assert mock_pub.called
mock_pub.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(self.hass, State('fake2.entity', 'on'))
self.hass.block_till_done()
assert not mock_pub.called
@patch('homeassistant.components.mqtt.async_publish')
@patch('homeassistant.core.dt_util.utcnow')
def test_state_changed_event_exclude_entity(self, mock_utcnow, mock_pub):
"""Test that filtering on excluded entity works as expected."""
base_topic = 'pub'
incl = {}
excl = {
'entities': ['fake.entity2']
}
# Add the statestream component for publishing state updates
# Set the filter to allow fake.* items
assert self.add_statestream(base_topic=base_topic,
publish_include=incl,
publish_exclude=excl)
self.hass.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mock_pub.reset_mock()
# Set a state of an entity
mock_state_change_event(self.hass, State('fake.entity', 'on'))
self.hass.block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mock_pub.assert_called_with(self.hass, 'pub/fake/entity/state', 'on',
1, True)
assert mock_pub.called
mock_pub.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(self.hass, State('fake.entity2', 'on'))
self.hass.block_till_done()
assert not mock_pub.called
@patch('homeassistant.components.mqtt.async_publish')
@patch('homeassistant.core.dt_util.utcnow')
def test_state_changed_event_exclude_domain_include_entity(
self, mock_utcnow, mock_pub):
"""Test filtering with excluded domain and included entity."""
base_topic = 'pub'
incl = {
'entities': ['fake.entity']
}
excl = {
'domains': ['fake']
}
# Add the statestream component for publishing state updates
# Set the filter to allow fake.* items
assert self.add_statestream(base_topic=base_topic,
publish_include=incl,
publish_exclude=excl)
self.hass.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mock_pub.reset_mock()
# Set a state of an entity
mock_state_change_event(self.hass, State('fake.entity', 'on'))
self.hass.block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mock_pub.assert_called_with(self.hass, 'pub/fake/entity/state', 'on',
1, True)
assert mock_pub.called
mock_pub.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(self.hass, State('fake.entity2', 'on'))
self.hass.block_till_done()
assert not mock_pub.called
@patch('homeassistant.components.mqtt.async_publish')
@patch('homeassistant.core.dt_util.utcnow')
def test_state_changed_event_include_domain_exclude_entity(
self, mock_utcnow, mock_pub):
"""Test filtering with included domain and excluded entity."""
base_topic = 'pub'
incl = {
'domains': ['fake']
}
excl = {
'entities': ['fake.entity2']
}
# Add the statestream component for publishing state updates
# Set the filter to allow fake.* items
assert self.add_statestream(base_topic=base_topic,
publish_include=incl,
publish_exclude=excl)
self.hass.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mock_pub.reset_mock()
# Set a state of an entity
mock_state_change_event(self.hass, State('fake.entity', 'on'))
self.hass.block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mock_pub.assert_called_with(self.hass, 'pub/fake/entity/state', 'on',
1, True)
assert mock_pub.called
mock_pub.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(self.hass, State('fake.entity2', 'on'))
self.hass.block_till_done()
assert not mock_pub.called
from __future__ import absolute_import
from django import forms
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, AdminPasswordChangeForm
from django.core.exceptions import PermissionDenied
from django.db import transaction
from django.http import Http404, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.translation import ugettext, ugettext_lazy as _
from pprint import saferepr
from sentry.models import (
ApiKey,
AuthIdentity,
AuthProvider,
AuditLogEntry,
Option,
Organization,
OrganizationMember,
Project,
Team,
User,
)
from sentry.utils.html import escape
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
class OptionAdmin(admin.ModelAdmin):
list_display = ("key", "last_updated")
fields = ("key", "value_repr", "last_updated")
readonly_fields = ("key", "value_repr", "last_updated")
search_fields = ("key",)
def value_repr(self, instance):
return u'<pre style="display:inline-block;white-space:pre-wrap;">{}</pre>'.format(
escape(saferepr(instance.value))
)
value_repr.short_description = "Value"
value_repr.allow_tags = True
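    # value_repr shows the stored Option value as an HTML-escaped repr inside a
    # <pre> block, so arbitrary pickled/JSON values stay readable (and safe to
    # render) on the admin pages.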
admin.site.register(Option, OptionAdmin)
class ProjectAdmin(admin.ModelAdmin):
list_display = ("name", "slug", "organization", "status", "date_added")
list_filter = ("status", "public")
search_fields = ("name", "organization__slug", "organization__name", "slug")
raw_id_fields = ("organization",)
readonly_fields = ("first_event", "date_added")
admin.site.register(Project, ProjectAdmin)
class OrganizationApiKeyInline(admin.TabularInline):
model = ApiKey
extra = 1
fields = ("label", "key", "status", "allowed_origins", "date_added")
raw_id_fields = ("organization",)
class OrganizationProjectInline(admin.TabularInline):
model = Project
extra = 1
fields = ("name", "slug", "status", "date_added")
raw_id_fields = ("organization",)
class OrganizationTeamInline(admin.TabularInline):
model = Team
extra = 1
fields = ("name", "slug", "status", "date_added")
raw_id_fields = ("organization",)
class OrganizationMemberInline(admin.TabularInline):
model = OrganizationMember
extra = 1
fields = ("user", "organization", "role")
raw_id_fields = ("user", "organization")
class OrganizationUserInline(OrganizationMemberInline):
fk_name = "user"
class AuthIdentityInline(admin.TabularInline):
model = AuthIdentity
extra = 1
fields = ("user", "auth_provider", "ident", "data", "last_verified")
raw_id_fields = ("user", "auth_provider")
class OrganizationAdmin(admin.ModelAdmin):
list_display = ("name", "slug", "status")
list_filter = ("status",)
search_fields = ("name", "slug")
fields = ("name", "slug", "status")
inlines = (
OrganizationMemberInline,
OrganizationTeamInline,
OrganizationProjectInline,
OrganizationApiKeyInline,
)
admin.site.register(Organization, OrganizationAdmin)
class AuthProviderAdmin(admin.ModelAdmin):
list_display = ("organization", "provider", "date_added")
search_fields = ("organization__name",)
raw_id_fields = ("organization", "default_teams")
list_filter = ("provider",)
admin.site.register(AuthProvider, AuthProviderAdmin)
class AuthIdentityAdmin(admin.ModelAdmin):
list_display = ("user", "auth_provider", "ident", "date_added", "last_verified")
list_filter = ("auth_provider__provider",)
search_fields = ("user__email", "user__username", "auth_provider__organization__name")
raw_id_fields = ("user", "auth_provider")
admin.site.register(AuthIdentity, AuthIdentityAdmin)
class TeamAdmin(admin.ModelAdmin):
list_display = ("name", "slug", "organization", "status", "date_added")
list_filter = ("status",)
search_fields = ("name", "organization__name", "slug")
raw_id_fields = ("organization",)
def save_model(self, request, obj, form, change):
prev_org = obj.organization_id
super(TeamAdmin, self).save_model(request, obj, form, change)
if not change:
return
new_org = obj.organization_id
        if new_org == prev_org:
            return
obj.transfer_to(obj.organization)
admin.site.register(Team, TeamAdmin)
class UserChangeForm(UserChangeForm):
username = forms.RegexField(
label=_("Username"),
max_length=128,
regex=r"^[\w.@+-]+$",
help_text=_("Required. 128 characters or fewer. Letters, digits and " "@/./+/-/_ only."),
error_messages={
"invalid": _(
"This value may contain only letters, numbers and " "@/./+/-/_ characters."
)
},
)
class UserCreationForm(UserCreationForm):
username = forms.RegexField(
label=_("Username"),
max_length=128,
regex=r"^[\w.@+-]+$",
help_text=_("Required. 128 characters or fewer. Letters, digits and " "@/./+/-/_ only."),
error_messages={
"invalid": _(
"This value may contain only letters, numbers and " "@/./+/-/_ characters."
)
},
)
class UserAdmin(admin.ModelAdmin):
add_form_template = "admin/auth/user/add_form.html"
change_user_password_template = None
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("name", "email")}),
(_("Permissions"), {"fields": ("is_active", "is_staff", "is_superuser")}),
(_("Important dates"), {"fields": ("last_login", "date_joined")}),
)
add_fieldsets = (
(None, {"classes": ("wide",), "fields": ("username", "password1", "password2")}),
)
form = UserChangeForm
add_form = UserCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ("username", "email", "name", "is_staff", "date_joined")
list_filter = ("is_staff", "is_superuser", "is_active", "is_managed")
search_fields = ("username", "name", "email")
ordering = ("username",)
inlines = (OrganizationUserInline, AuthIdentityInline)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(UserAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults.update(
{"form": self.add_form, "fields": admin.util.flatten_fieldsets(self.add_fieldsets)}
)
defaults.update(kwargs)
return super(UserAdmin, self).get_form(request, obj, **defaults)
def get_urls(self):
return [
url(r"^(\d+)/password/$", self.admin_site.admin_view(self.user_change_password))
] + super(UserAdmin, self).get_urls()
def lookup_allowed(self, lookup, value):
# See #20078: we don't want to allow any lookups involving passwords.
if lookup.startswith("password"):
return False
return super(UserAdmin, self).lookup_allowed(lookup, value)
@sensitive_post_parameters_m
@csrf_protect_m
@transaction.atomic
def add_view(self, request, form_url="", extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
"order to add users, Django requires that your user "
'account have both the "Add user" and "Change user" '
"permissions set."
)
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
defaults = {"auto_populated_fields": (), "username_help_text": username_field.help_text}
extra_context.update(defaults)
return super(UserAdmin, self).add_view(request, form_url, extra_context)
@sensitive_post_parameters_m
def user_change_password(self, request, id, form_url=""):
if not self.has_change_permission(request):
raise PermissionDenied
        user = get_object_or_404(self.get_queryset(request), pk=id)
if request.method == "POST":
form = self.change_password_form(user, request.POST)
if form.is_valid():
form.save()
msg = ugettext("Password changed successfully.")
messages.success(request, msg)
return HttpResponseRedirect("..")
else:
form = self.change_password_form(user)
fieldsets = [(None, {"fields": list(form.base_fields)})]
adminForm = admin.helpers.AdminForm(form, fieldsets, {})
context = {
"title": _("Change password: %s") % escape(user.get_username()),
"adminForm": adminForm,
"form_url": form_url,
"form": form,
"is_popup": "_popup" in request.GET,
"add": True,
"change": False,
"has_delete_permission": False,
"has_change_permission": True,
"has_absolute_url": False,
"opts": self.model._meta,
"original": user,
"save_as": False,
"show_save": True,
}
return TemplateResponse(
request,
self.change_user_password_template or "admin/auth/user/change_password.html",
context,
current_app=self.admin_site.name,
)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if "_addanother" not in request.POST and "_popup" not in request.POST:
request.POST["_continue"] = 1
return super(UserAdmin, self).response_add(request, obj, post_url_continue)
admin.site.register(User, UserAdmin)
class AuditLogEntryAdmin(admin.ModelAdmin):
list_display = ("event", "organization", "actor", "datetime")
list_filter = ("event", "datetime")
search_fields = ("actor__email", "organization__name", "organization__slug")
raw_id_fields = ("organization", "actor", "target_user")
readonly_fields = (
"organization",
"actor",
"actor_key",
"target_object",
"target_user",
"event",
"ip_address",
"data",
"datetime",
)
admin.site.register(AuditLogEntry, AuditLogEntryAdmin)
|
|
import unittest
from reactivex import Observable
from reactivex.disposable import SerialDisposable
from reactivex.operators import filter, filter_indexed
from reactivex.testing import ReactiveTest, TestScheduler
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
    pass
def is_prime(i):
    # Local helper predicate used by the tests below; reactivex.testing does
    # not provide an is_prime helper.
    if i <= 1:
        return False
    for j in range(2, int(i ** 0.5) + 1):
        if i % j == 0:
            return False
    return True
def test_is_prime():
assert not is_prime(1)
assert is_prime(2)
assert is_prime(3)
assert not is_prime(4)
assert is_prime(5)
assert not is_prime(6)
class TestFilter(unittest.TestCase):
def test_filter_complete(self):
scheduler = TestScheduler()
invoked = [0]
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
on_next(610, 12),
on_error(620, "ex"),
on_completed(630),
)
def create() -> Observable[int]:
def predicate(x: int) -> bool:
invoked[0] += 1
return is_prime(x)
return xs.pipe(filter(predicate))
results = scheduler.start(create)
assert results.messages == [
on_next(230, 3),
on_next(340, 5),
on_next(390, 7),
on_next(580, 11),
on_completed(600),
]
assert xs.subscriptions == [subscribe(200, 600)]
assert invoked[0] == 9
def test_filter_true(self):
scheduler = TestScheduler()
invoked = [0]
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
)
def create() -> Observable[int]:
def predicate(x: int) -> bool:
invoked[0] += 1
return True
return xs.pipe(filter(predicate))
results = scheduler.start(create)
assert results.messages == [
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
]
assert xs.subscriptions == [subscribe(200, 600)]
assert invoked[0] == 9
def test_filter_false(self):
scheduler = TestScheduler()
invoked = [0]
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
)
def create() -> Observable[int]:
def predicate(x: int) -> bool:
invoked[0] += 1
return False
return xs.pipe(filter(predicate))
results = scheduler.start(create)
assert results.messages == [on_completed(600)]
assert xs.subscriptions == [subscribe(200, 600)]
assert invoked[0] == 9
def test_filter_dispose(self):
scheduler = TestScheduler()
invoked = [0]
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
)
def create():
def predicate(x: int) -> bool:
invoked[0] += 1
return is_prime(x)
return xs.pipe(filter(predicate))
results = scheduler.start(create, disposed=400)
assert results.messages == [on_next(230, 3), on_next(340, 5), on_next(390, 7)]
assert xs.subscriptions == [subscribe(200, 400)]
assert invoked[0] == 5
def test_filter_error(self):
scheduler = TestScheduler()
invoked = [0]
ex = "ex"
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_error(600, ex),
on_next(610, 12),
on_error(620, "ex"),
on_completed(630),
)
def create():
def predicate(x: int) -> bool:
invoked[0] += 1
return is_prime(x)
return xs.pipe(filter(predicate))
results = scheduler.start(create)
assert results.messages == [
on_next(230, 3),
on_next(340, 5),
on_next(390, 7),
on_next(580, 11),
on_error(600, ex),
]
assert xs.subscriptions == [subscribe(200, 600)]
assert invoked[0] == 9
def test_filter_on_error(self):
scheduler = TestScheduler()
invoked = [0]
ex = "ex"
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
on_next(610, 12),
on_error(620, "ex"),
on_completed(630),
)
def create():
def predicate(x: int) -> bool:
invoked[0] += 1
if x > 5:
raise Exception(ex)
return is_prime(x)
return xs.pipe(filter(predicate))
results = scheduler.start(create)
assert results.messages == [on_next(230, 3), on_next(340, 5), on_error(380, ex)]
assert xs.subscriptions == [subscribe(200, 380)]
assert invoked[0] == 4
def test_filter_dispose_in_predicate(self):
scheduler = TestScheduler()
invoked = [0]
ys = [None]
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
on_next(610, 12),
on_error(620, "ex"),
on_completed(630),
)
results = scheduler.create_observer()
d = SerialDisposable()
def action(scheduler, state):
def predicate(x):
invoked[0] += 1
if x == 8:
d.dispose()
return is_prime(x)
ys[0] = xs.pipe(filter(predicate))
return ys[0]
scheduler.schedule_absolute(created, action)
def action1(scheduler, state):
d.disposable = ys[0].subscribe(results)
scheduler.schedule_absolute(subscribed, action1)
def action2(scheduler, state):
d.dispose()
scheduler.schedule_absolute(disposed, action2)
scheduler.start()
assert results.messages == [on_next(230, 3), on_next(340, 5), on_next(390, 7)]
assert xs.subscriptions == [subscribe(200, 450)]
assert invoked[0] == 6
def test_filter_indexed_complete(self):
scheduler = TestScheduler()
invoked = [0]
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
on_next(610, 12),
on_error(620, "ex"),
on_completed(630),
)
def create():
def predicate(x, index):
invoked[0] += 1
return is_prime(x + index * 10)
return xs.pipe(filter_indexed(predicate))
results = scheduler.start(create)
assert results.messages == [on_next(230, 3), on_next(390, 7), on_completed(600)]
assert xs.subscriptions == [subscribe(200, 600)]
assert invoked[0] == 9
def test_filter_indexed_true(self):
scheduler = TestScheduler()
invoked = [0]
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
)
def create():
def predicate(x, index):
invoked[0] += 1
return True
return xs.pipe(filter_indexed(predicate))
results = scheduler.start(create)
assert results.messages == [
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
]
assert xs.subscriptions == [subscribe(200, 600)]
assert invoked[0] == 9
def test_filter_indexed_false(self):
scheduler = TestScheduler()
invoked = [0]
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
)
def create():
def predicate(x, index):
invoked[0] += 1
return False
return xs.pipe(filter_indexed(predicate))
results = scheduler.start(create)
assert results.messages == [on_completed(600)]
assert xs.subscriptions == [subscribe(200, 600)]
assert invoked[0] == 9
def test_filter_indexed_dispose(self):
scheduler = TestScheduler()
invoked = [0]
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
)
def create():
def predicate(x, index):
invoked[0] += 1
return is_prime(x + index * 10)
return xs.pipe(filter_indexed(predicate))
results = scheduler.start(create, disposed=400)
assert results.messages == [on_next(230, 3), on_next(390, 7)]
assert xs.subscriptions == [subscribe(200, 400)]
assert invoked[0] == 5
def test_filter_indexed_error(self):
scheduler = TestScheduler()
invoked = [0]
ex = "ex"
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_error(600, ex),
on_next(610, 12),
on_error(620, "ex"),
on_completed(630),
)
def create():
def predicate(x, index):
invoked[0] += 1
return is_prime(x + index * 10)
return xs.pipe(filter_indexed(predicate))
results = scheduler.start(create)
assert results.messages == [on_next(230, 3), on_next(390, 7), on_error(600, ex)]
assert xs.subscriptions == [subscribe(200, 600)]
assert invoked[0] == 9
def test_filter_indexed_on_error(self):
scheduler = TestScheduler()
invoked = [0]
ex = "ex"
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
on_next(610, 12),
on_error(620, "ex"),
on_completed(630),
)
def create():
def predicate(x, index):
invoked[0] += 1
if x > 5:
raise Exception(ex)
return is_prime(x + index * 10)
return xs.pipe(filter_indexed(predicate))
results = scheduler.start(create)
assert results.messages == [on_next(230, 3), on_error(380, ex)]
assert xs.subscriptions == [subscribe(200, 380)]
assert invoked[0] == 4
def test_filter_indexed_dispose_in_predicate(self):
scheduler = TestScheduler()
ys = [None]
invoked = [0]
xs = scheduler.create_hot_observable(
on_next(110, 1),
on_next(180, 2),
on_next(230, 3),
on_next(270, 4),
on_next(340, 5),
on_next(380, 6),
on_next(390, 7),
on_next(450, 8),
on_next(470, 9),
on_next(560, 10),
on_next(580, 11),
on_completed(600),
on_next(610, 12),
on_error(620, "ex"),
on_completed(630),
)
results = scheduler.create_observer()
d = SerialDisposable()
def action1(scheduler, state):
def predicate(x, index):
invoked[0] += 1
if x == 8:
d.dispose()
return is_prime(x + index * 10)
ys[0] = xs.pipe(filter_indexed(predicate))
scheduler.schedule_absolute(created, action1)
def action2(scheduler, state):
d.disposable = ys[0].subscribe(results)
scheduler.schedule_absolute(subscribed, action2)
def action3(scheduler, state):
d.dispose()
scheduler.schedule_absolute(disposed, action3)
scheduler.start()
assert results.messages == [on_next(230, 3), on_next(390, 7)]
assert xs.subscriptions == [subscribe(200, 450)]
assert invoked[0] == 6
|
|
import requests
import bs4
import datetime
import json
import sys
parser = ''
try:
import lxml
parser = 'lxml'
except ImportError:
parser = 'html.parser'
try:
with open(sys.path[0] + '/proxy.config', 'r') as f:
proxies = f.read()
proxy_dict = {"http": proxies,
"https": proxies,
}
except:
import urllib
proxy_dict = urllib.getproxies()
months = {
1: "JANUARY",
2: "FEBRUARY",
3: "MARCH",
4: "APRIL",
5: "MAY",
6: "JUNE",
7: "JULY",
8: "AUGUST",
9: "SEPTEMBER",
10: "OCTOBER",
11: "NOVEMBER",
12: "DECEMBER"
}
class Barclay(object):
def get_news_headlines(self, get_club_news=False, type_return='string'):
"""
Parameters
----------
        get_club_news : bool, defaults to False
            If True, also returns news about clubs.
        type_return : str, specifies the return type
            Defaults to `string`
        Returns
        -------
        str or dict : a collection of news headlines mapped to their links
"""
url = "http://www.premierleague.com/en-gb.html"
res = requests.get(url, stream=True, proxies=proxy_dict)
soup = bs4.BeautifulSoup(res.text, parser)
all_updated = False
news_headline = []
news_list = soup.select('.newsfeaturetitle')
for i in range(len(news_list)):
news_headline.append(str(news_list[i].text))
news_url = []
urls = soup.select('.newsfeature a')
for i in urls:
news_url.append("http://www.premierleague.com/" + i.get('href'))
if get_club_news is True:
news_list = soup.select('.feed li a')
for i in range(len(news_list)):
news_headline.append(str(news_list[i].text))
urls = soup.select('.feed li a')
for i in urls:
news_url.append(i.get('href'))
return_dict = {}
for i, name in enumerate(news_headline):
return_dict[name] = news_url[i]
if type_return == 'dict':
return return_dict
return str(return_dict)
def next3Fixtures(self, type_return='string'):
now = datetime.datetime.now()
url = "http://www.premierleague.com/en-gb/matchday/league-table.html?season=2015-2016&month=" +\
months[now.month] + \
"&timelineView=date&toDate=1451433599999&tableView=NEXT_3_FIXTURES"
        res = requests.get(url, stream=True, proxies=proxy_dict)
        soup = bs4.BeautifulSoup(res.text, parser)
        team_names = soup.select('.next3FixturesTable')
for i in range(len(team_names)):
team_names[i] = str(team_names[i].text)
next_3_fixtures = soup.select('.club-row .col-fixture')
for i in range(len(next_3_fixtures)):
next_3_fixtures[i] = str(next_3_fixtures[i].text)
return_dict = {}
for i in range(len(team_names)):
return_dict[team_names[i]] = next_3_fixtures[i]
if type_return == 'dict':
return return_dict
return str(return_dict)
def pointsTable(self, type_return='string'):
url = 'http://www.premierleague.com/en-gb/matchday/league-table.html'
res = requests.get(url, stream=True, proxies=proxy_dict)
soup = bs4.BeautifulSoup(res.text, parser)
        team_name = soup.select('.leagueTable-Club')
        for i in range(len(team_name)):
            team_name[i] = str(team_name[i].text)
        matches_played = soup.select('.leagueTable-P')
        for i in range(len(matches_played)):
            matches_played[i] = str(matches_played[i].text)
        matches_won = soup.select('.leagueTable-W')
        for i in range(len(matches_won)):
            matches_won[i] = str(matches_won[i].text)
        matches_drew = soup.select('.leagueTable-D')
        for i in range(len(matches_drew)):
            matches_drew[i] = str(matches_drew[i].text)
        matches_lost = soup.select('.leagueTable-L')
        for i in range(len(matches_lost)):
            matches_lost[i] = str(matches_lost[i].text)
        goals_difference = soup.select('.leagueTable-GD')
        for i in range(len(goals_difference)):
            goals_difference[i] = str(goals_difference[i].text)
        points = soup.select('.leagueTable-Pts')
        for i in range(len(points)):
            points[i] = str(points[i].text)
return_dict = {}
for i in range(len(team_name)):
return_dict[team_name[i]] = [matches_played[i], matches_won[
i], matches_drew[i], matches_lost[i], goals_difference[i], points[i]]
if type_return == 'dict':
return return_dict
return str(return_dict)
def topScorers(self, type_return='string'):
url = "http://www.premierleague.com/en-gb.html"
res = requests.get(url, stream=True, proxies=proxy_dict)
soup = bs4.BeautifulSoup(res.text, parser)
top_scorers = soup.select(
'.statsranking-topscorers .statsranking-table .statsranking-name a')
for i in range(len(top_scorers)):
top_scorers[i] = str(top_scorers[i].text)
top_scorers_goals = []
top_scorers_temp = soup.select(
'.statsranking-topscorers .statsranking-table tbody tr td')
for i in range(2, len(top_scorers_temp), 3):
top_scorers_goals.append(str(top_scorers_temp[i].text))
return_dict = {}
for i in range(len(top_scorers)):
return_dict[top_scorers[i]] = top_scorers_goals[i]
if type_return == 'dict':
return return_dict
return str(return_dict)
def Fixtures(self, type_return='string'):
url = "http://www.premierleague.com/en-gb/matchday/matches.html?paramClubId=ALL¶mComp_8=true&view=.dateSeason"
res = requests.get(url, stream=True, proxies=proxy_dict)
soup = bs4.BeautifulSoup(res.text, parser)
fixtures_time = []
fixtures_location = []
fixtures_clubs = []
fixture_table = soup.select('.contentTable')
for tables in fixture_table:
date = tables.select('th')
date[0] = str(date[0].text)
fixtures_t = tables.select('.time')
for i in range(len(fixtures_t)):
fixtures_time.append(str(fixtures_t[i].text) + ', ' + date[0])
fixtures_c = tables.select('.clubs a')
for i in range(len(fixtures_c)):
fixtures_clubs.append(str(fixtures_c[i].text))
fixtures_l = tables.select('.location a')
for i in range(len(fixtures_l)):
fixtures_location.append(str(fixtures_l[i].text))
temp = list(zip(fixtures_clubs, fixtures_time, fixtures_location))
if type_return == 'dict':
return temp
return str(temp)
def Results(self, type_return='string'):
url = "http://www.premierleague.com/en-gb.html"
res = requests.get(url, stream=True, proxies=proxy_dict)
soup = bs4.BeautifulSoup(res.text, parser)
results_time = soup.select('.megamenu-date span')
for i in range(len(results_time)):
results_time[i] = str(results_time[i].text)
results_time = results_time[0:20]
results_time.reverse()
results_clubs = soup.select('.megamenu-matchName span')
for i in range(len(results_clubs)):
results_clubs[i] = str(results_clubs[i].text)
results_clubs = results_clubs[0:60]
results_clubs_temp = []
for i in range(20):
j = i * 3
results_clubs_temp.append(
[results_clubs[j], results_clubs[j + 1], results_clubs[j + 2]])
results_clubs = results_clubs_temp
results_clubs.reverse()
results_location = soup.select('.megamenu-venue')
for i in range(len(results_location)):
results_location[i] = str(results_location[i].text)
results_location = results_location[0:20]
results_location.reverse()
val_return = list(zip(results_time, results_clubs, results_location))
if type_return == 'dict':
return val_return
return str(val_return)
def liveScore(self, type_return='string'):
self.url = 'http://www.premierleague.com/en-gb.html'
self.res = requests.get(self.url, stream=True, proxies=proxy_dict)
self.soup = bs4.BeautifulSoup(self.res.text, parser)
        matches = self.soup.select('.live .megamenu-matchName')
live_matches = []
for i in matches:
temp = i.text.split()
temp = ' '.join(temp)
live_matches.append(temp)
if type_return == 'dict':
return live_matches
return str(live_matches)
def playerStats(self, name, type_return='string'):
try:
self.url = 'http://www.premierleague.com/en-gb/players/profile.html/' + name
self.res = requests.get(self.url, stream=True, proxies=proxy_dict)
self.soup = bs4.BeautifulSoup(self.res.text, parser)
stats = self.soup.select('.left td')
temp = []
statsDict = {}
for i in stats:
temp.append(i.text)
statsDict[temp[0]] = temp[1]
statsDict[temp[8]] = temp[9]
statsDict[temp[12]] = temp[13]
statsDict[temp[16]] = temp[17]
if type_return == 'dict':
return statsDict
return str(statsDict)
except:
raise ValueError('Name not found, enter a valid name of player!')
if __name__ == '__main__':
obj = Barclay()
print(obj.get_news_headlines(type_return='dict'))
print(obj.pointsTable('dict'))
print(obj.topScorers('dict'))
print(obj.Fixtures())
print(obj.Results())
print(obj.playerStats('harry-kane'))
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import json
import functools
import itertools
import resource
import contextlib
import time
import sys
import lupa
from splash.browser_tab import JsError
from splash.lua_runner import (
BaseScriptRunner,
ScriptError,
ImmediateResult,
AsyncCommand
)
from splash.qtrender import RenderScript, stop_on_error
from splash.lua import get_main, get_main_sandboxed
from splash.har.qt import reply2har, request2har
from splash.render_options import BadOption, RenderOptions
from splash.utils import truncated, BinaryCapsule
from splash.qtutils import (
REQUEST_ERRORS_SHORT,
drop_request,
set_request_url,
create_proxy
)
from splash.lua_runtime import SplashLuaRuntime
class AsyncBrowserCommand(AsyncCommand):
def __repr__(self):
kwargs = self.kwargs.copy()
if 'callback' in kwargs:
kwargs['callback'] = '<a callback>'
if 'errback' in kwargs:
kwargs['errback'] = '<an errback>'
kwargs_repr = truncated(repr(kwargs), 400, "...[long kwargs truncated]")
return "%s(id=%r, name=%r, kwargs=%s)" % (self.__class__.__name__, self.id, self.name, kwargs_repr)
def command(async=False, can_raise_async=False, table_argument=False,
sets_callback=False):
""" Decorator for marking methods as commands available to Lua """
if sets_callback:
table_argument = True
def decorator(meth):
if not table_argument:
meth = lupa.unpacks_lua_table_method(meth)
if sets_callback:
meth = first_argument_from_storage(meth)
meth = exceptions_as_return_values(
can_raise(
emits_lua_objects(meth)
)
)
meth._is_command = True
meth._is_async = async
meth._can_raise_async = can_raise_async
meth._sets_callback = sets_callback
return meth
return decorator
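# Illustrative sketch (mirrors how @command is used on the Splash class below):
# a method is exposed to Lua by decorating it, optionally marking it as async.
#
#     @command(async=True)
#     def wait(self, time, ...):
#         ...
#
#     @command()
#     def html(self):
#         return self.tab.html()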
def lua_property(name):
""" Decorator for marking methods that make attributes available to Lua """
def decorator(meth):
def setter(method):
meth._setter_method = method.__name__
return method
meth._is_lua_property = True
meth._name = name
meth.lua_setter = setter
return meth
return decorator
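# Illustrative sketch (mirrors the js_enabled property defined on Splash below):
# the getter is registered with @lua_property and its setter with
# @<getter>.lua_setter.
#
#     @lua_property('js_enabled')
#     def get_js_enabled(self):
#         return self.tab.get_js_enabled()
#
#     @get_js_enabled.lua_setter
#     def set_js_enabled(self, value):
#         self.tab.set_js_enabled(value)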
def emits_lua_objects(meth):
"""
This decorator makes method convert results to
native Lua formats when possible
"""
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
res = meth(self, *args, **kwargs)
py2lua = self.lua.python2lua
if isinstance(res, tuple):
return tuple(py2lua(r) for r in res)
else:
return py2lua(res)
return wrapper
def first_argument_from_storage(meth):
"""
Methods decorated with ``first_argument_from_storage`` decorator
take a value from self.tmp_storage and use it
as a first argument. It is a workaround for Lupa issue
(see https://github.com/scoder/lupa/pull/49).
"""
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
arg = self.tmp_storage[1]
del self.tmp_storage[1]
return meth(self, arg, *args, **kwargs)
return wrapper
def is_command(meth):
    """ Return True if the method is an exposed Lua command """
    return getattr(meth, '_is_command', False)
def is_lua_property(meth):
    """ Return True if the method is exposed as a Lua attribute """
    return getattr(meth, '_is_lua_property', False)
def can_raise(meth):
"""
Decorator for preserving Python exceptions raised in Python
methods called from Lua.
"""
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
try:
return meth(self, *args, **kwargs)
except ScriptError as e:
self._exceptions.append(e)
raise
except BaseException as e:
self._exceptions.append(ScriptError(e))
raise
return wrapper
def exceptions_as_return_values(meth):
"""
Decorator for allowing Python exceptions to be caught from Lua.
It makes wrapped methods return ``True, result`` and ``False, repr(exception)``
pairs instead of raising an exception; Lua script should handle it itself
and raise an error when needed. In Splash this is done by
splash/lua_modules/splash.lua unwraps_errors decorator.
"""
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
try:
result = meth(self, *args, **kwargs)
if isinstance(result, tuple):
return (True,) + result
else:
return True, result
except Exception as e:
return False, repr(e)
wrapper._returns_error_flag = True
return wrapper
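# Illustrative sketch of the wrapping contract (values are hypothetical): a
# decorated method that returns 42 yields (True, 42) to the Lua side, while one
# that raises ValueError("boom") yields a (False, "ValueError('boom')")-style
# pair instead of propagating the exception.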
def get_commands(obj):
"""
    Inspect a Python object and get a dictionary of all its commands
    which were made available to Lua using the @command decorator.
"""
commands = {}
for name in dir(obj):
value = getattr(obj, name)
if is_command(value):
commands[name] = {
'is_async': getattr(value, '_is_async'),
'returns_error_flag': getattr(value, '_returns_error_flag', False),
'can_raise_async': getattr(value, '_can_raise_async', False),
'sets_callback': getattr(value, '_sets_callback', False),
}
return commands
def get_lua_properties(obj):
"""
    Inspect a Python object and get a dictionary of all Lua properties and
    their getter and setter methods which were made available to Lua using
    the @lua_property and @<getter_method_name>.lua_setter decorators.
"""
lua_properties = {}
for name in dir(obj):
value = getattr(obj, name)
if is_lua_property(value):
setter_method = getattr(value, '_setter_method')
lua_properties[setter_method] = {
'name': getattr(value, '_name'),
'getter_method': name,
}
return lua_properties
class _WrappedJavascriptFunction(object):
"""
    JavaScript function wrapper. It allows calling JS functions
    with arguments.
"""
def __init__(self, splash, source):
"""
        :param Splash splash: Splash object providing the Lua runtime and browser tab
        :param str source: function source code
"""
self.lua = splash.lua
self.tab = splash.tab
self.source = source
self._exceptions = splash._exceptions
@exceptions_as_return_values
@can_raise
@emits_lua_objects
def __call__(self, *args):
args = self.lua.lua2python(args)
args_text = json.dumps(args, ensure_ascii=False, encoding="utf8")[1:-1]
func_text = json.dumps([self.source], ensure_ascii=False, encoding='utf8')[1:-1]
wrapper_script = """
(function(func_text){
try{
var func = eval("(" + func_text + ")");
return {
result: func(%(args)s),
error: false,
}
}
catch(e){
return {
error: true,
error_repr: e.toString(),
}
}
})(%(func_text)s)
""" % {"func_text": func_text, "args": args_text}
# print(wrapper_script)
res = self.tab.evaljs(wrapper_script)
if not isinstance(res, dict):
raise ScriptError("[lua] unknown error during JS function call: %r; %r" % (res, wrapper_script))
if res["error"]:
raise ScriptError("[lua] error during JS function call: %r" % (res.get("error_repr", "<unknown error>"),))
return res.get("result")
class Splash(object):
"""
This object is passed to Lua script as an argument to 'main' function
(wrapped in 'Splash' Lua object; see :file:`splash/lua_modules/splash.lua`).
"""
_result_content_type = None
_attribute_whitelist = ['commands', 'args', 'tmp_storage',
'lua_properties']
def __init__(self, lua, tab, render_options=None):
"""
:param SplashLuaRuntime lua: Lua wrapper
:param splash.browser_tab.BrowserTab tab: BrowserTab object
:param splash.render_options.RenderOptions render_options: arguments
"""
self.tab = tab
self.lua = lua
self._exceptions = []
self._command_ids = itertools.count()
if isinstance(render_options, RenderOptions):
self.args = self.lua.python2lua(render_options.data)
elif isinstance(render_options, dict):
self.args = self.lua.python2lua(render_options)
elif render_options is None:
self.args = self.lua.python2lua({})
else:
raise ValueError("Invalid render_options type: %s" % render_options.__class__)
commands = get_commands(self)
self.commands = self.lua.python2lua(commands)
lua_properties = get_lua_properties(self)
self.lua_properties = self.lua.python2lua(lua_properties)
lua_attr_getters = [
lua_properties[attr]['getter_method'] for attr in lua_properties]
self.attr_whitelist = (list(commands.keys()) +
list(lua_properties.keys()) +
lua_attr_getters +
self._attribute_whitelist)
self.lua.add_allowed_object(self, self.attr_whitelist)
self.tmp_storage = self.lua.table_from({})
wrapper = self.lua.eval("require('splash')")
self._wrapped = wrapper._create(self)
self._result_headers = []
def init_dispatcher(self, return_func):
"""
:param callable return_func: function that continues the script
"""
self._return = return_func
@lua_property('js_enabled')
def get_js_enabled(self):
return self.tab.get_js_enabled()
@get_js_enabled.lua_setter
def set_js_enabled(self, value):
self.tab.set_js_enabled(value)
@command(async=True)
def wait(self, time, cancel_on_redirect=False, cancel_on_error=True):
time = float(time)
if time < 0:
raise BadOption("splash:wait time can't be negative")
cmd_id = next(self._command_ids)
def success():
self._return(cmd_id, True)
def redirect(error_info):
self._return(cmd_id, None, 'redirect')
def error(error_info):
self._return(cmd_id, None, self._error_info_to_lua(error_info))
return AsyncBrowserCommand(cmd_id, "wait", dict(
time_ms = time*1000,
callback = success,
onredirect = redirect if cancel_on_redirect else False,
onerror = error if cancel_on_error else False,
))
@command(async=True)
def go(self, url, baseurl=None, headers=None):
if url is None:
raise ScriptError("'url' is required for splash:go")
if self.tab.web_page.navigation_locked:
return ImmediateResult((None, "navigation_locked"))
cmd_id = next(self._command_ids)
def success():
try:
code = self.tab.last_http_status()
if code and 400 <= code < 600:
# return HTTP errors as errors
self._return(cmd_id, None, "http%d" % code)
else:
self._return(cmd_id, True)
except Exception as e:
self._return(cmd_id, None, "internal_error")
def error(error_info):
self._return(cmd_id, None, self._error_info_to_lua(error_info))
return AsyncBrowserCommand(cmd_id, "go", dict(
url=url,
baseurl=baseurl,
callback=success,
errback=error,
headers=self.lua.lua2python(headers, max_depth=3),
))
@command()
def html(self):
return self.tab.html()
@command()
def png(self, width=None, height=None, render_all=False,
scale_method=None):
if width is not None:
width = int(width)
if height is not None:
height = int(height)
result = self.tab.png(width, height, b64=False, render_all=render_all,
scale_method=scale_method)
return BinaryCapsule(result)
@command()
def har(self):
return self.tab.har()
@command()
def history(self):
return self.tab.history()
@command()
def stop(self):
self.tab.stop_loading()
@command()
def evaljs(self, snippet):
return self.tab.evaljs(snippet)
@command()
def runjs(self, snippet):
try:
self.tab.runjs(snippet)
return True
except JsError as e:
return None, e.args[0]
@command(async=True, can_raise_async=True)
def wait_for_resume(self, snippet, timeout=0):
cmd_id = next(self._command_ids)
def callback(result):
self._return(cmd_id, self.lua.python2lua(result))
def errback(msg, raise_):
self._return(cmd_id, None, "JavaScript error: %s" % msg, raise_)
return AsyncBrowserCommand(cmd_id, "wait_for_resume", dict(
js_source=snippet,
callback=callback,
errback=errback,
timeout=timeout,
))
@command()
def private_jsfunc(self, func):
return _WrappedJavascriptFunction(self, func)
@command(async=True)
def http_get(self, url, headers=None, follow_redirects=True):
if url is None:
raise ScriptError("'url' is required for splash:http_get")
cmd_id = next(self._command_ids)
def callback(reply):
reply_har = reply2har(reply, include_content=True, binary_content=True)
self._return(cmd_id, self.lua.python2lua(reply_har))
return AsyncBrowserCommand(cmd_id, "http_get", dict(
url=url,
callback=callback,
headers=self.lua.lua2python(headers, max_depth=3),
follow_redirects=follow_redirects,
))
@command(async=True)
def autoload(self, source_or_url=None, source=None, url=None):
if len([a for a in [source_or_url, source, url] if a is not None]) != 1:
raise ScriptError("splash:autoload requires a single argument")
if source_or_url is not None:
source_or_url = source_or_url.strip()
if source_or_url.startswith(("http://", "https://")):
source, url = None, source_or_url
else:
source, url = source_or_url, None
if source is not None:
# load source directly
self.tab.autoload(source)
return ImmediateResult(True)
else:
# load JS from a remote resource
cmd_id = next(self._command_ids)
def callback(reply):
if reply.error():
reason = REQUEST_ERRORS_SHORT.get(reply.error(), '?')
self._return(cmd_id, None, reason)
else:
source = bytes(reply.readAll())
self.tab.autoload(source)
self._return(cmd_id, True)
return AsyncBrowserCommand(cmd_id, "http_get", dict(
url=url,
callback=callback
))
@command(async=True)
def set_content(self, data, mime_type=None, baseurl=None):
cmd_id = next(self._command_ids)
def success():
self._return(cmd_id, True)
def error(error_info):
self._return(cmd_id, None, self._error_info_to_lua(error_info))
return AsyncBrowserCommand(cmd_id, "set_content", dict(
data=data,
baseurl=baseurl,
mime_type=mime_type,
callback=success,
errback=error,
))
@command()
def lock_navigation(self):
self.tab.lock_navigation()
@command()
def unlock_navigation(self):
self.tab.unlock_navigation()
@command()
def get_cookies(self):
return self.tab.get_cookies()
@command()
def clear_cookies(self):
return self.tab.clear_cookies()
@command(table_argument=True)
def init_cookies(self, cookies):
cookies = self.lua.lua2python(cookies, max_depth=3)
if isinstance(cookies, dict):
keys = sorted(cookies.keys())
cookies = [cookies[k] for k in keys]
return self.tab.init_cookies(cookies)
@command()
def delete_cookies(self, name=None, url=None):
return self.tab.delete_cookies(name=name, url=url)
@command()
def add_cookie(self, name, value, path=None, domain=None, expires=None,
httpOnly=None, secure=None):
cookie = dict(name=name, value=value)
if path is not None:
cookie["path"] = path
if domain is not None:
cookie["domain"] = domain
if expires is not None:
cookie["expires"] = expires
if httpOnly is not None:
cookie["httpOnly"] = httpOnly
if secure is not None:
cookie["secure"] = secure
return self.tab.add_cookie(cookie)
@command()
def set_result_content_type(self, content_type):
if not isinstance(content_type, basestring):
raise ScriptError("splash:set_result_content_type() argument must be a string")
self._result_content_type = content_type
@command()
def set_result_header(self, name, value):
if not all([isinstance(h, basestring) for h in [name, value]]):
raise ScriptError("splash:set_result_header() arguments must be strings")
try:
name = name.decode('utf-8').encode('ascii')
value = value.decode('utf-8').encode('ascii')
except UnicodeEncodeError:
raise ScriptError("splash:set_result_header() arguments must be ascii")
header = (name, value)
self._result_headers.append(header)
@command()
def set_user_agent(self, value):
if not isinstance(value, basestring):
raise ScriptError("splash:set_user_agent() argument must be a string")
self.tab.set_user_agent(value)
@command(table_argument=True)
def set_custom_headers(self, headers):
self.tab.set_custom_headers(self.lua.lua2python(headers, max_depth=3))
@command()
def get_viewport_size(self):
sz = self.tab.web_page.viewportSize()
return sz.width(), sz.height()
@command()
def set_viewport_size(self, width, height):
self.tab.set_viewport('%dx%d' % (width, height))
@command()
def set_viewport_full(self):
return tuple(self.tab.set_viewport('full'))
@lua_property('images_enabled')
def get_images_enabled(self):
return self.tab.get_images_enabled()
@get_images_enabled.lua_setter
@command()
def set_images_enabled(self, enabled):
if enabled is not None:
self.tab.set_images_enabled(int(enabled))
@command()
def status_code(self):
return self.tab.last_http_status()
@command()
def url(self):
return self.tab.url
@command()
def get_perf_stats(self):
""" Return performance-related statistics. """
rusage = resource.getrusage(resource.RUSAGE_SELF)
# on Mac OS X ru_maxrss is in bytes, on Linux it is in KB
rss_mul = 1 if sys.platform == 'darwin' else 1024
return {'maxrss': rusage.ru_maxrss * rss_mul,
'cputime': rusage.ru_utime + rusage.ru_stime,
'walltime': time.time()}
@command(sets_callback=True)
def private_on_request(self, callback):
"""
Register a Lua callback to be called when a resource is requested.
"""
def py_callback(request, operation, outgoing_data):
with wrapped_request(self.lua, request, operation, outgoing_data) as req:
callback(req)
self.tab.register_callback("on_request", py_callback)
return True
def _error_info_to_lua(self, error_info):
if error_info is None:
return "error"
return "%s%s" % (error_info.type.lower(), error_info.code)
def get_real_exception(self):
if self._exceptions:
return self._exceptions[-1]
def clear_exceptions(self):
self._exceptions[:] = []
def result_content_type(self):
if self._result_content_type is None:
return None
return str(self._result_content_type)
def result_headers(self):
return self._result_headers
def get_wrapped(self):
""" Return a Lua wrapper for this object. """
return self._wrapped
def run_async_command(self, cmd):
""" Execute _AsyncCommand """
meth = getattr(self.tab, cmd.name)
return meth(**cmd.kwargs)
@contextlib.contextmanager
def wrapped_request(lua, request, operation, outgoing_data):
"""
    Context manager which returns a wrapped QNetworkRequest
    suitable for use in Lua code.
"""
req = _WrappedRequest(lua, request, operation, outgoing_data)
try:
with lua.object_allowed(req, req.attr_whitelist):
yield req
finally:
req.clear()
def _requires_request(meth):
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
if self.request is None:
raise ValueError("request is used outside a callback")
return meth(self, *args, **kwargs)
return wrapper
class _WrappedRequest(object):
""" QNetworkRequest wrapper for Lua """
_attribute_whitelist = ['info', 'commands']
def __init__(self, lua, request, operation, outgoing_data):
self.request = request
self.lua = lua
self.info = self.lua.python2lua(
request2har(request, operation, outgoing_data)
)
commands = get_commands(self)
self.commands = self.lua.python2lua(commands)
self.attr_whitelist = list(commands.keys()) + self._attribute_whitelist
self._exceptions = []
def clear(self):
self.request = None
self.lua = None
@command()
@_requires_request
def abort(self):
drop_request(self.request)
@command()
@_requires_request
def set_url(self, url):
set_request_url(self.request, url)
@command()
@_requires_request
def set_proxy(self, host, port, username=None, password=None):
proxy = create_proxy(host, port, username, password)
self.request.custom_proxy = proxy
@command()
@_requires_request
def set_header(self, name, value):
self.request.setRawHeader(name, value)
class SplashScriptRunner(BaseScriptRunner):
"""
    A utility class for running Lua coroutines that interact with Splash.
"""
def __init__(self, lua, splash, log, sandboxed):
self.splash = splash
self.splash.init_dispatcher(self.dispatch)
super(SplashScriptRunner, self).__init__(lua=lua, log=log, sandboxed=sandboxed)
def start(self, main_coro, return_result, return_error):
self.return_result = return_result
self.return_error = return_error
self.splash.clear_exceptions()
super(SplashScriptRunner, self).start(main_coro, [self.splash.get_wrapped()])
def on_result(self, result):
self.return_result((result, self.splash.result_content_type(), self.splash.result_headers()))
def on_async_command(self, cmd):
self.splash.run_async_command(cmd)
def on_lua_error(self, lua_exception):
ex = self.splash.get_real_exception()
if not ex:
return
self.log("[lua] LuaError is caused by %r" % ex)
if isinstance(ex, ScriptError):
ex.enrich_from_lua_error(lua_exception)
raise ex
@stop_on_error
def dispatch(self, cmd_id, *args):
super(SplashScriptRunner, self).dispatch(cmd_id, *args)
class LuaRender(RenderScript):
default_min_log_level = 2
@stop_on_error
def start(self, lua_source, sandboxed, lua_package_path,
lua_sandbox_allowed_modules):
self.log(lua_source)
self.sandboxed = sandboxed
self.lua = SplashLuaRuntime(
sandboxed=sandboxed,
lua_package_path=lua_package_path,
lua_sandbox_allowed_modules=lua_sandbox_allowed_modules
)
self.splash = Splash(self.lua, self.tab, self.render_options)
self.runner = SplashScriptRunner(
lua=self.lua,
splash=self.splash,
log=self.log,
sandboxed=sandboxed,
)
try:
main_coro = self.get_main(lua_source)
except (ValueError, lupa.LuaSyntaxError, lupa.LuaError) as e:
raise ScriptError("lua_source: " + repr(e))
self.runner.start(
main_coro=main_coro,
return_result=self.return_result,
return_error=self.return_error,
)
def get_main(self, lua_source):
if self.sandboxed:
main, env = get_main_sandboxed(self.lua, lua_source)
else:
main, env = get_main(self.lua, lua_source)
return self.lua.create_coroutine(main)
|
|
import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from unittest import skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.signals import request_started
from django.core.urlresolvers import get_script_prefix, set_script_prefix
from django.db import reset_queries
from django.http import request
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.utils import six
from django.utils.decorators import ContextDecorator
from django.utils.encoding import force_str
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val - other), self.places) == 0
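# Usage sketch (illustrative): Approximate compares equal when the difference
# rounds to zero at the given precision.
#
#     Approximate(0.1 + 0.2) == 0.3          # True, equal to 7 places
#     Approximate(0.30001, places=3) == 0.3  # True, equal to 3 places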
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, six.string_types):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
keys = set()
for subcontext in self:
for dict in subcontext:
keys |= set(dict.keys())
return keys
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
        - Installing the instrumented test renderer
        - Setting the email backend to the locmem email backend
        - Setting the active locale to match the LANGUAGE_CODE setting
"""
Template._original_render = Template._render
Template._render = instrumented_test_render
    # Storing previous values in the settings module itself is problematic.
# Store them in arbitrary (but related) modules instead. See #20636.
mail._original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
request._original_allowed_hosts = settings.ALLOWED_HOSTS
settings.ALLOWED_HOSTS = ['*']
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template._original_render
del Template._original_render
settings.EMAIL_BACKEND = mail._original_email_backend
del mail._original_email_backend
settings.ALLOWED_HOSTS = request._original_allowed_hosts
del request._original_allowed_hosts
del mail.outbox
def get_runner(settings, test_runner_class=None):
if not test_runner_class:
test_runner_class = settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
test_runner = getattr(test_module, test_path[-1])
return test_runner
class override_settings(object):
"""
    Acts as either a decorator or a context manager. If it's a decorator, it
    takes a function and returns a wrapped function. If it's a context manager,
    it's used with the ``with`` statement. In either case, entering/exiting
    are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import SimpleTestCase
if isinstance(test_func, type):
if not issubclass(test_func, SimpleTestCase):
raise Exception(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
self.save_options(test_func)
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = dict(
test_func._overridden_settings, **self.options)
def enable(self):
        # Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if 'INSTALLED_APPS' in self.options:
try:
apps.set_installed_apps(self.options['INSTALLED_APPS'])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=True)
def disable(self):
if 'INSTALLED_APPS' in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
for key in self.options:
new_value = getattr(settings, key, None)
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=False)
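# Usage sketch (setting names are illustrative): override_settings works both
# as a class/function decorator and as a context manager.
#
#     @override_settings(LANGUAGE_CODE='de')
#     class I18NTests(SimpleTestCase):
#         ...
#
#     with override_settings(DEBUG=True):
#         ...  # settings.DEBUG is True inside the block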
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend or remove
items instead of redefining the entire list.
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = list(
test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
# overridden several times; cumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
                # items may be a single value or an iterable.
if isinstance(items, six.string_types):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super(modify_settings, self).enable()
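# Usage sketch (app and middleware names are illustrative): modify_settings
# applies append/prepend/remove operations to list-type settings instead of
# replacing them wholesale.
#
#     @modify_settings(
#         INSTALLED_APPS={'append': 'myapp'},
#         MIDDLEWARE_CLASSES={'remove': 'django.middleware.csrf.CsrfViewMiddleware'},
#     )
#     class MyTests(SimpleTestCase):
#         ...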
def override_system_checks(new_checks, deployment_checks=None):
""" Acts as a decorator. Overrides list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
you also need to exclude its system checks. """
from django.core.checks.registry import registry
def outer(test_func):
@wraps(test_func)
def inner(*args, **kwargs):
old_checks = registry.registered_checks
registry.registered_checks = new_checks
old_deployment_checks = registry.deployment_checks
if deployment_checks is not None:
registry.deployment_checks = deployment_checks
try:
return test_func(*args, **kwargs)
finally:
registry.registered_checks = old_checks
registry.deployment_checks = old_deployment_checks
return inner
return outer
def compare_xml(want, got):
"""Tries to do a 'xml-comparison' of want and got. Plain string
comparison doesn't always work because, for example, attribute
ordering should not be important. Comment nodes are not considered in the
comparison.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join(c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE)
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want, got = strip_quotes(want, got)
want = want.replace('\\n', '\n')
got = got.replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
    # root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
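# Usage sketch (illustrative): attribute order and surrounding whitespace do not
# affect the comparison, and fragments without an XML declaration are wrapped in
# a synthetic root element before parsing.
#
#     compare_xml('<a b="1" c="2"/>', '<a c="2" b="1"/>')   # True
#     compare_xml('<a>x</a>', '<a>y</a>')                   # False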
def strip_quotes(want, got):
"""
    Strip quotes of doctest output values:
>>> strip_quotes("'foo'")
"foo"
>>> strip_quotes('"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
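# Minimal usage sketch, not part of the original module: compare_xml ignores
# attribute ordering and collapses runs of whitespace inside text nodes, and
# it wraps fragments lacking an XML declaration in a synthetic root element.
def _example_compare_xml():
    assert compare_xml('<a x="1" y="2">hi  there</a>',
                       '<a y="2" x="1">hi there</a>')
    assert compare_xml('<foo/><bar/>', '<foo/><bar/>')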
def str_prefix(s):
return s % {'_': '' if six.PY3 else 'u'}
class CaptureQueriesContext(object):
"""
Context manager that captures queries executed by the specified connection.
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries:self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
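# Illustrative sketch, not part of the original module; assumes a usable
# database connection. The context manager records the SQL executed while it
# is active, even when DEBUG is off, by forcing the debug cursor.
def _example_capture_queries():
    from django.db import connection
    with CaptureQueriesContext(connection) as ctx:
        with connection.cursor() as cursor:
            cursor.execute('SELECT 1')
    # Each captured query is a dict with 'sql' and 'time' keys.
    return len(ctx), [query['sql'] for query in ctx]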
class ignore_warnings(object):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
def __call__(self, decorated):
if isinstance(decorated, type):
# A class is decorated
saved_setUp = decorated.setUp
saved_tearDown = decorated.tearDown
def setUp(inner_self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func('ignore', **self.ignore_kwargs)
saved_setUp(inner_self)
def tearDown(inner_self):
saved_tearDown(inner_self)
self.catch_warnings.__exit__(*sys.exc_info())
decorated.setUp = setUp
decorated.tearDown = tearDown
return decorated
else:
@wraps(decorated)
def inner(*args, **kwargs):
with warnings.catch_warnings():
self.filter_func('ignore', **self.ignore_kwargs)
return decorated(*args, **kwargs)
return inner
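# Minimal sketch, not part of the original module: silencing one warning
# category for a single function. Decorating a class instead wraps its
# setUp/tearDown, as implemented above.
@ignore_warnings(category=DeprecationWarning)
def _example_ignored_deprecation():
    warnings.warn('old API', DeprecationWarning)  # filtered out while running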
@contextmanager
def patch_logger(logger_name, log_level):
"""
Context manager that takes a named logger and the logging level
and provides a simple mock-like list of messages received
"""
calls = []
def replacement(msg, *args, **kwargs):
calls.append(msg % args)
logger = logging.getLogger(logger_name)
orig = getattr(logger, log_level)
setattr(logger, log_level, replacement)
try:
yield calls
finally:
setattr(logger, log_level, orig)
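# Minimal usage sketch, not part of the original module: collect what a named
# logger emits at one level without installing real handlers.
def _example_patch_logger():
    with patch_logger('django.request', 'warning') as calls:
        logging.getLogger('django.request').warning('slow response: %s', '/foo')
    return calls  # ['slow response: /foo']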
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.")
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, six.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def reset_warning_registry():
"""
Clear warning registry for all modules. This is required in some tests
because of a bug in Python that prevents warnings.simplefilter("always")
from always making warnings appear: http://bugs.python.org/issue4180
The bug was fixed in Python 3.4.2.
"""
key = "__warningregistry__"
for mod in sys.modules.values():
if hasattr(mod, key):
getattr(mod, key).clear()
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
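# Minimal sketch, not part of the original module: inside the block,
# time.time() always returns the frozen value; callers that bound the function
# earlier via `from time import time` keep the real clock, as noted above.
def _example_freeze_time():
    with freeze_time(1000000000.0):
        assert time.time() == 1000000000.0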
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
test_func = override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {'keep_trailing_newline': True},
}])(test_func)
return test_func
class ScriptPrefix(ContextDecorator):
def __enter__(self):
set_script_prefix(self.prefix)
def __exit__(self, exc_type, exc_val, traceback):
set_script_prefix(self.old_prefix)
def __init__(self, prefix):
self.prefix = prefix
self.old_prefix = get_script_prefix()
def override_script_prefix(prefix):
"""
    Decorator or context manager to temporarily override the script prefix.
"""
return ScriptPrefix(prefix)
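# Minimal sketch, not part of the original module: ScriptPrefix is a
# ContextDecorator, so the same helper works as a context manager or as a
# decorator; the context-manager form is shown here.
def _example_override_script_prefix():
    with override_script_prefix('/myapp/'):
        return get_script_prefix()  # '/myapp/'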
class LoggingCaptureMixin(object):
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
self.logger_output = six.StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
|
|
'''
Video player
============
.. versionadded:: 1.2.0
The video player widget can be used to play video and let the user control the
play/pausing, volume and position. The widget cannot be customized much because
of the complex assembly of numerous base widgets.
.. image:: images/videoplayer.jpg
:align: center
Annotations
-----------
If you want to display text at a specific time and for a certain duration,
consider annotations. An annotation file has a ".jsa" extension. The player
will automatically load the associated annotation file if it exists.
An annotation file is JSON-based, providing a list of label dictionary items.
The key and value must match one of the :class:`VideoPlayerAnnotation` items.
For example, here is a short version of a jsa file that you can find in
`examples/widgets/softboy.jsa`::
[
{"start": 0, "duration": 2,
"text": "This is an example of annotation"},
{"start": 2, "duration": 2,
"bgcolor": [0.5, 0.2, 0.4, 0.5],
"text": "You can change the background color"}
]
For our softboy.avi example, the result will be:
.. image:: images/videoplayer-annotation.jpg
:align: center
If you want to experiment with annotation files, test with::
python -m kivy.uix.videoplayer examples/widgets/softboy.avi
Fullscreen
----------
The video player can play the video in fullscreen, if
:attr:`VideoPlayer.allow_fullscreen` is activated by a double-tap on
the video. By default, if the video is smaller than the Window, it will not be
stretched.
You can allow stretching by passing custom options to a
:class:`VideoPlayer` instance::
player = VideoPlayer(source='myvideo.avi', state='play',
options={'allow_stretch': True})
End-of-stream behavior
----------------------
You can specify what happens when the video has finished playing by passing an
`eos` (end of stream) directive to the underlying
:class:`~kivy.core.video.VideoBase` class. `eos` can be one of 'stop', 'pause'
or 'loop' and defaults to 'stop'. For example, in order to loop the video::
player = VideoPlayer(source='myvideo.avi', state='play',
options={'eos': 'loop'})
.. note::
The `eos` property of the VideoBase class is a string specifying the
end-of-stream behavior. This property differs from the `eos`
properties of the :class:`VideoPlayer` and
:class:`~kivy.uix.video.Video` classes, whose `eos`
property is simply a boolean indicating that the end of the file has
been reached.
'''
__all__ = ('VideoPlayer', 'VideoPlayerAnnotation')
from json import load
from os.path import exists
from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, \
NumericProperty, DictProperty, OptionProperty
from kivy.animation import Animation
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.progressbar import ProgressBar
from kivy.uix.label import Label
from kivy.uix.video import Video
from kivy.uix.video import Image
from kivy.factory import Factory
from kivy.logger import Logger
from kivy.clock import Clock
class VideoPlayerVolume(Image):
video = ObjectProperty(None)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return False
touch.grab(self)
# save the current volume and delta to it
touch.ud[self.uid] = [self.video.volume, 0]
return True
def on_touch_move(self, touch):
if touch.grab_current is not self:
return
# calculate delta
dy = abs(touch.y - touch.oy)
if dy > 10:
dy = min(dy - 10, 100)
touch.ud[self.uid][1] = dy
self.video.volume = dy / 100.
return True
def on_touch_up(self, touch):
if touch.grab_current is not self:
return
touch.ungrab(self)
dy = abs(touch.y - touch.oy)
if dy < 10:
if self.video.volume > 0:
self.video.volume = 0
else:
self.video.volume = 1.
class VideoPlayerPlayPause(Image):
video = ObjectProperty(None)
def on_touch_down(self, touch):
'''.. versionchanged:: 1.4.0'''
if self.collide_point(*touch.pos):
if self.video.state == 'play':
self.video.state = 'pause'
else:
self.video.state = 'play'
return True
class VideoPlayerStop(Image):
video = ObjectProperty(None)
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
self.video.state = 'stop'
self.video.position = 0
return True
class VideoPlayerProgressBar(ProgressBar):
video = ObjectProperty(None)
seek = NumericProperty(None, allownone=True)
alpha = NumericProperty(1.)
def __init__(self, **kwargs):
super(VideoPlayerProgressBar, self).__init__(**kwargs)
self.bubble = Factory.Bubble(size=(50, 44))
self.bubble_label = Factory.Label(text='0:00')
self.bubble.add_widget(self.bubble_label)
self.add_widget(self.bubble)
update = self._update_bubble
fbind = self.fast_bind
fbind('pos', update)
fbind('size', update)
fbind('seek', update)
def on_video(self, instance, value):
self.video.bind(position=self._update_bubble,
state=self._showhide_bubble)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
self._show_bubble()
touch.grab(self)
self._update_seek(touch.x)
return True
def on_touch_move(self, touch):
if touch.grab_current is not self:
return
self._update_seek(touch.x)
return True
def on_touch_up(self, touch):
if touch.grab_current is not self:
return
touch.ungrab(self)
if self.seek:
self.video.seek(self.seek)
self.seek = None
self._hide_bubble()
return True
def _update_seek(self, x):
if self.width == 0:
return
x = max(self.x, min(self.right, x)) - self.x
self.seek = x / float(self.width)
def _show_bubble(self):
self.alpha = 1
Animation.stop_all(self, 'alpha')
def _hide_bubble(self):
self.alpha = 1.
Animation(alpha=0, d=4, t='in_out_expo').start(self)
def on_alpha(self, instance, value):
self.bubble.background_color = (1, 1, 1, value)
self.bubble_label.color = (1, 1, 1, value)
def _update_bubble(self, *l):
seek = self.seek
if self.seek is None:
if self.video.duration == 0:
seek = 0
else:
seek = self.video.position / self.video.duration
# convert to minutes:seconds
d = self.video.duration * seek
minutes = int(d / 60)
seconds = int(d - (minutes * 60))
# fix bubble label & position
self.bubble_label.text = '%d:%02d' % (minutes, seconds)
self.bubble.center_x = self.x + seek * self.width
self.bubble.y = self.top
def _showhide_bubble(self, instance, value):
if value == 'play':
self._hide_bubble()
else:
self._show_bubble()
class VideoPlayerPreview(FloatLayout):
source = ObjectProperty(None)
video = ObjectProperty(None)
click_done = BooleanProperty(False)
def on_touch_down(self, touch):
if self.collide_point(*touch.pos) and not self.click_done:
self.click_done = True
self.video.state = 'play'
return True
class VideoPlayerAnnotation(Label):
'''Annotation class used for creating annotation labels.
Additional keys are available:
* bgcolor: [r, g, b, a] - background color of the text box
* bgsource: 'filename' - background image used for the background text box
* border: (n, e, s, w) - border used for the background image
'''
start = NumericProperty(0)
'''Start time of the annotation.
:attr:`start` is a :class:`~kivy.properties.NumericProperty` and defaults
to 0.
'''
duration = NumericProperty(1)
'''Duration of the annotation.
:attr:`duration` is a :class:`~kivy.properties.NumericProperty` and
defaults to 1.
'''
annotation = DictProperty({})
def on_annotation(self, instance, ann):
for key, value in ann.items():
setattr(self, key, value)
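# Illustrative sketch, not part of the original widget: each dictionary loaded
# from a ".jsa" file is passed as the `annotation` property, and
# on_annotation() copies its keys onto the matching widget properties.
def _example_annotation_entry():
    ann = VideoPlayerAnnotation(annotation={'start': 2, 'duration': 2,
                                            'text': 'example annotation'})
    return ann.start, ann.duration, ann.text  # (2, 2, 'example annotation')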
class VideoPlayer(GridLayout):
'''VideoPlayer class. See module documentation for more information.
'''
source = StringProperty('')
'''Source of the video to read.
:attr:`source` is a :class:`~kivy.properties.StringProperty` and
defaults to ''.
.. versionchanged:: 1.4.0
'''
thumbnail = StringProperty('')
'''Thumbnail of the video to show. If None, VideoPlayer will try to find
the thumbnail from the :attr:`source` + '.png'.
    :attr:`thumbnail` is a :class:`~kivy.properties.StringProperty` and defaults
to ''.
.. versionchanged:: 1.4.0
'''
duration = NumericProperty(-1)
'''Duration of the video. The duration defaults to -1 and is set to the
real duration when the video is loaded.
:attr:`duration` is a :class:`~kivy.properties.NumericProperty` and
defaults to -1.
'''
position = NumericProperty(0)
'''Position of the video between 0 and :attr:`duration`. The position
    defaults to 0 and is set to the real position when the video is loaded.
    :attr:`position` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.
'''
volume = NumericProperty(1.0)
'''Volume of the video in the range 0-1. 1 means full volume and 0 means
mute.
:attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults
to 1.
'''
state = OptionProperty('stop', options=('play', 'pause', 'stop'))
'''String, indicates whether to play, pause, or stop the video::
# start playing the video at creation
video = VideoPlayer(source='movie.mkv', state='play')
# create the video, and start later
video = VideoPlayer(source='movie.mkv')
# and later
video.state = 'play'
:attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults
to 'stop'.
'''
play = BooleanProperty(False)
'''
.. deprecated:: 1.4.0
Use :attr:`state` instead.
Boolean, indicates whether the video is playing or not. You can start/stop
the video by setting this property::
# start playing the video at creation
video = VideoPlayer(source='movie.mkv', play=True)
# create the video, and start later
video = VideoPlayer(source='movie.mkv')
# and later
video.play = True
:attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
image_overlay_play = StringProperty(
'atlas://data/images/defaulttheme/player-play-overlay')
'''Image filename used to show a "play" overlay when the video has not yet
started.
:attr:`image_overlay_play` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/player-play-overlay'.
'''
image_loading = StringProperty('data/images/image-loading.gif')
'''Image filename used when the video is loading.
:attr:`image_loading` is a :class:`~kivy.properties.StringProperty` and
defaults to 'data/images/image-loading.gif'.
'''
image_play = StringProperty(
'atlas://data/images/defaulttheme/media-playback-start')
'''Image filename used for the "Play" button.
:attr:`image_play` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/media-playback-start'.
'''
image_stop = StringProperty(
'atlas://data/images/defaulttheme/media-playback-stop')
'''Image filename used for the "Stop" button.
:attr:`image_stop` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/media-playback-stop'.
'''
image_pause = StringProperty(
'atlas://data/images/defaulttheme/media-playback-pause')
'''Image filename used for the "Pause" button.
:attr:`image_pause` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/media-playback-pause'.
'''
image_volumehigh = StringProperty(
'atlas://data/images/defaulttheme/audio-volume-high')
'''Image filename used for the volume icon when the volume is high.
:attr:`image_volumehigh` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/audio-volume-high'.
'''
image_volumemedium = StringProperty(
'atlas://data/images/defaulttheme/audio-volume-medium')
'''Image filename used for the volume icon when the volume is medium.
:attr:`image_volumemedium` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/audio-volume-medium'.
'''
image_volumelow = StringProperty(
'atlas://data/images/defaulttheme/audio-volume-low')
'''Image filename used for the volume icon when the volume is low.
:attr:`image_volumelow` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/audio-volume-low'.
'''
image_volumemuted = StringProperty(
'atlas://data/images/defaulttheme/audio-volume-muted')
'''Image filename used for the volume icon when the volume is muted.
:attr:`image_volumemuted` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/audio-volume-muted'.
'''
annotations = StringProperty('')
    '''If set, this file will be used to read the annotations box from.
:attr:`annotations` is a :class:`~kivy.properties.StringProperty`
and defaults to ''.
'''
fullscreen = BooleanProperty(False)
'''Switch to fullscreen view. This should be used with care. When
activated, the widget will remove itself from its parent, remove all
children from the window and will add itself to it. When fullscreen is
unset, all the previous children are restored and the widget is restored to
its previous parent.
.. warning::
        The re-add operation doesn't care about the index position of its
children within the parent.
:attr:`fullscreen` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
allow_fullscreen = BooleanProperty(True)
'''By default, you can double-tap on the video to make it fullscreen. Set
this property to False to prevent this behavior.
:attr:`allow_fullscreen` is a :class:`~kivy.properties.BooleanProperty`
    and defaults to True.
'''
options = DictProperty({})
'''Optional parameters can be passed to a :class:`~kivy.uix.video.Video`
instance with this property.
    :attr:`options` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
# internals
container = ObjectProperty(None)
def __init__(self, **kwargs):
self._video = None
self._image = None
self._annotations = ''
self._annotations_labels = []
super(VideoPlayer, self).__init__(**kwargs)
self._load_thumbnail()
self._load_annotations()
if self.source:
self._trigger_video_load()
def _trigger_video_load(self, *largs):
Clock.unschedule(self._do_video_load)
Clock.schedule_once(self._do_video_load, -1)
def on_source(self, instance, value):
# we got a value, try to see if we have an image for it
self._load_thumbnail()
self._load_annotations()
if self._video is not None:
self._video.unload()
self._video = None
if value:
self._trigger_video_load()
def on_image_overlay_play(self, instance, value):
self._image.image_overlay_play = value
def on_image_loading(self, instance, value):
self._image.image_loading = value
def _load_thumbnail(self):
if not self.container:
return
self.container.clear_widgets()
# get the source, remove extension, and use png
thumbnail = self.thumbnail
if not thumbnail:
filename = self.source.rsplit('.', 1)
thumbnail = filename[0] + '.png'
self._image = VideoPlayerPreview(source=thumbnail, video=self)
self.container.add_widget(self._image)
def _load_annotations(self):
if not self.container:
return
self._annotations_labels = []
annotations = self.annotations
if not annotations:
filename = self.source.rsplit('.', 1)
annotations = filename[0] + '.jsa'
if exists(annotations):
with open(annotations, 'r') as fd:
self._annotations = load(fd)
if self._annotations:
for ann in self._annotations:
self._annotations_labels.append(
VideoPlayerAnnotation(annotation=ann))
def on_state(self, instance, value):
if self._video is not None:
self._video.state = value
def _set_state(self, instance, value):
self.state = value
def _do_video_load(self, *largs):
self._video = Video(source=self.source, state=self.state,
volume=self.volume, pos_hint={'x': 0, 'y': 0},
**self.options)
self._video.bind(texture=self._play_started,
duration=self.setter('duration'),
position=self.setter('position'),
volume=self.setter('volume'),
state=self._set_state)
def on_play(self, instance, value):
value = 'play' if value else 'stop'
return self.on_state(instance, value)
def on_volume(self, instance, value):
if not self._video:
return
self._video.volume = value
def on_position(self, instance, value):
labels = self._annotations_labels
if not labels:
return
for label in labels:
start = label.start
duration = label.duration
if start > value or (start + duration) < value:
if label.parent:
label.parent.remove_widget(label)
elif label.parent is None:
self.container.add_widget(label)
def seek(self, percent):
'''Change the position to a percentage of the duration. Percentage must
be a value between 0-1.
.. warning::
Calling seek() before video is loaded has no effect.
'''
if not self._video:
return
self._video.seek(percent)
def _play_started(self, instance, value):
self.container.clear_widgets()
self.container.add_widget(self._video)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return False
if touch.is_double_tap and self.allow_fullscreen:
self.fullscreen = not self.fullscreen
return True
return super(VideoPlayer, self).on_touch_down(touch)
def on_fullscreen(self, instance, value):
window = self.get_parent_window()
if not window:
Logger.warning('VideoPlayer: Cannot switch to fullscreen, '
'window not found.')
if value:
self.fullscreen = False
return
if not self.parent:
Logger.warning('VideoPlayer: Cannot switch to fullscreen, '
'no parent.')
if value:
self.fullscreen = False
return
if value:
self._fullscreen_state = state = {
'parent': self.parent,
'pos': self.pos,
'size': self.size,
'pos_hint': self.pos_hint,
'size_hint': self.size_hint,
'window_children': window.children[:]}
# remove all window children
for child in window.children[:]:
window.remove_widget(child)
# put the video in fullscreen
if state['parent'] is not window:
state['parent'].remove_widget(self)
window.add_widget(self)
# ensure the video widget is in 0, 0, and the size will be
# reajusted
self.pos = (0, 0)
self.size = (100, 100)
self.pos_hint = {}
self.size_hint = (1, 1)
else:
state = self._fullscreen_state
window.remove_widget(self)
for child in state['window_children']:
window.add_widget(child)
self.pos_hint = state['pos_hint']
self.size_hint = state['size_hint']
self.pos = state['pos']
self.size = state['size']
if state['parent'] is not window:
state['parent'].add_widget(self)
if __name__ == '__main__':
import sys
from kivy.base import runTouchApp
player = VideoPlayer(source=sys.argv[1])
runTouchApp(player)
if player:
player.state = 'stop'
|
|
# Copyright 2011 Dorgival Guedes
# Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tracks host location and configuration
Keep track of hosts in the network, where they are and how they are
configured (at least MAC/IP addresses).
For the time being, it keeps tables with the information; later, it should
transfer that information to Topology and handle just the actual
discovery/update of host information.
Timer configuration can be changed when needed (e.g., for debugging) using
the launch facility (check timeoutSec dict and PingCtrl.pingLim).
You can set various timeouts from the commandline. Names and defaults:
arpAware=60*2 Quiet ARP-responding entries are pinged after this
  arpSilent=60*20 This is for quiet entries not known to answer ARP
  arpReply=12     Time to wait for an ARP reply before retrial
timerInterval=5 Seconds between timer routine activations
entryMove=60 Minimum expected time to move a physical entry
Good values for testing:
--arpAware=15 --arpSilent=45 --arpReply=1 --entryMove=4
You can also specify how many ARP pings we try before deciding it failed:
--pingLim=2
"""
from pox.core import core
from pox.lib.addresses import EthAddr
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.recoco import Timer
from pox.lib.revent import Event, EventHalt
import pox.openflow.libopenflow_01 as of
import pox.openflow.discovery as discovery
from pox.lib.revent.revent import *
import time
import pox
log = core.getLogger()
# Times (in seconds) to use for different timeouts:
timeoutSec = dict(
arpAware=60*2, # Quiet ARP-responding entries are pinged after this
  arpSilent=60*20, # This is for quiet entries not known to answer ARP
arpReply=12, # Time to wait for an ARP reply before retrial
timerInterval=5, # Seconds between timer routine activations
entryMove=60 # Minimum expected time to move a physical entry
)
# Address to send ARP pings from.
# The particular one here is just an arbitrary locally administered address.
DEFAULT_ARP_PING_SRC_MAC = '02:00:00:00:be:ef'
class HostEvent (Event):
"""
Event when hosts join, leave, or move within the network
"""
def __init__ (self, entry, new_dpid = None, new_port = None, join = False,
leave = False, move = False):
super(HostEvent,self).__init__()
self.entry = entry
self.join = join
self.leave = leave
self.move = move
assert sum(1 for x in [join,leave,move] if x) == 1
# You can alter these and they'll change where we think it goes...
self._new_dpid = new_dpid
self._new_port = new_port
#TODO: Allow us to cancel add/removes
@property
def new_dpid (self):
"""
New DPID for move events"
"""
assert self.move
return self._new_dpid
@property
def new_port (self):
"""
New port for move events"
"""
assert self.move
return self._new_port
class Alive (object):
"""
Holds liveliness information for MAC and IP entries
"""
def __init__ (self, livelinessInterval=timeoutSec['arpAware']):
self.lastTimeSeen = time.time()
self.interval=livelinessInterval
def expired (self):
return time.time() > self.lastTimeSeen + self.interval
def refresh (self):
self.lastTimeSeen = time.time()
class PingCtrl (Alive):
"""
Holds information for handling ARP pings for hosts
"""
  # Number of ARP ping attempts before deciding it failed
pingLim=3
def __init__ (self):
super(PingCtrl,self).__init__(timeoutSec['arpReply'])
self.pending = 0
def sent (self):
self.refresh()
self.pending += 1
def failed (self):
return self.pending > PingCtrl.pingLim
def received (self):
# Clear any pending timeouts related to ARP pings
self.pending = 0
class IpEntry (Alive):
"""
This entry keeps track of IP addresses seen from each MAC entry and will
be kept in the macEntry object's ipAddrs dictionary. At least for now,
there is no need to refer to the original macEntry as the code is organized.
"""
def __init__ (self, hasARP):
if hasARP:
super(IpEntry,self).__init__(timeoutSec['arpAware'])
else:
super(IpEntry,self).__init__(timeoutSec['arpSilent'])
self.hasARP = hasARP
self.pings = PingCtrl()
def setHasARP (self):
if not self.hasARP:
self.hasARP = True
self.interval = timeoutSec['arpAware']
class MacEntry (Alive):
"""
Not strictly an ARP entry.
When it gets moved to Topology, may include other host info, like
  services, and it may replace dpid by a general switch object reference.
We use the port to determine which port to forward traffic out of.
"""
def __init__ (self, dpid, port, macaddr):
super(MacEntry,self).__init__()
self.dpid = dpid
self.port = port
self.macaddr = macaddr
self.ipAddrs = {}
def __str__(self):
return ' '.join([str(self.dpid), str(self.port), str(self.macaddr)])
def __eq__ (self, other):
if other is None:
return False
elif type(other) == tuple:
return (self.dpid,self.port,self.macaddr)==other
if self.dpid != other.dpid: return False
if self.port != other.port: return False
if self.macaddr != other.macaddr: return False
# What about ipAddrs??
return True
def __ne__ (self, other):
return not self.__eq__(other)
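# Minimal sketch, not part of the original component: a MacEntry compares
# equal to a plain (dpid, port, macaddr) tuple, which is exactly what the
# PacketIn handler below uses to detect that a known host has moved.
def _example_macentry_compare():
  entry = MacEntry(1, 3, EthAddr("00:11:22:33:44:55"))
  return entry == (1, 3, EthAddr("00:11:22:33:44:55"))  # True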
class host_tracker (EventMixin):
"""
Host tracking component
"""
_eventMixin_events = set([HostEvent])
def __init__ (self, ping_src_mac = None, install_flow = True,
eat_packets = True):
if ping_src_mac is None:
ping_src_mac = DEFAULT_ARP_PING_SRC_MAC
self.ping_src_mac = EthAddr(ping_src_mac)
self.install_flow = install_flow
self.eat_packets = eat_packets
# The following tables should go to Topology later
self.entryByMAC = {}
self._t = Timer(timeoutSec['timerInterval'],
self._check_timeouts, recurring=True)
# Listen to openflow with high priority if we want to eat our ARP replies
listen_args = {}
if eat_packets:
listen_args={'openflow':{'priority':0}}
core.listen_to_dependencies(self, listen_args=listen_args)
def _all_dependencies_met (self):
log.info("host_tracker ready")
# The following two functions should go to Topology also
def getMacEntry (self, macaddr):
try:
result = self.entryByMAC[macaddr]
except KeyError as e:
result = None
return result
def sendPing (self, macEntry, ipAddr):
"""
Builds an ETH/IP any-to-any ARP packet (an "ARP ping")
"""
r = arp()
r.opcode = arp.REQUEST
r.hwdst = macEntry.macaddr
r.hwsrc = self.ping_src_mac
r.protodst = ipAddr
# src is IP_ANY
e = ethernet(type=ethernet.ARP_TYPE, src=r.hwsrc, dst=r.hwdst)
e.payload = r
log.debug("%i %i sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
msg = of.ofp_packet_out(data = e.pack(),
action = of.ofp_action_output(port=macEntry.port))
if core.openflow.sendToDPID(macEntry.dpid, msg.pack()):
ipEntry = macEntry.ipAddrs[ipAddr]
ipEntry.pings.sent()
else:
# macEntry is stale, remove it.
log.debug("%i %i ERROR sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
del macEntry.ipAddrs[ipAddr]
return
def getSrcIPandARP (self, packet):
"""
Gets source IPv4 address for packets that have one (IPv4 and ARP)
Returns (ip_address, has_arp). If no IP, returns (None, False).
"""
if isinstance(packet, ipv4):
log.debug("IP %s => %s",str(packet.srcip),str(packet.dstip))
return ( packet.srcip, False )
elif isinstance(packet, arp):
log.debug("ARP %s %s => %s",
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(packet.opcode,
'op:%i' % (packet.opcode,)),
str(packet.protosrc), str(packet.protodst))
if (packet.hwtype == arp.HW_TYPE_ETHERNET and
packet.prototype == arp.PROTO_TYPE_IP and
packet.protosrc != 0):
return ( packet.protosrc, True )
return ( None, False )
def updateIPInfo (self, pckt_srcip, macEntry, hasARP):
"""
Update given MacEntry
If there is IP info in the incoming packet, update the macEntry
accordingly. In the past we assumed a 1:1 mapping between MAC and IP
    addresses, but removed that restriction later to accommodate cases
like virtual interfaces (1:n) and distributed packet rewriting (n:1)
"""
if pckt_srcip in macEntry.ipAddrs:
# that entry already has that IP
ipEntry = macEntry.ipAddrs[pckt_srcip]
ipEntry.refresh()
log.debug("%s already has IP %s, refreshing",
str(macEntry), str(pckt_srcip) )
else:
# new mapping
ipEntry = IpEntry(hasARP)
macEntry.ipAddrs[pckt_srcip] = ipEntry
log.info("Learned %s got IP %s", str(macEntry), str(pckt_srcip) )
if hasARP:
ipEntry.pings.received()
def _handle_openflow_ConnectionUp (self, event):
if not self.install_flow: return
log.debug("Installing flow for ARP ping responses")
m = of.ofp_flow_mod()
m.priority += 1 # Higher than normal
m.match.dl_type = ethernet.ARP_TYPE
m.match.dl_dst = self.ping_src_mac
m.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
event.connection.send(m)
def _handle_openflow_PacketIn (self, event):
"""
Populate MAC and IP tables based on incoming packets.
Handles only packets from ports identified as not switch-only.
If a MAC was not seen before, insert it in the MAC table;
    otherwise, update table and entry.
    If the packet has a source IP, update that info for the macEntry (may
    require removing the info from another entry previously holding that IP
    address).
    It does not forward any packets, just extracts info from them.
"""
dpid = event.connection.dpid
inport = event.port
packet = event.parsed
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if packet.type == ethernet.LLDP_TYPE: # Ignore LLDP packets
return
# This should use Topology later
if not core.openflow_discovery.is_edge_port(dpid, inport):
# No host should be right behind a switch-only port
log.debug("%i %i ignoring packetIn at switch-only port", dpid, inport)
return
log.debug("PacketIn: %i %i ETH %s => %s",
dpid, inport, str(packet.src), str(packet.dst))
# Learn or update dpid/port/MAC info
macEntry = self.getMacEntry(packet.src)
if macEntry is None:
# there is no known host by that MAC
# should we raise a NewHostFound event (at the end)?
macEntry = MacEntry(dpid,inport,packet.src)
self.entryByMAC[packet.src] = macEntry
log.info("Learned %s", str(macEntry))
self.raiseEventNoErrors(HostEvent, macEntry, join=True)
elif macEntry != (dpid, inport, packet.src):
# there is already an entry of host with that MAC, but host has moved
# should we raise a HostMoved event (at the end)?
log.info("Learned %s moved to %i %i", str(macEntry), dpid, inport)
      # if it hasn't been long since we last heard from it...
if time.time() - macEntry.lastTimeSeen < timeoutSec['entryMove']:
log.warning("Possible duplicate: %s at time %i, now (%i %i), time %i",
str(macEntry), macEntry.lastTimeSeen,
dpid, inport, time.time())
# should we create a whole new entry, or keep the previous host info?
# for now, we keep it: IP info, answers pings, etc.
e = HostEvent(macEntry, move=True, new_dpid = dpid, new_port = inport)
self.raiseEventNoErrors(e)
macEntry.dpid = e._new_dpid
      macEntry.port = e._new_port
macEntry.refresh()
(pckt_srcip, hasARP) = self.getSrcIPandARP(packet.next)
if pckt_srcip is not None:
self.updateIPInfo(pckt_srcip,macEntry,hasARP)
if self.eat_packets and packet.dst == self.ping_src_mac:
return EventHalt
def _check_timeouts (self):
"""
Checks for timed out entries
"""
for macEntry in self.entryByMAC.values():
entryPinged = False
for ip_addr, ipEntry in macEntry.ipAddrs.items():
if ipEntry.expired():
if ipEntry.pings.failed():
del macEntry.ipAddrs[ip_addr]
log.info("Entry %s: IP address %s expired",
str(macEntry), str(ip_addr) )
else:
self.sendPing(macEntry,ip_addr)
ipEntry.pings.sent()
entryPinged = True
if macEntry.expired() and not entryPinged:
log.info("Entry %s expired", str(macEntry))
# sanity check: there should be no IP addresses left
if len(macEntry.ipAddrs) > 0:
          for ip_addr in list(macEntry.ipAddrs.keys()):
            log.warning("Entry %s expired but still had IP address %s",
                        str(macEntry), str(ip_addr) )
            del macEntry.ipAddrs[ip_addr]
self.raiseEventNoErrors(HostEvent, macEntry, leave=True)
del self.entryByMAC[macEntry.macaddr]
|
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import click
import os, sys
import subprocess
import rackman
VERSION = rackman.__version__
RELEASE = 'trusty'
basedir = os.path.realpath('.')
@click.group(chain=True)
def cli():
"""Building script for Rackman"""
pass
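# Usage sketch (assumed invocations, not part of the original script): because
# the group is created with chain=True, several commands can be combined in a
# single call, for example:
#
#   python2 build.py clean build_deb push
#   python2 build.py build_doc --html --man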
@cli.command()
def build_deb():
"""Building DEB package"""
click.secho('*** Creating distribution archive...', fg='yellow')
comm = "python setup.py sdist"
try:
# https://docs.python.org/2/distutils/introduction.html#distutils-simple-example
os.chdir( basedir )
subprocess.call(comm, shell=True)
except OSError as e:
click.echo("ERR: {}; {}".format(comm, e), err=True)
sys.exit(os.EX_OSERR)
    click.secho('*** Transforming to debian package from distribution archive...', fg='yellow')
comm = "py2dsc --suite='{}' Rackman-{}.tar.gz".format(RELEASE, VERSION)
try:
# https://pypi.python.org/pypi/stdeb/#debianize-distutils-command
os.chdir( "{}/dist".format(basedir) )
subprocess.call(comm, shell=True)
except OSError as e:
click.echo("ERR: {}; {}".format(comm, e), err=True)
sys.exit(os.EX_OSERR)
click.secho('*** Configuring and building debian package...', fg='yellow')
try:
os.chdir( "{}/dist/deb_dist/rackman-{}".format(basedir, VERSION) )
subprocess.call("sed -i -- '/^Depends:/ s/$/, python-gtk2 (>=2.24.0), python-cairo (>=1.8.8)/g' ./debian/control", shell=True)
subprocess.call("sed -i -- '/^Build-Depends:/ s/$/, python-gtk2 (>=2.24.0), python-cairo (>=1.8.8)/g' ./debian/control", shell=True)
subprocess.call("sed -i -- 's/python-rackman/rackman/g' ./debian/control ./debian/rules", shell=True)
subprocess.call("dpkg-buildpackage -rfakeroot -uc -us", shell=True)
except OSError as e:
click.echo("ERR: dpkg-buildpackage -rfakeroot -uc -us; {}".format(e), err=True)
sys.exit(os.EX_OSERR)
click.secho('*** Signing debian package...', fg='yellow')
comm = "debuild -S -sa -k$GPGKEY"
try:
# https://help.ubuntu.com/community/GnuPrivacyGuardHowto
# https://help.launchpad.net/Packaging/PPA/BuildingASourcePackage
os.chdir( "{}/dist/deb_dist/rackman-{}".format(basedir, VERSION) )
subprocess.call(comm, shell=True)
except OSError as e:
click.echo("ERR: {}; {}".format(comm, e), err=True)
sys.exit(os.EX_OSERR)
@cli.command()
def push():
"""Pushing DEB in Launchpad"""
# https://help.launchpad.net/Packaging/PPA/Uploading#Next_steps
comm = "dput ppa:freezemandix/rackman rackman_{}-1_source.changes".format(VERSION)
try:
os.chdir( "{}/dist/deb_dist/".format(basedir) )
subprocess.call(comm, shell=True)
except OSError as e:
click.echo("ERR: {}; {}".format(comm, e), err=True)
sys.exit(os.EX_OSERR)
@cli.command()
def clean():
"""Cleaning ./dist and ./build directories"""
import shutil
path = os.path.join(basedir, 'dist')
try:
shutil.rmtree(path)
except OSError as e:
click.echo("ERR: remove {}; {}".format(path, e), err=True)
path = os.path.join(basedir, 'build')
try:
shutil.rmtree(path)
except OSError as e:
click.echo("ERR: remove {}; {}".format(path, e), err=True)
@cli.command()
@click.option('--html', '-h', is_flag=True, help='generating html documentation')
@click.option('--man', '-m', is_flag=True, help='generating man documentation')
def build_doc(html, man):
"""Building documentation from README.md"""
# Building html page
if html:
comm = "pandoc " \
"--standalone " \
"--self-contained " \
"--smart " \
"--normalize " \
"-V lang:russian " \
"-f markdown " \
"-t html " \
"-o ./doc/html/ru/index.html README.md"
try:
os.chdir( basedir )
subprocess.call(comm, shell=True)
except OSError as e:
click.echo("ERR: {}; {}".format(comm, e), err=True)
sys.exit(os.EX_OSERR)
# Building man page
if man:
from datetime import datetime
import gzip, shutil
comm = "pandoc " \
"--standalone " \
"--self-contained " \
"--smart " \
"--normalize " \
"-V lang:russian " \
"-f markdown " \
"-t man " \
"-o ./doc/man/ru/rackman README.md"
        date = datetime.today().strftime('%Y-%m-%d')
try:
os.chdir( basedir )
subprocess.call(comm, shell=True)
# add metadata in man page
subprocess.call('''sed -i -- 's/.TH "" "" "" "" ""/.TH RACKMAN 1 {} {} ""/g' ./doc/man/ru/rackman'''.format(date, VERSION), shell=True)
# compressing man page (rackman -> rackman.1.gz)
with open(os.path.join(basedir, 'doc/man/ru/rackman'), 'rb') as f_in, gzip.open(os.path.join(basedir, 'doc/man/ru/rackman.1.gz'), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
except OSError as e:
click.echo("ERR: {}; {}".format(comm, e), err=True)
sys.exit(os.EX_OSERR)
@cli.command()
@click.option('--update', '-u', is_flag=False, help='update messages.pot from rackman.py')
@click.option('--merge', '-m', is_flag=False, help='merge po files with messages.pot')
@click.option('--add', '-a', is_flag=False, help='add translation for new language')
@click.option('--build', '-b', is_flag=False, help='build mo files from po files')
@click.option('--lang', '-l', default='en', help='language (default=en)')
def build_lang(update, merge, add, build, lang):
"""Building i18n files"""
# https://help.launchpad.net/Translations/POTemplates
if update:
comm = 'xgettext -k_ -kN_ ' \
'--package-version={} ' \
'--package-name=rackman ' \
'--copyright-holder="by Nik Volkov" ' \
'[email protected] ' \
'-o messages.pot rackman.py'.format(VERSION)
try:
os.chdir( basedir )
subprocess.call(comm, shell=True)
except OSError as e:
click.echo("ERR: {}; {}".format(comm, e), err=True)
sys.exit(os.EX_OSERR)
if add:
comm = 'msginit --locale={}'.format(lang)
try:
os.chdir( basedir )
subprocess.call(comm, shell=True)
except OSError as e:
click.echo("ERR: {}; {}".format(comm, e), err=True)
sys.exit(os.EX_OSERR)
if merge:
comm = 'msgmerge -UN {}.po messages.pot'.format(lang)
try:
os.chdir( basedir )
subprocess.call(comm, shell=True)
except OSError as e:
click.echo("ERR: {}; {}".format(comm, e), err=True)
sys.exit(os.EX_OSERR)
if build:
comm = 'msgfmt {lang}.po -o locale/{lang}/LC_MESSAGES/rackman.mo'.format(lang=lang)
try:
os.chdir( basedir )
path = 'locale/{}/LC_MESSAGES/'.format(lang)
if os.path.isdir(path) is False:
os.makedirs(path)
subprocess.call(comm, shell=True)
except OSError as e:
click.echo("ERR: {}; {}".format(comm, e), err=True)
sys.exit(os.EX_OSERR)
@cli.command()
@click.pass_context
def build_all(ctx):
"""Building all in DEB (full cycle)"""
for lang in ('en', 'ru'):
ctx.invoke(build_lang, lang=lang, update=True, merge=True)
while True:
            if click.confirm('Are the translations in the {}.po file correct?'.format(lang)):
ctx.invoke(build_lang, lang=lang, build=True)
break
ctx.invoke(build_doc, html=True, man=True)
ctx.invoke(clean)
ctx.invoke(build_deb)
if __name__ == '__main__':
cli()
|
|
# Tests command line execution of scripts
import unittest
import os
import os.path
import test.test_support
from test.script_helper import (run_python,
temp_dir, make_script, compile_script,
make_pkg, make_zip_script, make_zip_pkg)
verbose = test.test_support.verbose
test_source = """\
# Script may be run with optimisation enabled, so don't rely on assert
# statements being executed
def assertEqual(lhs, rhs):
if lhs != rhs:
raise AssertionError('%r != %r' % (lhs, rhs))
def assertIdentical(lhs, rhs):
if lhs is not rhs:
raise AssertionError('%r is not %r' % (lhs, rhs))
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
assertEqual(result, ['Top level assignment', 'Lower level reference'])
# Check population of magic variables
assertEqual(__name__, '__main__')
print '__file__==%r' % __file__
print '__package__==%r' % __package__
# Check the sys module
import sys
assertIdentical(globals(), sys.modules[__name__].__dict__)
print 'sys.argv[0]==%r' % sys.argv[0]
"""
def _make_test_script(script_dir, script_basename, source=test_source):
return make_script(script_dir, script_basename, source)
def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
source=test_source, depth=1):
return make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
source, depth)
# There's no easy way to pass the script directory in to get
# -m to work (avoiding that is the whole point of making
# directories and zipfiles executable!)
# So we fake it for testing purposes with a custom launch script
launch_source = """\
import sys, os.path, runpy
sys.path.insert(0, %s)
runpy._run_module_as_main(%r)
"""
def _make_launch_script(script_dir, script_basename, module_name, path=None):
if path is None:
path = "os.path.dirname(__file__)"
else:
path = repr(path)
source = launch_source % (path, module_name)
return make_script(script_dir, script_basename, source)
class CmdLineTest(unittest.TestCase):
def _check_script(self, script_name, expected_file,
expected_argv0, expected_package,
*cmd_line_switches):
run_args = cmd_line_switches + (script_name,)
exit_code, data = run_python(*run_args)
if verbose:
print 'Output from test script %r:' % script_name
print data
self.assertEqual(exit_code, 0)
printed_file = '__file__==%r' % expected_file
printed_argv0 = 'sys.argv[0]==%r' % expected_argv0
printed_package = '__package__==%r' % expected_package
if verbose:
print 'Expected output:'
print printed_file
print printed_package
print printed_argv0
self.assertIn(printed_file, data)
self.assertIn(printed_package, data)
self.assertIn(printed_argv0, data)
def _check_import_error(self, script_name, expected_msg,
*cmd_line_switches):
run_args = cmd_line_switches + (script_name,)
exit_code, data = run_python(*run_args)
if verbose:
print 'Output from test script %r:' % script_name
print data
print 'Expected output: %r' % expected_msg
self.assertIn(expected_msg, data)
def test_basic_script(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'script')
self._check_script(script_name, script_name, script_name, None)
def test_script_compiled(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'script')
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(compiled_name, compiled_name, compiled_name, None)
def test_directory(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
self._check_script(script_dir, script_name, script_dir, '')
def test_directory_compiled(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(script_dir, compiled_name, script_dir, '')
def test_directory_error(self):
with temp_dir() as script_dir:
msg = "can't find '__main__' module in %r" % script_dir
self._check_import_error(script_dir, msg)
def test_zipfile(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)
self._check_script(zip_name, run_name, zip_name, '')
def test_zipfile_compiled(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
compiled_name = compile_script(script_name)
zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name)
self._check_script(zip_name, run_name, zip_name, '')
def test_zipfile_error(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'not_main')
zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)
msg = "can't find '__main__' module in %r" % zip_name
self._check_import_error(zip_name, msg)
def test_module_in_package(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
script_name = _make_test_script(pkg_dir, 'script')
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script')
self._check_script(launch_name, script_name, script_name, 'test_pkg')
def test_module_in_package_in_zipfile(self):
with temp_dir() as script_dir:
zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script')
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name)
self._check_script(launch_name, run_name, run_name, 'test_pkg')
def test_module_in_subpackage_in_zipfile(self):
with temp_dir() as script_dir:
zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2)
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name)
self._check_script(launch_name, run_name, run_name, 'test_pkg.test_pkg')
def test_package(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
script_name = _make_test_script(pkg_dir, '__main__')
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
self._check_script(launch_name, script_name,
script_name, 'test_pkg')
def test_package_compiled(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
script_name = _make_test_script(pkg_dir, '__main__')
compiled_name = compile_script(script_name)
os.remove(script_name)
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
self._check_script(launch_name, compiled_name,
compiled_name, 'test_pkg')
def test_package_error(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
msg = ("'test_pkg' is a package and cannot "
"be directly executed")
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
self._check_import_error(launch_name, msg)
def test_package_recursion(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
main_dir = os.path.join(pkg_dir, '__main__')
make_pkg(main_dir)
msg = ("Cannot use package as __main__ module; "
"'test_pkg' is a package and cannot "
"be directly executed")
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
self._check_import_error(launch_name, msg)
def test_main():
test.test_support.run_unittest(CmdLineTest)
test.test_support.reap_children()
if __name__ == '__main__':
test_main()
|
|
import decimal
import json
import unittest
import uuid
from django import forms
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import SimpleArrayField, SplitArrayField
from django.core import exceptions, serializers
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TestCase, override_settings
from django.utils import timezone
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
)
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL required')
class TestSaveLoad(TestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=['hello', 'goodbye'])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
# This checks that get_prep_value is deferred properly
instance = IntegerArrayModel(field=['1'])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertEqual(loaded.field, None)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL required')
class TestQuerying(TestCase):
def setUp(self):
self.objs = [
NullableIntegerArrayModel.objects.create(field=[1]),
NullableIntegerArrayModel.objects.create(field=[2]),
NullableIntegerArrayModel.objects.create(field=[2, 3]),
NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
NullableIntegerArrayModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]),
self.objs[:1]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True),
self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]),
self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]),
self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2]
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2]
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=['text']),
[]
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=['text']),
[]
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=['text']),
[]
)
def test_index(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0=2),
self.objs[1:3]
)
def test_index_chained(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0__lt=3),
self.objs[0:3]
)
def test_index_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0=1),
[instance]
)
@unittest.expectedFailure
def test_index_used_on_nested_data(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
[instance]
)
def test_overlap(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
self.objs[0:3]
)
def test_len(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len__lte=2),
self.objs[0:3]
)
def test_slice(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
self.objs[1:3]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
self.objs[2:3]
)
@unittest.expectedFailure
def test_slice_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
[instance]
)
class TestChecks(TestCase):
def test_field_checks(self):
field = ArrayField(models.CharField())
field.set_attributes_from_name('field')
errors = field.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'postgres.E001')
def test_invalid_base_fields(self):
field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
field.set_attributes_from_name('field')
errors = field.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'postgres.E002')
class TestMigrations(TestCase):
def test_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL required')
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
})
def test_adding_field_with_default(self):
# See #22962
call_command('migrate', 'postgres_tests', verbosity=0)
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL required')
class TestSerialization(TestCase):
test_data = '[{"fields": {"field": "[\\"1\\", \\"2\\"]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'
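# Note: the array value itself is serialized as a JSON-encoded string
# ("[\"1\", \"2\"]") inside the fixture, not as a nested JSON list.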
def test_dumping(self):
instance = IntegerArrayModel(field=[1, 2])
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, [1, 2])
class TestValidation(TestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, None], None)
self.assertEqual(cm.exception.code, 'item_invalid')
self.assertEqual(cm.exception.message % cm.exception.params, 'Item 1 in the array did not validate: This field cannot be null.')
def test_blank_true(self):
field = ArrayField(models.IntegerField(blank=True, null=True))
# This should not raise a validation error
field.clean([1, None], None)
def test_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
field.clean([1, 2, 3], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, 2, 3, 4], None)
self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')
def test_nested_array_mismatch(self):
field = ArrayField(ArrayField(models.IntegerField()))
field.clean([[1, 2], [3, 4]], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([[1, 2], [3, 4, 5]], None)
self.assertEqual(cm.exception.code, 'nested_array_mismatch')
self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')
class TestSimpleFormField(TestCase):
def test_valid(self):
field = SimpleArrayField(forms.CharField())
value = field.clean('a,b,c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_to_python_fail(self):
field = SimpleArrayField(forms.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,9')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.')
def test_validate_fail(self):
field = SimpleArrayField(forms.CharField(required=True))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,')
self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.')
def test_validators_fail(self):
field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,bc,de')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.')
def test_delimiter(self):
field = SimpleArrayField(forms.CharField(), delimiter='|')
value = field.clean('a|b|c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_delimiter_with_nesting(self):
field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
value = field.clean('a,b|c,d')
self.assertEqual(value, [['a', 'b'], ['c', 'd']])
def test_prepare_value(self):
field = SimpleArrayField(forms.CharField())
value = field.prepare_value(['a', 'b', 'c'])
self.assertEqual(value, 'a,b,c')
def test_max_length(self):
field = SimpleArrayField(forms.CharField(), max_length=2)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')
def test_min_length(self):
field = SimpleArrayField(forms.CharField(), min_length=4)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')
def test_required(self):
field = SimpleArrayField(forms.CharField(), required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('')
self.assertEqual(cm.exception.messages[0], 'This field is required.')
def test_model_field_formfield(self):
model_field = ArrayField(models.CharField(max_length=27))
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertIsInstance(form_field.base_field, forms.CharField)
self.assertEqual(form_field.base_field.max_length, 27)
def test_model_field_formfield_size(self):
model_field = ArrayField(models.CharField(max_length=27), size=4)
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertEqual(form_field.max_length, 4)
class TestSplitFormField(TestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {'array_0': '', 'array_1': '', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['This field is required.']})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)
data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']})
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(str(SplitForm()), '''
<tr>
<th><label for="id_array_0">Array:</label></th>
<td>
<input id="id_array_0" name="array_0" type="text" />
<input id="id_array_1" name="array_1" type="text" />
<input id="id_array_2" name="array_2" type="text" />
</td>
</tr>
''')
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import datetime
import os
import unittest
from airflow.operators.hive_operator import HiveOperator
from airflow.operators.hive_stats_operator import HiveStatsCollectionOperator
from airflow.operators.hive_to_mysql import HiveToMySqlTransfer
from airflow.operators.hive_to_samba_operator import Hive2SambaOperator
from airflow.operators.presto_check_operator import PrestoCheckOperator
from airflow.operators.presto_to_mysql import PrestoToMySqlTransfer
from airflow.sensors.hdfs_sensor import HdfsSensor
from airflow.sensors.hive_partition_sensor import HivePartitionSensor
from airflow.sensors.metastore_partition_sensor import MetastorePartitionSensor
from airflow.sensors.named_hive_partition_sensor import NamedHivePartitionSensor
from airflow.sensors.sql_sensor import SqlSensor
from airflow.sensors.web_hdfs_sensor import WebHdfsSensor
from tests.compat import mock
from airflow import DAG
from airflow.configuration import conf
from airflow.exceptions import AirflowSensorTimeout
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
class HiveEnvironmentTest(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.hql = """
USE airflow;
DROP TABLE IF EXISTS static_babynames_partitioned;
CREATE TABLE IF NOT EXISTS static_babynames_partitioned (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY (ds string);
INSERT OVERWRITE TABLE static_babynames_partitioned
PARTITION(ds='{{ ds }}')
SELECT state, year, name, gender, num FROM static_babynames;
"""
class HiveCliTest(unittest.TestCase):
def setUp(self):
self.nondefault_schema = "nondefault"
os.environ["AIRFLOW__CORE__SECURITY"] = "kerberos"
def tearDown(self):
del os.environ["AIRFLOW__CORE__SECURITY"]
def test_get_proxy_user_value(self):
from airflow.hooks.hive_hooks import HiveCliHook
hook = HiveCliHook()
returner = mock.MagicMock()
returner.extra_dejson = {'proxy_user': 'a_user_proxy'}
hook.use_beeline = True
hook.conn = returner
# Run
result = hook._prepare_cli_cmd()
# Verify
self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2])
class HiveOperatorConfigTest(HiveEnvironmentTest):
def test_hive_airflow_default_config_queue(self):
t = HiveOperator(
task_id='test_default_config_queue',
hql=self.hql,
mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_default_config_queue',
dag=self.dag)
# just check that the correct default value in test_default.cfg is used
test_config_hive_mapred_queue = conf.get(
'hive',
'default_hive_mapred_queue'
)
self.assertEqual(t.get_hook().mapred_queue, test_config_hive_mapred_queue)
def test_hive_airflow_default_config_queue_override(self):
specific_mapred_queue = 'default'
t = HiveOperator(
task_id='test_default_config_queue',
hql=self.hql,
mapred_queue=specific_mapred_queue,
mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_default_config_queue',
dag=self.dag)
self.assertEqual(t.get_hook().mapred_queue, specific_mapred_queue)
class HiveOperatorTest(HiveEnvironmentTest):
def test_hiveconf_jinja_translate(self):
hql = "SELECT ${num_col} FROM ${hiveconf:table};"
t = HiveOperator(
hiveconf_jinja_translate=True,
task_id='dry_run_basic_hql', hql=hql, dag=self.dag)
t.prepare_template()
self.assertEqual(t.hql, "SELECT {{ num_col }} FROM {{ table }};")
def test_hiveconf(self):
hql = "SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});"
t = HiveOperator(
hiveconfs={'table': 'static_babynames', 'day': '{{ ds }}'},
task_id='dry_run_basic_hql', hql=hql, dag=self.dag)
t.prepare_template()
self.assertEqual(
t.hql,
"SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});")
if 'AIRFLOW_RUNALL_TESTS' in os.environ:
class HivePrestoTest(HiveEnvironmentTest):
def test_hive(self):
t = HiveOperator(
task_id='basic_hql', hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_queues(self):
t = HiveOperator(
task_id='test_hive_queues', hql=self.hql,
mapred_queue='default', mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_hive_queues',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_dryrun(self):
t = HiveOperator(
task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag)
t.dry_run()
def test_beeline(self):
t = HiveOperator(
task_id='beeline_hql', hive_cli_conn_id='beeline_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_presto(self):
sql = """
SELECT count(1) FROM airflow.static_babynames_partitioned;
"""
t = PrestoCheckOperator(
task_id='presto_check', sql=sql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_presto_to_mysql(self):
t = PrestoToMySqlTransfer(
task_id='presto_to_mysql_check',
sql="""
SELECT name, count(*) as ccount
FROM airflow.static_babynames
GROUP BY name
""",
mysql_table='test_static_babynames',
mysql_preoperator='TRUNCATE TABLE test_static_babynames;',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hdfs_sensor(self):
t = HdfsSensor(
task_id='hdfs_sensor_check',
filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_webhdfs_sensor(self):
t = WebHdfsSensor(
task_id='webhdfs_sensor_check',
filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
timeout=120,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_sql_sensor(self):
t = SqlSensor(
task_id='hdfs_sensor_check',
conn_id='presto_default',
sql="SELECT 'x' FROM airflow.static_babynames LIMIT 1;",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_stats(self):
t = HiveStatsCollectionOperator(
task_id='hive_stats_check',
table="airflow.static_babynames_partitioned",
partition={'ds': DEFAULT_DATE_DS},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor(self):
t = NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}"
],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self):
t = NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}",
"airflow.static_babynames_partitioned/ds={{ds}}"
],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor_parses_partitions_with_periods(self):
t = NamedHivePartitionSensor.parse_partition_name(
partition="schema.table/part1=this.can.be.an.issue/part2=ok")
self.assertEqual(t[0], "schema")
self.assertEqual(t[1], "table")
self.assertEqual(t[2], "part1=this.can.be.an.issue/part2=ok")
def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self):
with self.assertRaises(AirflowSensorTimeout):
t = NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}",
"airflow.static_babynames_partitioned/ds=nonexistent"
],
poke_interval=0.1,
timeout=1,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_partition_sensor(self):
t = HivePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_metastore_sql_sensor(self):
t = MetastorePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
partition_name='ds={}'.format(DEFAULT_DATE_DS),
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive2samba(self):
t = Hive2SambaOperator(
task_id='hive2samba_check',
samba_conn_id='tableau_samba',
hql="SELECT * FROM airflow.static_babynames LIMIT 10000",
destination_filepath='test_airflow.csv',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_to_mysql(self):
t = HiveToMySqlTransfer(
mysql_conn_id='airflow_db',
task_id='hive_to_mysql_check',
create=True,
sql="""
SELECT name
FROM airflow.static_babynames
LIMIT 100
""",
mysql_table='test_static_babynames',
mysql_preoperator=[
'DROP TABLE IF EXISTS test_static_babynames;',
'CREATE TABLE test_static_babynames (name VARCHAR(500))',
],
dag=self.dag)
t.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class MomentumOptimizerTest(test.TestCase):
def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
var = var + accum * lr * momentum
accum = accum * momentum + g
var = var - lr * accum
var = var - accum * lr * momentum
return var, accum
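# Note: the helper above is algebraically equivalent to the Nesterov update
#   accum_new = momentum * accum + g
#   var_new   = var - lr * (g + momentum * accum_new)
# since var + lr*momentum*accum - lr*accum_new - lr*momentum*accum_new
# simplifies to var - lr*g - lr*momentum*accum_new.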
def doTestBasic(self, use_resource=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtype, name="var1_%d" % i)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
if context.in_graph_mode():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
if context.in_graph_mode():
self.assertFalse(slot0 in variables.trainable_variables())
self.assertFalse(slot1 in variables.trainable_variables())
# Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
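# Concretely, for the first element of var0 with lr=2.0 and momentum=0.9:
# slot0 = 0.9 * 0 + 0.1 = 0.1 and var0 = 1.0 - 2.0 * 0.1 = 0.8,
# which is what the assertions below check.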
if context.in_graph_mode():
self.evaluate(mom_update)
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
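# Concretely, for the first element of var0:
# slot0 = 0.9 * 0.1 + 0.1 = 0.19 and var0 = 0.8 - 2.0 * 0.19 = 0.42.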
if context.in_graph_mode():
self.evaluate(mom_update)
else:
mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
def testBasic(self):
with self.test_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
cost = 5 * var0 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name="global_step")
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9, use_nesterov=True)
opt_op = mom_op.minimize(cost, global_step, [var0, var1])
variables.global_variables_initializer().run()
for t in range(1, 5):
opt_op.run()
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
self.assertAllClose(var0_np, var0.eval())
self.assertAllClose(var1_np, var1.eval())
def testSparseNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.test_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
grads = []
for t in range(1, 5):
grads.append(var0_np * 10)
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
loss = 5 * var0 * var0 + 3 * var1
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9, use_nesterov=True)
x_feed = array_ops.placeholder(dtype)
y_feed = ops.IndexedSlices(
x_feed, constant_op.constant([0, 1]), constant_op.constant([2]))
grads_and_vars = [(y_feed, var0), (constant_op.constant(
[3.0, 3.0], dtype=dtype), var1)]
opt_update = mom_op.apply_gradients(grads_and_vars)
variables.global_variables_initializer().run()
for t in range(1, 5):
opt_update.run(feed_dict={x_feed: grads[t - 1]})
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
self.assertAllClose(var0_np, var0.eval())
self.assertAllClose(var1_np, var1.eval())
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = momentum_lib.MomentumOptimizer(
learning_rate=1.0, momentum=0.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[-111, -138]], var0.eval())
def testTensorLearningRateAndMomentum(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=constant_op.constant(2.0),
momentum=constant_op.constant(0.9))
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in variables.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
def _dbParamsMom01(self):
"""Return dist-belief momentum values.
The return values were generated from the dist-belief momentum unittest,
running with a learning rate of 0.1 and a momentum of 0.1.
These values record how a parameter vector of size 10, initialized with 0.0,
gets updated with 10 consecutive momentum steps. It uses random gradients.
Returns:
db_grad: The gradients to apply
db_out: The parameters after the momentum update.
"""
db_grad = [[]] * 10
db_out = [[]] * 10
# pylint: disable=line-too-long
db_grad[0] = [
0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018,
0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615
]
db_out[0] = [
-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018,
-0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618
]
db_grad[1] = [
0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378,
0.5513742, 0.94687688, 0.16012503, 0.22159521
]
db_out[1] = [
-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884,
-0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544
]
db_grad[2] = [
0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965,
0.31168157, 0.43203235, 0.16792089, 0.24644311
]
db_out[2] = [
-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978,
-0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189
]
db_grad[3] = [
0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098,
0.81454384, 0.03848977, 0.89759839, 0.93665648
]
db_out[3] = [
-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105,
-0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303
]
db_grad[4] = [
0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359,
0.69107032, 0.81897682, 0.5433259, 0.67860287
]
db_out[4] = [
-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165,
-0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544
]
db_grad[5] = [
0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563,
0.84163809, 0.41172323, 0.83259648, 0.44941229
]
db_out[5] = [
-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094,
-0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717
]
db_grad[6] = [
0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221,
0.73577434, 0.16014607, 0.57500273, 0.071136251
]
db_out[6] = [
-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685,
-0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997
]
db_grad[7] = [
0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646,
0.74053431, 0.16033, 0.66625422, 0.73515922
]
db_out[7] = [
-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838,
-0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418
]
db_grad[8] = [
0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039,
0.55561525, 0.22567581, 0.93331909, 0.29438227
]
db_out[8] = [
-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527,
-0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781
]
db_grad[9] = [
0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893,
0.68593478, 0.50580865, 0.12602448, 0.093537711
]
db_out[9] = [
-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302,
-0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295
]
# pylint: enable=line-too-long
return db_grad, db_out
def testLikeDistBeliefMom01(self):
with self.test_session():
db_grad, db_out = self._dbParamsMom01()
num_samples = len(db_grad)
var0 = variables.Variable([0.0] * num_samples)
grads0 = constant_op.constant([0.0] * num_samples)
mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
variables.global_variables_initializer().run()
for i in xrange(num_samples):
mom_update.run(feed_dict={grads0: db_grad[i]})
self.assertAllClose(np.array(db_out[i]), var0.eval())
def testSparse(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable(array_ops.zeros([4, 2], dtype=dtype))
var1 = variables.Variable(constant_op.constant(1.0, dtype, [4, 2]))
grads0 = ops.IndexedSlices(
constant_op.constant(
[[.1, .1]], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([4, 2]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[[.01, .01], [.01, .01]], dtype=dtype),
constant_op.constant([2, 3]),
constant_op.constant([4, 2]))
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([0, 0], var0.eval()[0])
self.assertAllClose([0, 0], var0.eval()[1])
self.assertAllClose([1, 1], var1.eval()[2])
# Step 1: the momentum accumulators are 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0, 0]), slot0.eval()[0])
self.assertAllCloseAccordingToType(np.array([.1, .1]), slot0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([.01, .01]), slot1.eval()[2])
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(np.array([0, 0]), var0.eval()[0])
self.assertAllCloseAccordingToType(
np.array([-(0.1 * 2.0), -(0.1 * 2.0)]), var0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]), var1.eval()[2])
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), slot0.eval()[0])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval()[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), var0.eval()[0])
self.assertAllCloseAccordingToType(
np.array([
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), -(0.1 * 2.0) - (
(0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([
0.98 - ((0.9 * 0.01 + 0.01) * 2.0), 0.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval()[2])
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update1 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
mom_update2 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update1.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
# Step 2: the second momentum accumulators contain the previous update.
mom_update2.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
if __name__ == "__main__":
test.main()
|
|
from molmass.molmass import Formula
from scipy.stats import mode
import numpy
import re
class mass_isotopomer_distributions():
def __init__(self):
return;
def build_precursorSpectrumFromMRMs(self,peakSpectrum_I,blankSpectrum_I):
'''build the precursor isotopomer spectrum from MRM peak data'''
# Input:
# peakSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
# blankSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
# Output:
# peakSpectrum_theoretical = {fragment:{mass:intensity}}
# peakSpectrum_measured = {fragment:{mass:intensity}}
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
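# Illustrative (hypothetical) input shape, not taken from real data:
# peakSpectrum_I = {'C3H3O3': {(87.0, 43.0): 12345.0, (88.0, 43.0): 321.0}}
# The returned spectra are keyed by the theoretical (rounded) precursor masses
# of each fragment, with product-ion intensities summed per precursor mass.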
fragments_I = list(peakSpectrum_I.keys());
# round all precursor/product masses in input for comparison:
peakSpectrum_copy_I = {};
for frag,spec in peakSpectrum_I.items():
peakSpectrum_tmp = {};
for masses,intensity in spec.items():
peakSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
blankSpectrum_copy_I = {};
for frag,spec in blankSpectrum_I.items():
blankSpectrum_tmp = {};
for masses,intensity in spec.items():
blankSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
blankSpectrum_copy_I[frag] = blankSpectrum_tmp;
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
# determine masses from fragments
masses = [];
peakSpectrum_measured = {};
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_measured[frag] = None;
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
# 1. copy data from peakSpectrum_I to peakSpectrum_measured based on theoretical fragments
# 2. generate corrected spectrum
intensityList = [];
if frag in peakSpectrum_I:
precursor_masses = [k[0] for k in peakSpectrum_copy_I[frag].keys()];
measured_spec = {};
corrected_spec = {};
for i,mass in enumerate(masses_rounded): #iterate through theoretical precursor masses
measured = 0.0;
corrected = 0.0;
if mass in precursor_masses:
product_masses = [k[1] for k in peakSpectrum_copy_I[frag].keys() if k[0]==mass];
for product in product_masses: #iterate through measured product masses
if frag in blankSpectrum_copy_I:
blank_precursor_masses = [k[0] for k in blankSpectrum_copy_I[frag].keys()];
if mass in blank_precursor_masses:
blank_product_masses = [k[1] for k in blankSpectrum_copy_I[frag].keys() if k[0]==mass];
if product in blank_product_masses:
if blankSpectrum_copy_I[frag][(mass,product)]<0.5*peakSpectrum_copy_I[frag][(mass,product)]:
corrected += peakSpectrum_copy_I[frag][(mass,product)]-blankSpectrum_copy_I[frag][(mass,product)];
measured += peakSpectrum_copy_I[frag][(mass,product)]
else:
corrected += 0.0;
measured += peakSpectrum_copy_I[frag][(mass,product)]
else:
corrected += peakSpectrum_copy_I[frag][(mass,product)];
measured += peakSpectrum_copy_I[frag][(mass,product)]
else:
corrected += peakSpectrum_copy_I[frag][(mass,product)];
measured += peakSpectrum_copy_I[frag][(mass,product)]
else:
corrected += peakSpectrum_copy_I[frag][(mass,product)];
measured += peakSpectrum_copy_I[frag][(mass,product)];
measured_spec[masses[i]] = measured;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_measured[frag] = measured_spec;
peakSpectrum_corrected[frag] = corrected_spec;
# normalize each spectrum:
#NOTE: normalization by max to allow for later conversion to normalization by sum
normalized = {};
intensityListMax = max(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if intensityListMax != 0: normalized[k] = v/intensityListMax;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
def build_productSpectrumFromMRMs(self,peakSpectrum_I,blankSpectrum_I):
'''build the product isotopomer spectrum from MRM peak data'''
# Input:
# peakSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
# blankSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
# Output:
# peakSpectrum_theoretical = {fragment:{mass:intensity}}
# peakSpectrum_measured = {fragment:{mass:intensity}}
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
fragments_I = list(peakSpectrum_I.keys());
# round all precursor/product masses in input for comparison:
peakSpectrum_copy_I = {};
for frag,spec in peakSpectrum_I.items():
peakSpectrum_tmp = {};
for masses,intensity in spec.items():
peakSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
blankSpectrum_copy_I = {};
for frag,spec in blankSpectrum_I.items():
blankSpectrum_tmp = {};
for masses,intensity in spec.items():
blankSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
blankSpectrum_copy_I[frag] = blankSpectrum_tmp;
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
# determine masses from fragments
masses = [];
peakSpectrum_measured = {};
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_measured[frag] = None;
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
# 1. copy data from peakSpectrum_I to peakSpectrum_measured based on theoretical fragments
# 2. generate corrected spectrum
intensityList = [];
if frag in peakSpectrum_I:
product_masses = [k[1] for k in peakSpectrum_copy_I[frag].keys()];
measured_spec = {};
corrected_spec = {};
for i,mass in enumerate(masses_rounded): #iterate through theoretical product masses
measured = 0.0;
corrected = 0.0;
if mass in product_masses:
precursor_masses = [k[0] for k in peakSpectrum_copy_I[frag].keys() if k[1]==mass];
for precursor in precursor_masses: #iterate through measured precursor masses
if frag in blankSpectrum_copy_I:
blank_product_masses = [k[1] for k in blankSpectrum_copy_I[frag].keys()];
if mass in blank_product_masses:
blank_precursor_masses = [k[0] for k in blankSpectrum_copy_I[frag].keys() if k[1]==mass];
if precursor in blank_precursor_masses:
if blankSpectrum_copy_I[frag][(precursor,mass)]<0.5*peakSpectrum_copy_I[frag][(precursor,mass)]:
corrected += peakSpectrum_copy_I[frag][(precursor,mass)]-blankSpectrum_copy_I[frag][(precursor,mass)];
measured += peakSpectrum_copy_I[frag][(precursor,mass)]
else:
corrected += 0.0;
measured += peakSpectrum_copy_I[frag][(precursor,mass)]
else:
corrected += peakSpectrum_copy_I[frag][(precursor,mass)];
measured += peakSpectrum_copy_I[frag][(precursor,mass)]
else:
corrected += peakSpectrum_copy_I[frag][(precursor,mass)];
measured += peakSpectrum_copy_I[frag][(precursor,mass)]
else:
corrected += peakSpectrum_copy_I[frag][(precursor,mass)];
measured += peakSpectrum_copy_I[frag][(precursor,mass)];
measured_spec[masses[i]] = measured;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_measured[frag] = measured_spec;
peakSpectrum_corrected[frag] = corrected_spec;
# normalize each spectrum:
#NOTE: normalization by max to allow for later conversion to normalization by sum
normalized = {};
intensityListMax = max(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if intensityListMax != 0: normalized[k] = v/intensityListMax;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
def compare_peakSpectrum_normMax(self,peakSpectrum_normalized_list_I,return_theoretical = False):
# Input:
# peakSpectrum_normalized_list_I = [{fragment:{mass:intensity}}]
# Output:
# peakSpectrum_stats_O = {fragment:{mass:{'n':integer,
# 'mean':fraction,
# 'stdDev':fraction,
# 'absDev':fraction}}
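# Illustrative (hypothetical) reading of the output: a per-mass entry of
# {'n': 3, 'mean': 0.62, 'stdDev': 0.01, 'absDev': 0.03} would mean three
# replicate spectra averaged to 0.62 for that isotopomer, 0.03 away from the
# theoretical normalized intensity.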
fragments_all = [];
for row in peakSpectrum_normalized_list_I:
fragments_all.extend(list(row.keys()));
fragments_I = list(set(fragments_all));
#fragments_I = peakSpectrum_normalized_list_I[0].keys();
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
peakSpectrum_stats_O = {};
for frag in fragments_I:
peakSpectrum_stats_O[frag] = {'n':None,
'mean':None,
'stdDev':None,
'absDev':None};
if not peakSpectrum_theoretical[frag]: continue; # no carbons in fragment
intensityList = [];
masses = [];
stats = {};
for peakSpectrum in peakSpectrum_normalized_list_I:
intensityDict = {};
peakSpectrumMasses = list(peakSpectrum_theoretical[frag].keys());
for mass in peakSpectrumMasses:
if frag in peakSpectrum and mass in peakSpectrum[frag] and peakSpectrum[frag][mass] and peakSpectrum[frag][mass] > 0.0:
intensityDict[mass] = peakSpectrum[frag][mass];
else:
intensityDict[mass] = 0.0;
if not mass in masses: masses.append(mass);
intensityList.append(intensityDict);
## uncomment to only compare measured masses
#intensityDict = {};
#peakSpectrumMasses = peakSpectrum[frag].keys();
#for mass in peakSpectrumMasses:
# if peakSpectrum[frag][mass] > 0.0:
# intensityDict[mass] = peakSpectrum[frag][mass];
# if not mass in masses: masses.append(mass);
#intensityList.append(intensityDict);
for mass in masses:
stats[mass] = None;
data = [];
for intensity in intensityList:
if intensity[mass]>0.0:data.append(intensity[mass]);
if data:
intensity_array = numpy.array(data);
if peakSpectrum_theoretical[frag][mass]:abs_dev = abs(intensity_array.mean() - peakSpectrum_theoretical[frag][mass]);
else: abs_dev = None;
stats[mass] = {'n':len(intensity_array),
'mean':intensity_array.mean(),
'stdDev':intensity_array.std(),
'absDev':abs_dev};
else:
stats[mass] = {'n':0.0,
'mean':0.0,
'stdDev':0.0,
'absDev':None};
if stats: peakSpectrum_stats_O[frag] = stats;
if return_theoretical:
return peakSpectrum_stats_O,peakSpectrum_theoretical;
else:
return peakSpectrum_stats_O;
def compare_peakSpectrum_normSum(self,peakSpectrum_normalized_list_I,return_theoretical = False):
# Input:
# peakSpectrum_normalized_list_I = [{fragment:{mass:intensity}}]
# Output:
# peakSpectrum_stats_O = {fragment:{mass:{'n':integer,
# 'mean':fraction,
# 'stdDev':fraction,
# 'absDev':fraction}}
fragments_all = [];
for row in peakSpectrum_normalized_list_I:
fragments_all.extend(list(row.keys()));
fragments_I = list(set(fragments_all));
#fragments_I = peakSpectrum_normalized_list_I[0].keys();
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I,True);
peakSpectrum_stats_O = {};
for frag in fragments_I:
peakSpectrum_stats_O[frag] = {'n':None,
'mean':None,
'stdDev':None,
'absDev':None};
if not peakSpectrum_theoretical[frag]: continue; # no carbons in fragment
intensityList = [];
masses = [];
stats = {};
for peakSpectrum in peakSpectrum_normalized_list_I:
intensityDict = {};
peakSpectrumMasses = list(peakSpectrum_theoretical[frag].keys());
for mass in peakSpectrumMasses:
if frag in peakSpectrum and mass in peakSpectrum[frag] and peakSpectrum[frag][mass] and peakSpectrum[frag][mass] > 0.0:
intensityDict[mass] = peakSpectrum[frag][mass];
else:
intensityDict[mass] = 0.0;
if not mass in masses: masses.append(mass);
intensityList.append(intensityDict);
## uncomment to only compare measured masses
#intensityDict = {};
#peakSpectrumMasses = peakSpectrum[frag].keys();
#for mass in peakSpectrumMasses:
# if peakSpectrum[frag][mass] > 0.0:
# intensityDict[mass] = peakSpectrum[frag][mass];
# if not mass in masses: masses.append(mass);
#intensityList.append(intensityDict);
for mass in masses:
stats[mass] = None;
data = [];
for intensity in intensityList:
if intensity[mass]>0.0:data.append(intensity[mass]);
if data:
intensity_array = numpy.array(data);
if peakSpectrum_theoretical[frag][mass]:abs_dev = abs(intensity_array.mean() - peakSpectrum_theoretical[frag][mass]);
else: abs_dev = None;
stats[mass] = {'n':len(intensity_array),
'mean':intensity_array.mean(),
'stdDev':intensity_array.std(),
'absDev':abs_dev};
else:
stats[mass] = {'n':0.0,
'mean':0.0,
'stdDev':0.0,
'absDev':None};
if stats: peakSpectrum_stats_O[frag] = stats;
if return_theoretical:
return peakSpectrum_stats_O,peakSpectrum_theoretical;
else:
return peakSpectrum_stats_O;
def report_fragmentSpectrum_normMax(self,fragments_I,round_mass=False):
'''calculate the theoretical fragment spectrum, normalized to the maximum intensity'''
# Input: fragments_I = list of chemical formula strings
# Output: fragmentSpectrum_O = {formula_str:{mass:intensity}}
fragmentSpectrum_tmp = {};
fragmentSpectrum_O = {};
for formula_str_I in fragments_I:
fragmentSpectrum_tmp[formula_str_I] = None;
fragmentSpectrum_O[formula_str_I] = None;
formula_str = re.sub('[+-]', '', formula_str_I);
n12C = 0
n13C = 0
if 'C' not in Formula(formula_str)._elements: continue; #check if a carbon is even contained in the formula
if 0 in Formula(formula_str)._elements['C']:
n12C += Formula(formula_str)._elements['C'][0]; #get the # of Carbons
if 13 in Formula(formula_str)._elements['C']:
n13C += Formula(formula_str)._elements['C'][13]
mnumber = Formula(formula_str).isotope.massnumber #get the nominal mass number
spectrum = Formula(formula_str).spectrum() #get the spectrum
fragmentSpectrum = {}
intensityList = [];
for c in range(-n13C, n12C + 1):
if c<0:
fragmentSpectrum[Formula(formula_str).isotope.mass-1]=0.0;
intensityList.append(0.0);
else:
if mnumber+c in spectrum:
fragmentSpectrum[spectrum[mnumber+c][0]]=spectrum[mnumber+c][1];
intensityList.append(spectrum[mnumber+c][1]);
else:
fragmentSpectrum[Formula(formula_str).isotope.mass + c]=0.0;
intensityList.append(0.0);
fragmentSpectrum_tmp[formula_str_I] = fragmentSpectrum;
# by default, the spectrum is normalized to the sum of all intensities measured
# convert sum-normalized spectrum to max-normalized spectrum
intensityListMax = max(intensityList);
fragmentSpectrum = {};
for k,v in fragmentSpectrum_tmp[formula_str_I].items():
if round_mass:
fragmentSpectrum[int(numpy.round(k))] = v/intensityListMax;
else:
fragmentSpectrum[k] = v/intensityListMax;
fragmentSpectrum_O[formula_str_I] = fragmentSpectrum;
return fragmentSpectrum_O;
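# Hypothetical usage sketch (the formula is chosen only for illustration):
# mid = mass_isotopomer_distributions()
# spectrum = mid.report_fragmentSpectrum_normMax(['C6H12O6'], round_mass=True)
# # -> {'C6H12O6': {180: 1.0, 181: ..., ..., 186: ...}}, i.e. one entry per
# # possible 13C substitution, normalized so the most intense peak equals 1.0.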
def report_fragmentSpectrum_normSum(self,fragments_I,round_mass=False):
'''calculate the theoretical fragment spectrum, normalized to the sum of intensities'''
# Input: fragments_I = list of chemical formula strings
# Output: fragmentSpectrum_O = {formula_str:{mass:intensity}}
fragmentSpectrum_O = {};
for formula_str_I in fragments_I:
fragmentSpectrum_O[formula_str_I] = None;
formula_str = re.sub('[+-]', '', formula_str_I);
n12C = 0
n13C = 0
if 'C' not in Formula(formula_str)._elements: continue; #check if a carbon is even contained in the formula
if 0 in Formula(formula_str)._elements['C']:
n12C += Formula(formula_str)._elements['C'][0]; #get the # of Carbons
if 13 in Formula(formula_str)._elements['C']:
n13C += Formula(formula_str)._elements['C'][13]
mnumber = Formula(formula_str).isotope.massnumber #get the nominal mass number
spectrum = Formula(formula_str).spectrum() #get the spectrum
fragmentSpectrum = {}
for c in range(-n13C, n12C + 1):
if c<0:
exact_mass = Formula(formula_str).isotope.mass+c;
if round_mass:
fragmentSpectrum[int(numpy.round(exact_mass))]=0.0;
else:
fragmentSpectrum[exact_mass]=0.0;
else:
if mnumber+c in spectrum:
exact_mass = spectrum[mnumber+c][0];
if round_mass:
fragmentSpectrum[int(numpy.round(exact_mass))]=spectrum[mnumber+c][1];
else:
fragmentSpectrum[exact_mass]=spectrum[mnumber+c][1];
else:
exact_mass = Formula(formula_str).isotope.mass + c
if round_mass:
fragmentSpectrum[int(numpy.round(exact_mass))]=0.0;
else:
fragmentSpectrum[exact_mass]=0.0;
fragmentSpectrum_O[formula_str_I] = fragmentSpectrum;
return fragmentSpectrum_O;
def extract_peakData_normMax(self, peakData_I, fragments_I, res_I=0.3, round_mass=False):
'''extract maximum intensity peak'''
# Input: peakData_I = mass:intensity
# res_I = mass window/resolution (default = 0.3);
# Output:
# peakSpectrum_theoretical = {fragment:{mass:intensity}}
# peakSpectrum_measured = {fragment:{mass:intensity}}
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
'''The algorithm implemented below does not track the peak width for calculation of peak area,
nor for calculation of resolution using FWHM. However, compared to the peak-picking algorithms
implemented in Analyst(r) and PeakView(r), the intensities for most compounds match
the intensities calculated as peaks (compare 140228_MRM_EPI/..._EPI to ..._EPI_peakList
or 140228_ER_EPI/...I to ..._ER).'''
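# In practice each theoretical mass m collects the maximum measured intensity
# found in the half-open window [m - res_I, m + res_I); with the default
# res_I = 0.3 that is a 0.6 Da window centered on m.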
# min peak height
detectionThreshold = 2500.0
# pre-sort for efficiency
# sort masses in peakData
keys = list(peakData_I.keys());
keys.sort();
# determine baseline intensity
# based on the most frequently occurring intensity (background threshold);
values = numpy.array(list(peakData_I.values()));
values_median = mode(values)[0];
if len(values_median) > 1:
baseline = float(max(values_median)); # min returned too much junk
else:
baseline = float(values_median);
if round_mass:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
else:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I);
# determine masses from fragments
masses = [];
peakSpectrum_measured_qcqa = {};
peakSpectrum_normalized_qcqa = {};
peakSpectrum_corrected_qcqa = {};
peakSpectrum_measured = {};
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_measured_qcqa[frag] = None;
peakSpectrum_corrected_qcqa[frag] = None;
peakSpectrum_normalized_qcqa[frag] = None;
peakSpectrum_measured[frag] = None;
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
keyIndex = 0;
keyMax = len(keys);
measured_qcqa = {};
measured = {};
for mass in masses: # iterate through each mass
maxPeak = 0.0;
keyMaxPeak = None;
measured_qcqa[mass] = [keyMaxPeak,maxPeak];
measured[mass] = maxPeak;
while keyIndex<keyMax:
if keys[keyIndex] >= mass - res_I and keys[keyIndex] < mass + res_I:
peak = peakData_I[keys[keyIndex]];
if peak > maxPeak:
maxPeak = peak;
keyMaxPeak = keys[keyIndex];
keyIndex += 1;
elif keys[keyIndex] < mass - res_I:
keyIndex += 1;
continue;
elif keys[keyIndex] >= mass + res_I:
measured_qcqa[mass] = [keyMaxPeak,maxPeak];
measured[mass] = maxPeak;
break;
if measured:
peakSpectrum_measured_qcqa[frag] = measured_qcqa;
peakSpectrum_measured[frag] = measured;
else: break #no peaks were found for the fragment
# correct intensity for background:
corrected_qcqa = {};
#intensityList = [];
for k,v in peakSpectrum_measured_qcqa[frag].items():
if v[1] > detectionThreshold:
if v[1] - baseline > 0.0:
corrected_qcqa[k] = [v[0],v[1] - baseline];
else:
corrected_qcqa[k] = [v[0],0.0];
else:
corrected_qcqa[k] = [v[0],0.0];
#intensityList.append(corrected_qcqa[k][1]);
peakSpectrum_corrected_qcqa[frag] = corrected_qcqa
corrected = {};
intensityList = [];
for k,v in peakSpectrum_measured[frag].items():
if v > detectionThreshold:
if v - baseline > 0.0:
corrected[k] = v - baseline;
else:
corrected[k] = 0.0;
intensityList.append(corrected[k]);
else:
corrected[k] = 0.0;
intensityList.append(corrected[k]);
peakSpectrum_corrected[frag] = corrected;
# normalize each spectrum:
normalized_qcqa = {};
intensityListMax_qcqa = max(intensityList);
for k,v in peakSpectrum_corrected_qcqa[frag].items():
if intensityListMax_qcqa != 0: normalized_qcqa[k] = [v[0],v[1]/intensityListMax_qcqa];
else: normalized_qcqa[k] = [v[0], None];
peakSpectrum_normalized_qcqa[frag] = normalized_qcqa;
normalized = {};
intensityListMax = max(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if intensityListMax != 0: normalized[k] = v/intensityListMax;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
def extract_peakData_normSum(self, peakData_I, fragments_I, res_I=0.3,round_mass=False):
'''extract the maximum intensity peak within the resolution window and normalize each spectrum by its summed intensity'''
# Input: peakData_I = mass:intensity
# res_I = mass window/resolution (default = 0.3);
# Output:
# peakSpectrum_theoretical = {fragment:{mass:intensity}}
# peakSpectrum_measured = {fragment:{mass:intensity}}
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
# min peak height
detectionThreshold = 1000.0
# pre-sort for efficiency
# sort masses in peakData
keys = list(peakData_I.keys());
keys.sort();
# determine baseline intensity
# based on the most frequently occurring intensity (background threshold);
values = numpy.array(list(peakData_I.values()));
values_median = mode(values)[0];
if len(values_median) > 1:
baseline = float(max(values_median)); # min returned too much junk
else:
baseline = float(values_median);
if round_mass:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
else:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I);
# determine masses from fragments
masses = [];
peakSpectrum_measured_qcqa = {};
peakSpectrum_normalized_qcqa = {};
peakSpectrum_corrected_qcqa = {};
peakSpectrum_measured = {};
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_measured_qcqa[frag] = None;
peakSpectrum_corrected_qcqa[frag] = None;
peakSpectrum_normalized_qcqa[frag] = None;
peakSpectrum_measured[frag] = None;
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
keyIndex = 0;
keyMax = len(keys);
measured_qcqa = {};
measured = {};
for mass in masses: # iterate through each mass
maxPeak = 0.0;
keyMaxPeak = None;
measured_qcqa[mass] = [keyMaxPeak,maxPeak];
measured[mass] = maxPeak;
while keyIndex<keyMax:
if keys[keyIndex] >= mass - res_I and keys[keyIndex] < mass + res_I:
peak = peakData_I[keys[keyIndex]];
if peak > maxPeak:
maxPeak = peak;
keyMaxPeak = keys[keyIndex];
keyIndex += 1;
elif keys[keyIndex] < mass - res_I:
keyIndex += 1;
continue;
elif keys[keyIndex] >= mass + res_I:
measured_qcqa[mass] = [keyMaxPeak,maxPeak];
measured[mass] = maxPeak;
break;
if measured:
peakSpectrum_measured_qcqa[frag] = measured_qcqa;
peakSpectrum_measured[frag] = measured;
else: break #no peaks were found for the fragment
# correct intensity for background:
corrected_qcqa = {};
#intensityList = [];
for k,v in peakSpectrum_measured_qcqa[frag].items():
if v[1] > detectionThreshold:
if v[1] - baseline > 0.0:
corrected_qcqa[k] = [v[0],v[1] - baseline];
else:
corrected_qcqa[k] = [v[0],0.0];
else:
corrected_qcqa[k] = [v[0],0.0];
#intensityList.append(corrected_qcqa[k][1]);
peakSpectrum_corrected_qcqa[frag] = corrected_qcqa
corrected = {};
intensityList = [];
for k,v in peakSpectrum_measured[frag].items():
if v > detectionThreshold:
if v - baseline > 0.0:
corrected[k] = v - baseline;
else:
corrected[k] = 0.0;
intensityList.append(corrected[k]);
else:
corrected[k] = 0.0;
intensityList.append(corrected[k]);
peakSpectrum_corrected[frag] = corrected;
# normalize each spectrum:
normalized_qcqa = {};
intensityListSum_qcqa = sum(intensityList);
for k,v in peakSpectrum_corrected_qcqa[frag].items():
if intensityListSum_qcqa != 0: normalized_qcqa[k] = [v[0],v[1]/intensityListSum_qcqa];
else: normalized_qcqa[k] = [v[0], None];
peakSpectrum_normalized_qcqa[frag] = normalized_qcqa;
normalized = {};
intensityListSum = sum(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if intensityListSum != 0: normalized[k] = v/intensityListSum;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
def extract_peakList_normMax(self, peakSpectrum_I, fragments_I, round_mass=False):
'''extract peak spectrum from peak list'''
# Input:
# peakSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
# fragments_I = [fragments]
# Output:
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
# round all precursor/product masses in input for comparison:
peakSpectrum_copy_I = {};
for frag,spec in peakSpectrum_I.items():
peakSpectrum_tmp = {};
for masses,intensity in spec.items():
peakSpectrum_tmp[numpy.around(masses)] = intensity;
peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
if round_mass:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
else:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I);
# determine masses from fragments
masses = [];
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
# 1. copy data from peakSpectrum_I to peakSpectrum_corrected based on theoretical fragments
intensityList = [];
if frag in peakSpectrum_I:
fragment_masses = [k for k in peakSpectrum_copy_I[frag].keys()];
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0; #added on 12/30/15
if mass in fragment_masses:
corrected = peakSpectrum_copy_I[frag][mass];
if not corrected: corrected = 0.0;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
else:
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
# normalize each spectrum:
#NOTE: normalization by max to allow for later conversion to normalization by sum
normalized = {};
intensityListMax = max(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if v:
if intensityListMax != 0: normalized[k] = v/intensityListMax;
else: normalized[k] = None;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_corrected, peakSpectrum_normalized;
def extract_peakList_normSum(self, peakSpectrum_I, fragments_I, round_mass=False):
'''extract peak spectrum from peak list'''
# Input:
# peakSpectrum_I = {fragment:{mass:intensity}}
# fragments_I = [fragments]
# Output:
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
# round all precursor/product masses in input for comparison:
peakSpectrum_copy_I = {};
for frag,spec in peakSpectrum_I.items():
peakSpectrum_tmp = {};
for masses,intensity in spec.items():
peakSpectrum_tmp[numpy.around(masses)] = intensity;
peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
if round_mass:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I,True);
else:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I);
# determine masses from fragments
masses = [];
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
# 1. copy data from peakSpectrum_I to peakSpectrum_corrected based on theoretical fragments
intensityList = [];
if frag in peakSpectrum_I:
fragment_masses = [k for k in peakSpectrum_copy_I[frag].keys()];
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0;
if mass in fragment_masses and peakSpectrum_copy_I[frag][mass]:
corrected = peakSpectrum_copy_I[frag][mass];
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
else:
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
# normalize each spectrum:
normalized = {};
intensityListSum = sum(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if v>0.0:
if intensityListSum != 0: normalized[k] = v/intensityListSum;
else: normalized[k] = None;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_corrected, peakSpectrum_normalized;
def recombine_dilutionsMRMs(self,peakData_I):
'''Method to "recombine" MRMs from one dilution to the next'''
# input: peakData_I = {frag:[mass:{'intensity':intensity,
# 'dilution':dilution,
# 'used_':used_,
# 'comment_':comment_}]}
# e.g.: {frag:[100:{'dilution':'high',...}],
# [101:{'dilution':'low','comment_':'Recombine',...}],
# [101:{'dilution':'high','comment_':'Recombine',...}],
# [102:{'dilution':'low','comment_':'Recombine',...}],
# [103:{'dilution':'low',...}],...}
# NOTE: each fragment maps to a list of single-entry {mass: data} dictionaries
# NOTE: input list of masses must be sorted in ascending order
# followed by 'dilutions' in descending order as shown below!
# output: peakData_O = {frag:{mass:{'intensity':intensity,
# 'dilution':dilution,
# 'used_':used_,
# 'comment_':comment_}}}
# peakData_O_false = {frag:{mass:{'intensity':intensity,
# 'dilution':dilution,
# 'used_':used_,
# 'comment_':comment_}}}
# Note: second output structure needed to update rows that are changed to false
'''Algorithm:
start:
dilution m comment used
'low' 0 '' false
'high' 0 '' true
'low' 1 'Recombine' true
'high' 1 'Recombine' true
'low' 2 'Recombine' true
'high' 2 '' false
'low' 3 '' true
'high' 3 '' false
recombine...
end:
dilution m comment used
'low' 0 '' false
'high' 0 '' true
'low' 1 'Recombine' false
'high' 1 'Recombine' true
'low' 2 'Recombine' true
'high' 2 '' false
'low' 3 '' true
'high' 3 '' false
...
done prior: set the normalized intensity for dilution 'low', m 1 to 1;
recalculate the rest of the normalized intensities for the dilutions 'low', m 2,3,4,...;
calculate the percent change from dilution 'low', m 1 to dilution 'low', m 2; from dilution 'low', m 2 to dilution 'low', m 3; ...;
replace dilution 'high', m 2 with the normalized intensity for dilution 'low', m 1 - the percent change from dilution 'low', m 1 to dilution 'low', m 2;
replace dilution 'low', m 3 with the new normalized intensity for m 2 - the percent change from dilution 'low', m 2 to dilution 'low', m 3;
...;'''
peakData_O = {};
peakData_O_false = {};
#iterate through each fragment
for frag,spec in peakData_I.items():
peakData_O[frag] = None;
peakData_O_false[frag] = None;
spec_O = {};
spec_O_false = {};
if not spec: continue; #check if there is data for the fragment
# extract out dilutions
dilutions = [];
for d in spec:
values = list(d.values())[0];
dilutions.append(values['dilution']);
dilutions = list(set(dilutions));
dilutions.sort();
dilutions_dict = dict(list(zip(dilutions,['low','high'])));
#iterate through each spectrum
intensity_prev = 0.0
intensity_new = 0.0;
intensity_difference = 0.0;
recombine_cnt = 0;
for spec_dict in spec:
mass = list(spec_dict.keys())[0];
data = list(spec_dict.values())[0];
spec_O[mass] = None;
data_O = {};
if not data['intensity']:
data_O['dilution'] = None;
data_O['intensity'] = None;
data_O['comment_'] = None;
data_O['used_'] = None;
spec_O[mass] = data_O;
continue;
if data['comment_'] == 'Recombine':
if recombine_cnt == 0: # 1st recombination event
if dilutions_dict[data['dilution']] != 'low': print('bad input');
intensity_prev = data['intensity'];
data['used_'] = False;
# copy the data
data_O['dilution'] = data['dilution'];
data_O['intensity'] = data['intensity'];
data_O['comment_'] = data['comment_'];
data_O['used_'] = data['used_'];
spec_O_false[mass] = data_O;
recombine_cnt += 1;
continue
elif recombine_cnt == 1: # 2nd recombination event
if dilutions_dict[data['dilution']] != 'high': print('bad input');
intensity_new = data['intensity'];
recombine_cnt += 1;
elif recombine_cnt == 2: # 3rd recombination event
if dilutions_dict[data['dilution']] != 'low': print('bad input');
intensity_difference = data['intensity']/intensity_prev;
intensity_prev = data['intensity'];
intensity_new = intensity_new*intensity_difference;
data['intensity'] = intensity_new;
recombine_cnt += 1;
elif recombine_cnt >= 3:
if dilutions_dict[data['dilution']] != 'low': print('bad input');
intensity_difference = data['intensity']/intensity_prev;
intensity_prev = data['intensity'];
intensity_new = intensity_new*intensity_difference;
data['intensity'] = intensity_new;
recombine_cnt += 1;
# copy data
data_O['dilution'] = data['dilution'];
data_O['intensity'] = data['intensity'];
data_O['comment_'] = data['comment_'];
data_O['used_'] = data['used_'];
spec_O[mass] = data_O;
# copy spectrum
peakData_O[frag] = spec_O
peakData_O_false[frag] = spec_O_false
#copy out the intensities without the comments
peakData_intensities_O = {};
for frag,spec in peakData_O.items():
spec_tmp = {};
for mass,v in (spec or {}).items(): # guard: fragments with no data were left as None above
spec_tmp[mass]=v['intensity'];
peakData_intensities_O[frag] = spec_tmp;
return peakData_O,peakData_O_false,peakData_intensities_O;
def normalize_peakSpectrum_normMax(self,peakSpectrum_I,scalingFactors_I):
'''normalize peakSpectrum taken from different m+0, m+1, ... fragments
using a reference scaling factor'''
# Input:
# peakSpectrum_I = {precursor_fragment:{product_fragment:{product_mass:intensity}}}
# scalingFactors_I = {precursor_fragment:intensity}
# Output:
# peakSpectrum_normalized = {product_fragment:{mass:intensity}}
'''Algorithm:
part 1: scale
for each precursor i:
for each product j in precursor i:
for each mass m in product j:
peakSpectrum[precursor_i][product_j][m]*scalingFactor[precursor_i]
part 2: reduce:
for each product j in all precursors:
for each mass in product j:
for each precursor i with product j:
peakSpectrum_O[product_j][m] += peakSpectrum[precursor_i][product_j][m]*scalingFactor[precursor_i]
'''
precursor_fragments_I = list(peakSpectrum_I.keys());
precursorSpectrum_dict = {};
product_fragments_all = [];
product_mass_all = [];
# iterate through each precursor fragment
for precursor in precursor_fragments_I:
product_fragments_I = list(peakSpectrum_I[precursor].keys());
productSpectrum_dict = {};
product_fragments_all.extend(product_fragments_I);
# iterate through each product fragment
for product in product_fragments_I:
spectrum_dict = {};
product_mass_dict = {};
product_mass_tmp = [];
# iterate through each mass
for k,v in peakSpectrum_I[precursor][product].items():
if peakSpectrum_I[precursor][product][k]:
spectrum_dict[k] = peakSpectrum_I[precursor][product][k]*scalingFactors_I[precursor];
else:
spectrum_dict[k] = 0.0;
product_mass_tmp.append(k);
productSpectrum_dict[product] = spectrum_dict;
product_mass_dict[product] = product_mass_tmp;
product_mass_all.append(product_mass_dict);
precursorSpectrum_dict[precursor] = productSpectrum_dict
# reduce product fragments list
product_fragments_reduced = list(set(product_fragments_all));
# reduce product masses
product_mass_combined = {};
product_mass_reduced = {};
for product in product_fragments_all:
product_mass_combined[product] = [];
for product_mass in product_mass_all:
if product in product_mass:
product_mass_combined[product].extend(product_mass[product]);
product_mass_reduced[product] = list(set(product_mass_combined[product]));
peakSpectrum_normalized_O = {};
# iterate through all common product fragments
for product in product_fragments_reduced:
peakSpectrum_normalized_O[product] = None;
peakSpectrum_normalized_tmp = {};
# iterate through each mass
for mass in product_mass_reduced[product]:
peakSpectrum_normalized_tmp[mass] = 0.0;
# iterate through each precursor
for precursor in precursor_fragments_I:
if product in precursorSpectrum_dict[precursor]:
if mass in precursorSpectrum_dict[precursor][product]:
peakSpectrum_normalized_tmp[mass] += precursorSpectrum_dict[precursor][product][mass]
else:
peakSpectrum_normalized_tmp[mass] += 0.0;
else: peakSpectrum_normalized_tmp[mass] += 0.0;
peakSpectrum_normalized_O[product] = peakSpectrum_normalized_tmp;
# re-normalize the spectrum to max-normalized spectrum
intensityListMax = {};
peakSpectrum_normalized_O_max = {};
for product,spec in peakSpectrum_normalized_O.items():
intensityList = [];
for mass,intensity in spec.items():
intensityList.append(intensity);
intensityListMax = max(intensityList);
fragmentSpectrum = {};
for mass,intensity in spec.items():
if intensityListMax != 0.0:
fragmentSpectrum[mass] = intensity/intensityListMax;
else:
fragmentSpectrum[mass] = 0.0;
peakSpectrum_normalized_O_max[product] = fragmentSpectrum;
return peakSpectrum_normalized_O_max
def calculate_fragmentSpectrumAccuracy(self, peakSpectrum_normalized_list_I):
'''calculate the accuracy from the normalized intensity
Method:
spectrum accuracy = mean(|measured_a - theoretical_a|) over all masses a in the spectrum
Input:
peakSpectrum_normalized_list_I = [{fragment:{mass:intensity}}]
Output:
peakSpectrum_accuracy_O = {fragment:float};
'''
fragments_I = list(peakSpectrum_normalized_list_I[0].keys());
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
peakSpectrum_accuracy_O = {};
for frag in fragments_I:
peakSpectrum_accuracy_O[frag] = None;
if not peakSpectrum_theoretical[frag]: continue; # no carbons in fragment
intensityList = [];
masses = [];
for peakSpectrum in peakSpectrum_normalized_list_I:
intensityDict = {};
peakSpectrumMasses = list(peakSpectrum_theoretical[frag].keys());
for mass in peakSpectrumMasses:
if frag in peakSpectrum and mass in peakSpectrum[frag] and peakSpectrum[frag][mass] > 0.0:
intensityDict[mass] = peakSpectrum[frag][mass];
else:
intensityDict[mass] = 0.0;
if not mass in masses: masses.append(mass);
intensityList.append(intensityDict);
## uncomment to only compare measured masses
#intensityDict = {};
#peakSpectrumMasses = peakSpectrum[frag].keys();
#for mass in peakSpectrumMasses:
# if peakSpectrum[frag][mass] > 0.0:
# intensityDict[mass] = peakSpectrum[frag][mass];
# if not mass in masses: masses.append(mass);
#intensityList.append(intensityDict);
accuracyLst = [];
for mass in masses:
data = [];
for intensity in intensityList:
if intensity[mass]>=0.0:data.append(intensity[mass]);
if data and peakSpectrum_theoretical[frag][mass]:
intensity_array = numpy.array(data);
accuracyLst.append(abs(intensity_array.mean() - peakSpectrum_theoretical[frag][mass]))
accuracyLstMean = None;
if accuracyLst:
accuracyLstMean = numpy.mean(accuracyLst);
peakSpectrum_accuracy_O[frag] = accuracyLstMean;
else: peakSpectrum_accuracy_O[frag] = None;
return peakSpectrum_accuracy_O;
def calculate_fragmentSpectrumAccuracy_normSum(self, peakSpectrum_normalized_list_I):
'''calculate the accuracy from the normalized intensity
Method:
spectrum accuracy = mean(|measured_a - theoretical_a|) over all masses a in the spectrum
Input:
peakSpectrum_normalized_list_I = [{fragment:{mass:intensity}}]
Output:
peakSpectrum_accuracy_O = {fragment:float};
'''
fragments_I = list(peakSpectrum_normalized_list_I[0].keys());
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I,True);
peakSpectrum_accuracy_O = {};
for frag in fragments_I:
peakSpectrum_accuracy_O[frag] = None;
if not peakSpectrum_theoretical[frag]: continue; # no carbons in fragment
intensityList = [];
masses = [];
for peakSpectrum in peakSpectrum_normalized_list_I:
intensityDict = {};
peakSpectrumMasses = list(peakSpectrum_theoretical[frag].keys());
for mass in peakSpectrumMasses:
if frag in peakSpectrum and mass in peakSpectrum[frag] and peakSpectrum[frag][mass] > 0.0:
intensityDict[mass] = peakSpectrum[frag][mass];
else:
intensityDict[mass] = 0.0;
if not mass in masses: masses.append(mass);
intensityList.append(intensityDict);
## uncomment to only compare measured masses
#intensityDict = {};
#peakSpectrumMasses = peakSpectrum[frag].keys();
#for mass in peakSpectrumMasses:
# if peakSpectrum[frag][mass] > 0.0:
# intensityDict[mass] = peakSpectrum[frag][mass];
# if not mass in masses: masses.append(mass);
#intensityList.append(intensityDict);
accuracyLst = [];
for mass in masses:
data = [];
for intensity in intensityList:
if intensity[mass]>=0.0:data.append(intensity[mass]);
if data and peakSpectrum_theoretical[frag][mass]:
intensity_array = numpy.array(data);
accuracyLst.append(abs(intensity_array.mean() - peakSpectrum_theoretical[frag][mass]))
accuracyLstMean = None;
if accuracyLst:
accuracyLstMean = numpy.mean(accuracyLst);
peakSpectrum_accuracy_O[frag] = accuracyLstMean;
else: peakSpectrum_accuracy_O[frag] = None;
return peakSpectrum_accuracy_O;
def make_CSourceMix(self,csources_I, composition_I):
'''Make a carbon source mix of a specified composition'''
# Input: (e.g. 80/20 1-13C/U-13C glc)
# csources_I = backbone of the csources [['[13C]HO','CH2O','CH2O','CH2O','CH2O','CH3O'],
# ['[13C]HO','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H3O']]
# composition_I = composition of the csources, e.g. [0.8,0.2]
# Output:
# emu_O = {strings of emu distribution: spectral list}
emu_O = {};
emu_all = [];
ncsources = len(csources_I)
for cs in csources_I:
emu_tmp = {};
emu_tmp = self.make_EMUDistributionAndCSpectra(cs)
emu_all.append(emu_tmp);
for k in list(emu_all[0].keys()):
spectra_tmp = [];
spectra_tmp = [0.0]*len(emu_all[0][k])
for i in range(ncsources):
for j in range(len(emu_all[i][k])):
spectra_tmp[j] += composition_I[i]*emu_all[i][k][j];
emu_O[k] = spectra_tmp;
return emu_O;
def make_EMUDistributionAndCSpectra(self,csource_I):
'''Make EMU distribution based on the carbon source'''
# Input:
# csource_I = carbon backbone of the csource
# e.g. 1-13C glc = ['[13C]HO','CH2','CH2','CH2','CH2','CH3O']
# U-13C glc = ['[13C]HO','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H3O']
# glc = ['CHO','CH2O','CH2O','CH2O','CH2O','CH3O']
# Output:
# emu_O = {strings of emu distribution: spectral list}
nC = len(csource_I)
emu_O = {};
# iterate through each carbon and change from 0 to 1
emu_c = nC*'0'; #initialize
emu_lst = list(emu_c);
for j in range(nC):
emu_lst[j] = '1'
for c in range(j,nC):
emu_lst_2 = copy.copy(emu_lst)
emu_lst_2[j] = '0';
emu_lst_2[c] = '1';
emu_tmp = copy.copy(emu_lst_2);
cfrag = [];
for i in range(c,nC):
emu_tmp[c] = '0';
emu_tmp[i] = '1';
emu_str = 'x' + ''.join(emu_tmp)
dfrag = [csource_I[p] for p,n in enumerate(emu_tmp) if n=='1']
dfrag_tmp = ''.join(dfrag)
#if emu_str.find('0')==-1: #ignore the fully labeled fragment
# continue;
spectrum_tmp = self.report_fragmentSpectrum_normSum([dfrag_tmp],round_mass=True)
# format from dict into a list:
spectrum_tmp_lst = [];
spectrum_masses_lst = [];
for k,v in spectrum_tmp[dfrag_tmp].items():
spectrum_masses_lst.append(k);
spectrum_masses_lst.sort();
for k in spectrum_masses_lst:
spectrum_tmp_lst.append(spectrum_tmp[dfrag_tmp][k]);
emu_O[emu_str] = spectrum_tmp_lst;
emu_c = nC*'1'; #initialize
emu_lst = list(emu_c);
for j in range(nC-1):
emu_lst[j] = '0'
for c in range(j,nC-1):
emu_lst_2 = copy.copy(emu_lst)
emu_lst_2[j] = '1';
emu_lst_2[c] = '0';
emu_tmp = copy.copy(emu_lst_2);
cfrag = [];
for i in range(c,nC-1):
emu_tmp[c] = '1';
emu_tmp[i] = '0';
emu_str = 'x' + ''.join(emu_tmp)
dfrag = [csource_I[p] for p,n in enumerate(emu_tmp) if n=='1']
dfrag_tmp = ''.join(dfrag)
#if emu_str.find('0')==-1: #ignore the fully labeled fragment
# continue;
spectrum_tmp = self.report_fragmentSpectrum_normSum([dfrag_tmp],round_mass=True)
# format from dict into a list:
spectrum_tmp_lst = [];
spectrum_masses_lst = [];
for k,v in spectrum_tmp[dfrag_tmp].items():
spectrum_masses_lst.append(k);
spectrum_masses_lst.sort();
for k in spectrum_masses_lst:
spectrum_tmp_lst.append(spectrum_tmp[dfrag_tmp][k]);
emu_O[emu_str] = spectrum_tmp_lst;
return emu_O;
def make_fragmentID(self,met_id_I,formula_I,mass_I):
"""Make a unique fragment ID"""
fragment_id_O = met_id_I + "_" + formula_I + "_" + str(mass_I);
return fragment_id_O;
def make_sampleFragmentID(self,sample_name_I,met_id_I,formula_I,mass_I):
"""Make a unique fragment ID"""
fragment_id_O = sample_name_I + "_" + met_id_I + "_" + formula_I + "_" + str(mass_I);
return fragment_id_O;
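# A minimal, self-contained sketch (not part of the class above) illustrating the
# "spectrum accuracy" metric described in calculate_fragmentSpectrumAccuracy: the mean
# absolute deviation between the replicate-averaged measured normalized intensities and
# the theoretical normalized intensities. The fragment name and spectra below are
# hypothetical illustration values, not data from this module.
if __name__ == "__main__":
    import numpy

    theoretical = {'C3H5O3': {87.0: 0.967, 88.0: 0.030, 89.0: 0.003}}
    measured_replicates = [
        {'C3H5O3': {87.0: 0.958, 88.0: 0.037, 89.0: 0.005}},
        {'C3H5O3': {87.0: 0.962, 88.0: 0.033, 89.0: 0.005}},
    ]

    accuracy = {}
    for frag, theo_spec in theoretical.items():
        deviations = []
        for mass, theo_intensity in theo_spec.items():
            # average the measured normalized intensity for this mass across replicates
            values = [rep[frag].get(mass, 0.0) for rep in measured_replicates]
            deviations.append(abs(numpy.mean(values) - theo_intensity))
        accuracy[frag] = numpy.mean(deviations)
    print(accuracy)  # e.g. {'C3H5O3': ~0.005}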
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import math
import warnings
import numpy as np
from .visual import CompoundVisual
from .line import LineVisual
from .text import TextVisual
# XXX TODO list (see code, plus):
# 1. Automated tick direction?
# 2. Expand to 3D (only 2D supported currently)
# 3. Input validation
# 4. Property support
# 5. Reactivity to resizing (current tick lengths grow/shrink w/zoom)
# 6. Improve tick label naming (str(x) is not good) and tick selection
class AxisVisual(CompoundVisual):
"""Axis visual
Parameters
----------
pos : array
Co-ordinates of start and end of the axis.
domain : tuple
The data values at the beginning and end of the axis, used for tick
labels. i.e. (5, 10) means the axis starts at 5 and ends at 10. Default
is (0, 1).
tick_direction : array
The tick direction to use (in document coordinates).
scale_type : str
The type of scale. For now only 'linear' is supported.
axis_color : tuple
RGBA values for the axis colour. Default is white.
tick_color : tuple
RGBA values for the tick colours. The colour for the major and minor
ticks is currently fixed to be the same. Default is a light grey.
text_color : Color
The color to use for drawing tick and axis labels
minor_tick_length : float
The length of minor ticks, in pixels
major_tick_length : float
The length of major ticks, in pixels
tick_width : float
Line width for the ticks
tick_label_margin : float
Margin between ticks and tick labels
tick_font_size : float
The font size to use for rendering tick labels.
axis_width : float
Line width for the axis
axis_label : str
Text to use for the axis label
axis_label_margin : float
Margin between ticks and axis labels
axis_font_size : float
The font size to use for rendering axis labels.
font_size : float
Font size for both the tick and axis labels. If this is set,
tick_font_size and axis_font_size are ignored.
anchors : iterable
A 2-element iterable (tuple, list, etc.) giving the horizontal and
vertical alignment of the tick labels. The first element should be one
of 'left', 'center', or 'right', and the second element should be one
of 'bottom', 'middle', or 'top'. If this is not specified, it is
determined automatically.
"""
def __init__(self, pos=None, domain=(0., 1.), tick_direction=(-1., 0.),
scale_type="linear", axis_color=(1, 1, 1),
tick_color=(0.7, 0.7, 0.7), text_color='w',
minor_tick_length=5, major_tick_length=10,
tick_width=2, tick_label_margin=5, tick_font_size=8,
axis_width=3, axis_label=None,
axis_label_margin=35, axis_font_size=10,
font_size=None, anchors=None):
if scale_type != 'linear':
raise NotImplementedError('only linear scaling is currently '
'supported')
if font_size is not None:
tick_font_size = font_size
axis_font_size = font_size
self._pos = None
self._domain = None
# If True, then axis stops at the first / last major tick.
# If False, then axis extends to edge of *pos*
# (private until we come up with a better name for this)
self._stop_at_major = (False, False)
self.ticker = Ticker(self, anchors=anchors)
self.tick_direction = np.array(tick_direction, float)
self.scale_type = scale_type
self.axis_color = axis_color
self.tick_color = tick_color
self.minor_tick_length = minor_tick_length # px
self.major_tick_length = major_tick_length # px
self.tick_label_margin = tick_label_margin # px
self.axis_label_margin = axis_label_margin # px
self.axis_label = axis_label
self._need_update = True
self._line = LineVisual(method='gl', width=axis_width)
self._ticks = LineVisual(method='gl', width=tick_width, connect='segments')
self._text = TextVisual(font_size=tick_font_size, color=text_color)
self._axis_label = TextVisual(font_size=axis_font_size, color=text_color)
CompoundVisual.__init__(self, [self._line, self._text, self._ticks, self._axis_label])
if pos is not None:
self.pos = pos
self.domain = domain
@property
def pos(self):
return self._pos
@pos.setter
def pos(self, pos):
self._pos = np.array(pos, float)
self._need_update = True
self.update()
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, d):
if self._domain is None or d != self._domain:
self._domain = d
self._need_update = True
self.update()
@property
def _vec(self):
"""Vector in the direction of the axis line"""
return self.pos[1] - self.pos[0]
def _update_subvisuals(self):
tick_pos, labels, tick_label_pos, anchors, axis_label_pos = self.ticker.get_update()
self._line.set_data(pos=self.pos, color=self.axis_color)
self._ticks.set_data(pos=tick_pos, color=self.tick_color)
self._text.text = list(labels)
self._text.pos = tick_label_pos
self._text.anchors = anchors
if self.axis_label is not None:
self._axis_label.text = self.axis_label
self._axis_label.pos = axis_label_pos
self._need_update = False
def _prepare_draw(self, view):
if self._pos is None:
return False
if self.axis_label is not None:
# TODO: make sure we only call get_transform if the transform for
# the line is updated
tr = self._line.get_transform(map_from='visual', map_to='canvas')
x1, y1, x2, y2 = tr.map(self.pos)[:,:2].ravel()
if x1 > x2:
x1, y1, x2, y2 = x2, y2, x1, y1
self._axis_label.rotation = math.degrees(math.atan2(y2-y1, x2-x1))
if self._need_update:
self._update_subvisuals()
def _compute_bounds(self, axis, view):
if axis == 2:
return (0., 0.)
# now axis in (0, 1)
return self.pos[:, axis].min(), self.pos[:, axis].max()
class Ticker(object):
"""Class to determine tick marks
Parameters
----------
axis : instance of AxisVisual
The AxisVisual to generate ticks for.
"""
def __init__(self, axis, anchors=None):
self.axis = axis
self._anchors = anchors
def get_update(self):
major_tick_fractions, minor_tick_fractions, tick_labels = \
self._get_tick_frac_labels()
tick_pos, tick_label_pos, axis_label_pos, anchors = self._get_tick_positions(
major_tick_fractions, minor_tick_fractions)
return tick_pos, tick_labels, tick_label_pos, anchors, axis_label_pos
def _get_tick_positions(self, major_tick_fractions, minor_tick_fractions):
# tick direction is defined in visual coords, but use document
# coords to determine the tick length
trs = self.axis.transforms
visual_to_document = trs.get_transform('visual', 'document')
direction = np.array(self.axis.tick_direction)
direction /= np.linalg.norm(direction)
if self._anchors is None:
# use the document (pixel) coord system to set text anchors
anchors = []
if direction[0] < 0:
anchors.append('right')
elif direction[0] > 0:
anchors.append('left')
else:
anchors.append('center')
if direction[1] < 0:
anchors.append('bottom')
elif direction[1] > 0:
anchors.append('top')
else:
anchors.append('middle')
else:
anchors = self._anchors
# now figure out the tick positions in visual (data) coords
doc_unit = visual_to_document.map([[0, 0], direction[:2]])
doc_unit = doc_unit[1] - doc_unit[0]
doc_len = np.linalg.norm(doc_unit)
vectors = np.array([[0., 0.],
direction * self.axis.minor_tick_length / doc_len,
direction * self.axis.major_tick_length / doc_len,
direction * (self.axis.major_tick_length +
self.axis.tick_label_margin) / doc_len],
dtype=float)
minor_vector = vectors[1] - vectors[0]
major_vector = vectors[2] - vectors[0]
label_vector = vectors[3] - vectors[0]
axislabel_vector = direction * (self.axis.major_tick_length +
self.axis.axis_label_margin) / doc_len
major_origins, major_endpoints = self._tile_ticks(
major_tick_fractions, major_vector)
minor_origins, minor_endpoints = self._tile_ticks(
minor_tick_fractions, minor_vector)
tick_label_pos = major_origins + label_vector
axis_label_pos = 0.5 * (self.axis.pos[0] + self.axis.pos[1]) + axislabel_vector
num_major = len(major_tick_fractions)
num_minor = len(minor_tick_fractions)
c = np.empty([(num_major + num_minor) * 2, 2])
c[0:(num_major-1)*2+1:2] = major_origins
c[1:(num_major-1)*2+2:2] = major_endpoints
c[(num_major-1)*2+2::2] = minor_origins
c[(num_major-1)*2+3::2] = minor_endpoints
return c, tick_label_pos, axis_label_pos, anchors
def _tile_ticks(self, frac, tickvec):
"""Tiles tick marks along the axis."""
origins = np.tile(self.axis._vec, (len(frac), 1))
origins = self.axis.pos[0].T + (origins.T*frac).T
endpoints = tickvec + origins
return origins, endpoints
def _get_tick_frac_labels(self):
"""Get the major ticks, minor ticks, and major labels"""
minor_num = 4 # number of minor ticks per major division
if (self.axis.scale_type == 'linear'):
domain = self.axis.domain
if domain[1] < domain[0]:
flip = True
domain = domain[::-1]
else:
flip = False
offset = domain[0]
scale = domain[1] - domain[0]
transforms = self.axis.transforms
length = self.axis.pos[1] - self.axis.pos[0] # in logical coords
n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi
# major = np.linspace(domain[0], domain[1], num=11)
# major = MaxNLocator(10).tick_values(*domain)
major = _get_ticks_talbot(domain[0], domain[1], n_inches, 2)
labels = ['%g' % x for x in major]
majstep = major[1] - major[0]
minor = []
minstep = majstep / (minor_num + 1)
minstart = 0 if self.axis._stop_at_major[0] else -1
minstop = -1 if self.axis._stop_at_major[1] else 0
for i in range(minstart, len(major) + minstop):
maj = major[0] + i * majstep
minor.extend(np.linspace(maj + minstep,
maj + majstep - minstep,
minor_num))
major_frac = (major - offset) / scale
minor_frac = (np.array(minor) - offset) / scale
major_frac = major_frac[::-1] if flip else major_frac
use_mask = (major_frac > -0.0001) & (major_frac < 1.0001)
major_frac = major_frac[use_mask]
labels = [l for li, l in enumerate(labels) if use_mask[li]]
minor_frac = minor_frac[(minor_frac > -0.0001) &
(minor_frac < 1.0001)]
elif self.axis.scale_type == 'logarithmic':
raise NotImplementedError('logarithmic scaling is not yet supported')
elif self.axis.scale_type == 'power':
raise NotImplementedError('power scaling is not yet supported')
return major_frac, minor_frac, labels
# #############################################################################
# Translated from matplotlib
class MaxNLocator(object):
"""
Select no more than N intervals at nice locations.
"""
def __init__(self, nbins=10, steps=None, trim=True, integer=False,
symmetric=False, prune=None):
"""
Keyword args:
*nbins*
Maximum number of intervals; one less than max number of ticks.
*steps*
Sequence of nice numbers starting with 1 and ending with 10;
e.g., [1, 2, 4, 5, 10]
*integer*
If True, ticks will take only integer values.
*symmetric*
If True, autoscaling will result in a range symmetric
about zero.
*prune*
['lower' | 'upper' | 'both' | None]
Remove edge ticks -- useful for stacked or ganged plots
where the upper tick of one axes overlaps with the lower
tick of the axes above it.
If prune=='lower', the smallest tick will
be removed. If prune=='upper', the largest tick will be
removed. If prune=='both', the largest and smallest ticks
will be removed. If prune==None, no ticks will be removed.
"""
self._nbins = int(nbins)
self._trim = trim
self._integer = integer
self._symmetric = symmetric
if prune is not None and prune not in ['upper', 'lower', 'both']:
raise ValueError(
"prune must be 'upper', 'lower', 'both', or None")
self._prune = prune
if steps is None:
steps = [1, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
self._integer = integer
if self._integer:
self._steps = [n for n in self._steps
if divmod(n, 1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin = vmin - offset
vmax = vmax - offset
raw_step = (vmax - vmin) / nbins
scaled_raw_step = raw_step / scale
best_vmax = vmax
best_vmin = vmin
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step * divmod(vmin, step)[0]
best_vmax = best_vmin + step * nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins + 1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
locs = self.bin_boundaries(vmin, vmax)
prune = self._prune
if prune == 'lower':
locs = locs[1:]
elif prune == 'upper':
locs = locs[:-1]
elif prune == 'both':
locs = locs[1:-1]
return locs
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
return np.take(self.bin_boundaries(dmin, dmax), [0, -1])
def scale_range(vmin, vmax, n=1, threshold=100):
dv = abs(vmax - vmin)
if dv == 0: # maxabsv == 0 is a special case of this.
return 1.0, 0.0
# Note: this should never occur because
# vmin, vmax should have been checked by nonsingular(),
# and spread apart if necessary.
meanv = 0.5 * (vmax + vmin)
if abs(meanv) / dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(np.log10(meanv), 1)[0]
offset = 10 ** ex
else:
ex = divmod(np.log10(-meanv), 1)[0]
offset = -10 ** ex
ex = divmod(np.log10(dv / n), 1)[0]
scale = 10 ** ex
return scale, offset
# #############################################################################
# Translated from http://www.justintalbot.com/research/axis-labeling/
# See "An Extension of Wilkinson's Algorithm for Positioning Tick Labels
# on Axes" # by Justin Talbot, Sharon Lin, and Pat Hanrahan, InfoVis 2010.
def _coverage(dmin, dmax, lmin, lmax):
return 1 - 0.5 * ((dmax - lmax) ** 2 +
(dmin - lmin) ** 2) / (0.1 * (dmax - dmin)) ** 2
def _coverage_max(dmin, dmax, span):
range_ = dmax - dmin
if span <= range_:
return 1.
else:
half = (span - range_) / 2.0
return 1 - half ** 2 / (0.1 * range_) ** 2
def _density(k, m, dmin, dmax, lmin, lmax):
r = (k-1.0) / (lmax-lmin)
rt = (m-1.0) / (max(lmax, dmax) - min(lmin, dmin))
return 2 - max(r / rt, rt / r)
def _density_max(k, m):
return 2 - (k-1.0) / (m-1.0) if k >= m else 1.
def _simplicity(q, Q, j, lmin, lmax, lstep):
eps = 1e-10
n = len(Q)
i = Q.index(q) + 1
if ((lmin % lstep) < eps or
(lstep - lmin % lstep) < eps) and lmin <= 0 and lmax >= 0:
v = 1
else:
v = 0
return (n - i) / (n - 1.0) + v - j
def _simplicity_max(q, Q, j):
n = len(Q)
i = Q.index(q) + 1
return (n - i)/(n - 1.0) + 1. - j
def _get_ticks_talbot(dmin, dmax, n_inches, density=1.):
# density * size gives target number of intervals,
# density * size + 1 gives target number of tick marks,
# the density function converts this back to a density in data units
# (not inches)
n_inches = max(n_inches, 2.0) # Set minimum otherwise code can crash :(
m = density * n_inches + 1.0
only_inside = False # we cull values outside ourselves
Q = [1, 5, 2, 2.5, 4, 3]
w = [0.25, 0.2, 0.5, 0.05]
best_score = -2.0
best = None
j = 1.0
n_max = 1000
while j < n_max:
for q in Q:
sm = _simplicity_max(q, Q, j)
if w[0] * sm + w[1] + w[2] + w[3] < best_score:
j = n_max
break
k = 2.0
while k < n_max:
dm = _density_max(k, n_inches)
if w[0] * sm + w[1] + w[2] * dm + w[3] < best_score:
break
delta = (dmax-dmin)/(k+1.0)/j/q
z = np.ceil(np.log10(delta))
while z < float('infinity'):
step = j * q * 10 ** z
cm = _coverage_max(dmin, dmax, step*(k-1.0))
if (w[0] * sm +
w[1] * cm +
w[2] * dm +
w[3] < best_score):
break
min_start = np.floor(dmax/step)*j - (k-1.0)*j
max_start = np.ceil(dmin/step)*j
if min_start > max_start:
z = z+1
break
for start in range(int(min_start), int(max_start)+1):
lmin = start * (step/j)
lmax = lmin + step*(k-1.0)
lstep = step
s = _simplicity(q, Q, j, lmin, lmax, lstep)
c = _coverage(dmin, dmax, lmin, lmax)
d = _density(k, m, dmin, dmax, lmin, lmax)
l = 1. # _legibility(lmin, lmax, lstep)
score = w[0] * s + w[1] * c + w[2] * d + w[3] * l
if (score > best_score and
(not only_inside or (lmin >= dmin and
lmax <= dmax))):
best_score = score
best = (lmin, lmax, lstep, q, k)
z += 1
k += 1
if k == n_max:
raise RuntimeError('could not converge on ticks')
j += 1
if j == n_max:
raise RuntimeError('could not converge on ticks')
if best is None:
raise RuntimeError('could not converge on ticks')
return np.arange(best[4]) * best[2] + best[0]
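# A minimal usage sketch: both tick generators above can be exercised directly on a
# (vmin, vmax) data range without constructing an AxisVisual. Because of the relative
# imports at the top of this file, run it as a module (e.g. `python -m vispy.visuals.axis`,
# assuming the upstream vispy layout) rather than as a standalone script; the data range
# below is an arbitrary illustration value.
if __name__ == "__main__":
    locator = MaxNLocator(nbins=10)
    print(locator.tick_values(0.3, 9.7))     # nice bin boundaries covering [0.3, 9.7]
    print(_get_ticks_talbot(0.3, 9.7, 4.0))  # Talbot et al. labeling for a ~4 inch axis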
|
|
import socket
import warnings
class TransportSocket:
"""A socket-like wrapper for exposing real transport sockets.
These objects can be safely returned by APIs like
`transport.get_extra_info('socket')`. All potentially disruptive
operations (like "socket.close()") are banned.
"""
__slots__ = ('_sock',)
def __init__(self, sock: socket.socket):
self._sock = sock
def _na(self, what):
warnings.warn(
f"Using {what} on sockets returned from get_extra_info('socket') "
f"will be prohibited in asyncio 3.9. Please report your use case "
f"to bugs.python.org.",
DeprecationWarning, source=self)
@property
def family(self):
return self._sock.family
@property
def type(self):
return self._sock.type
@property
def proto(self):
return self._sock.proto
def __repr__(self):
s = (
f"<asyncio.TransportSocket fd={self.fileno()}, "
f"family={self.family!s}, type={self.type!s}, "
f"proto={self.proto}"
)
if self.fileno() != -1:
try:
laddr = self.getsockname()
if laddr:
s = f"{s}, laddr={laddr}"
except socket.error:
pass
try:
raddr = self.getpeername()
if raddr:
s = f"{s}, raddr={raddr}"
except socket.error:
pass
return f"{s}>"
def __getstate__(self):
raise TypeError("Cannot serialize asyncio.TransportSocket object")
def fileno(self):
return self._sock.fileno()
def dup(self):
return self._sock.dup()
def get_inheritable(self):
return self._sock.get_inheritable()
def shutdown(self, how):
# asyncio doesn't currently provide a high-level transport API
# to shutdown the connection.
self._sock.shutdown(how)
def getsockopt(self, *args, **kwargs):
return self._sock.getsockopt(*args, **kwargs)
def setsockopt(self, *args, **kwargs):
self._sock.setsockopt(*args, **kwargs)
def getpeername(self):
return self._sock.getpeername()
def getsockname(self):
return self._sock.getsockname()
def accept(self):
self._na('accept() method')
return self._sock.accept()
def connect(self, *args, **kwargs):
self._na('connect() method')
return self._sock.connect(*args, **kwargs)
def connect_ex(self, *args, **kwargs):
self._na('connect_ex() method')
return self._sock.connect_ex(*args, **kwargs)
def bind(self, *args, **kwargs):
self._na('bind() method')
return self._sock.bind(*args, **kwargs)
def ioctl(self, *args, **kwargs):
self._na('ioctl() method')
return self._sock.ioctl(*args, **kwargs)
def listen(self, *args, **kwargs):
self._na('listen() method')
return self._sock.listen(*args, **kwargs)
def makefile(self):
self._na('makefile() method')
return self._sock.makefile()
def sendfile(self, *args, **kwargs):
self._na('sendfile() method')
return self._sock.sendfile(*args, **kwargs)
def close(self):
self._na('close() method')
return self._sock.close()
def detach(self):
self._na('detach() method')
return self._sock.detach()
def sendmsg_afalg(self, *args, **kwargs):
self._na('sendmsg_afalg() method')
return self._sock.sendmsg_afalg(*args, **kwargs)
def sendmsg(self, *args, **kwargs):
self._na('sendmsg() method')
return self._sock.sendmsg(*args, **kwargs)
def sendto(self, *args, **kwargs):
self._na('sendto() method')
return self._sock.sendto(*args, **kwargs)
def send(self, *args, **kwargs):
self._na('send() method')
return self._sock.send(*args, **kwargs)
def sendall(self, *args, **kwargs):
self._na('sendall() method')
return self._sock.sendall(*args, **kwargs)
def set_inheritable(self, *args, **kwargs):
self._na('set_inheritable() method')
return self._sock.set_inheritable(*args, **kwargs)
def share(self, process_id):
self._na('share() method')
return self._sock.share(process_id)
def recv_into(self, *args, **kwargs):
self._na('recv_into() method')
return self._sock.recv_into(*args, **kwargs)
def recvfrom_into(self, *args, **kwargs):
self._na('recvfrom_into() method')
return self._sock.recvfrom_into(*args, **kwargs)
def recvmsg_into(self, *args, **kwargs):
self._na('recvmsg_into() method')
return self._sock.recvmsg_into(*args, **kwargs)
def recvmsg(self, *args, **kwargs):
self._na('recvmsg() method')
return self._sock.recvmsg(*args, **kwargs)
def recvfrom(self, *args, **kwargs):
self._na('recvfrom() method')
return self._sock.recvfrom(*args, **kwargs)
def recv(self, *args, **kwargs):
self._na('recv() method')
return self._sock.recv(*args, **kwargs)
def settimeout(self, value):
if value == 0:
return
raise ValueError(
'settimeout(): only 0 timeout is allowed on transport sockets')
def gettimeout(self):
return 0
def setblocking(self, flag):
if not flag:
return
raise ValueError(
'setblocking(): transport sockets cannot be blocking')
def __enter__(self):
self._na('context manager protocol')
return self._sock.__enter__()
def __exit__(self, *err):
self._na('context manager protocol')
return self._sock.__exit__(*err)
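# A minimal sketch of how a TransportSocket behaves once handed out via
# transport.get_extra_info('socket'): read-only queries pass straight through to the
# wrapped socket, while disruptive calls first emit a DeprecationWarning via _na()
# and then delegate. The plain UDP socket below is only an illustration; asyncio
# normally constructs the wrapper itself.
if __name__ == "__main__":
    raw = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    wrapped = TransportSocket(raw)
    print(wrapped.family, wrapped.type, wrapped.fileno())  # pass-through queries
    print(wrapped.gettimeout())                            # always 0 on transport sockets
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        wrapped.close()                                    # warns, then closes the raw socket
    print([str(w.message) for w in caught])                # the deprecation message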
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
import re
import contextlib
import mock
from six.moves import queue as Queue
from neutron import context
from neutron.extensions import loadbalancer
from neutron import manager
from neutron.openstack.common import jsonutils as json
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers.radware import driver
from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate')
SERVER_DOWN_CODES = (-1, 301, 307)
class QueueMock(Queue.Queue):
def __init__(self, completion_handler):
self.completion_handler = completion_handler
super(QueueMock, self).__init__()
def put_nowait(self, oper):
self.completion_handler(oper)
def _recover_function_mock(action, resource, data, headers, binary=False):
pass
def rest_call_function_mock(action, resource, data, headers, binary=False):
if rest_call_function_mock.RESPOND_WITH_ERROR:
return 400, 'error_status', 'error_description', None
if rest_call_function_mock.RESPOND_WITH_SERVER_DOWN in SERVER_DOWN_CODES:
val = rest_call_function_mock.RESPOND_WITH_SERVER_DOWN
return val, 'error_status', 'error_description', None
if action == 'GET':
return _get_handler(resource)
elif action == 'DELETE':
return _delete_handler(resource)
elif action == 'POST':
return _post_handler(resource, binary)
else:
return 0, None, None, None
def _get_handler(resource):
if resource == GET_200[2]:
if rest_call_function_mock.TEMPLATES_MISSING:
data = json.loads('[]')
else:
data = json.loads(
'[{"name":"openstack_l2_l3"},{"name":"openstack_l4"}]'
)
return 200, '', '', data
if resource in GET_200:
return 200, '', '', ''
else:
data = json.loads('{"complete":"True", "success": "True"}')
return 202, '', '', data
def _delete_handler(resource):
return 404, '', '', {'message': 'Not Found'}
def _post_handler(resource, binary):
if re.search(r'/api/workflow/.+/action/.+', resource):
data = json.loads('{"uri":"some_uri"}')
return 202, '', '', data
elif re.search(r'/api/service\?name=.+', resource):
data = json.loads('{"links":{"actions":{"provision":"someuri"}}}')
return 201, '', '', data
elif binary:
return 201, '', '', ''
else:
return 202, '', '', ''
RADWARE_PROVIDER = ('LOADBALANCER:radware:neutron.services.'
'loadbalancer.drivers.radware.driver.'
'LoadBalancerDriver:default')
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=RADWARE_PROVIDER)
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
def setUp(self):
super(TestLoadBalancerPlugin, self).setUp()
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': False})
rest_call_function_mock.__dict__.update(
{'TEMPLATES_MISSING': False})
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_SERVER_DOWN': 200})
self.operation_completer_start_mock = mock.Mock(
return_value=None)
self.operation_completer_join_mock = mock.Mock(
return_value=None)
self.driver_rest_call_mock = mock.Mock(
side_effect=rest_call_function_mock)
self.flip_servers_mock = mock.Mock(
return_value=None)
self.recover_mock = mock.Mock(
side_effect=_recover_function_mock)
radware_driver = self.plugin_instance.drivers['radware']
radware_driver.completion_handler.start = (
self.operation_completer_start_mock)
radware_driver.completion_handler.join = (
self.operation_completer_join_mock)
self.orig_call = radware_driver.rest_client.call
self.orig__call = radware_driver.rest_client._call
radware_driver.rest_client.call = self.driver_rest_call_mock
radware_driver.rest_client._call = self.driver_rest_call_mock
radware_driver.rest_client._flip_servers = self.flip_servers_mock
radware_driver.rest_client._recover = self.recover_mock
radware_driver.completion_handler.rest_client.call = (
self.driver_rest_call_mock)
radware_driver.queue = QueueMock(
radware_driver.completion_handler.handle_operation_completion)
self.addCleanup(radware_driver.completion_handler.join)
def test_rest_client_recover_was_called(self):
"""Call the real REST client and verify _recover is called."""
radware_driver = self.plugin_instance.drivers['radware']
radware_driver.rest_client.call = self.orig_call
radware_driver.rest_client._call = self.orig__call
self.assertRaises(r_exc.RESTRequestFailure,
radware_driver._verify_workflow_templates)
self.assertEqual(1, self.recover_mock.call_count)
def test_rest_client_flip_servers(self):
radware_driver = self.plugin_instance.drivers['radware']
server = radware_driver.rest_client.server
sec_server = radware_driver.rest_client.secondary_server
radware_driver.rest_client._flip_servers()
self.assertEqual(server,
radware_driver.rest_client.secondary_server)
self.assertEqual(sec_server,
radware_driver.rest_client.server)
def test_verify_workflow_templates_server_down(self):
"""Test the rest call failure when backend is down."""
for value in SERVER_DOWN_CODES:
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_SERVER_DOWN': value})
self.assertRaises(r_exc.RESTRequestFailure,
self.plugin_instance.drivers['radware'].
_verify_workflow_templates)
def test_verify_workflow_templates(self):
"""Test the rest call failure handling by Exception raising."""
rest_call_function_mock.__dict__.update(
{'TEMPLATES_MISSING': True})
self.assertRaises(r_exc.WorkflowMissing,
self.plugin_instance.drivers['radware'].
_verify_workflow_templates)
def test_create_vip_failure(self):
"""Test the rest call failure handling by Exception raising."""
with self.network(do_delete=False) as network:
with self.subnet(network=network, do_delete=False) as subnet:
with self.pool(no_delete=True,
provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': True})
self.assertRaises(r_exc.RESTRequestFailure,
self.plugin_instance.create_vip,
context.get_admin_context(),
{'vip': vip_data})
def test_create_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
# Test creation REST calls
calls = [
mock.call('GET', u'/api/service/srv_' +
subnet['subnet']['network_id'], None, None),
mock.call('POST', u'/api/service?name=srv_' +
subnet['subnet']['network_id'] + '&tenant=' +
vip['tenant_id'], mock.ANY,
driver.CREATE_SERVICE_HEADER),
mock.call('GET', u'/api/workflow/l2_l3_' +
subnet['subnet']['network_id'], None, None),
mock.call('POST', '/api/workflow/l2_l3_' +
subnet['subnet']['network_id'] +
'/action/setup_l2_l3',
mock.ANY, driver.TEMPLATE_HEADER),
mock.call('POST', 'someuri',
None, driver.PROVISION_HEADER),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l4' +
'?name=' + pool['pool']['id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l2_l3' +
'?name=l2_l3_' + subnet['subnet']['network_id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
mock.call('GET', '/api/workflow/' +
pool['pool']['id'], None, None)
]
self.driver_rest_call_mock.assert_has_calls(calls,
any_order=True)
#Test DB
new_vip = self.plugin_instance.get_vip(
context.get_admin_context(),
vip['id']
)
self.assertEqual(new_vip['status'], constants.ACTIVE)
# Delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
# Test deletion REST calls
calls = [
mock.call('DELETE', u'/api/workflow/' + pool['pool']['id'],
None, None)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
def test_create_vip_2_leg(self):
"""Test creation of a VIP where Alteon VIP and PIP are different."""
with self.subnet(cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as pool_sub:
with self.pool(provider='radware',
subnet_id=pool_sub['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
name_suffix = '%s_%s' % (subnet['subnet']['network_id'],
pool_sub['subnet']['network_id'])
# Test creation REST calls
calls = [
mock.call('GET', '/api/workflowTemplate', None, None),
mock.call('GET', '/api/service/srv_' + name_suffix,
None, None),
mock.call('POST', '/api/service?name=srv_' +
name_suffix + '&tenant=' + vip['tenant_id'],
mock.ANY, driver.CREATE_SERVICE_HEADER),
mock.call('POST', 'someuri',
None, driver.PROVISION_HEADER),
mock.call('GET', '/api/workflow/l2_l3_' + name_suffix,
None, None),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l2_l3' +
'?name=l2_l3_' + name_suffix,
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/l2_l3_' +
name_suffix + '/action/setup_l2_l3',
mock.ANY, driver.TEMPLATE_HEADER),
mock.call('GET', '/api/workflow/' +
pool['pool']['id'], None, None),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l4' +
'?name=' + pool['pool']['id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/' +
pool['pool']['id'] + '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER)
]
self.driver_rest_call_mock.assert_has_calls(calls)
#Test DB
new_vip = self.plugin_instance.get_vip(
context.get_admin_context(),
vip['id']
)
self.assertEqual(new_vip['status'], constants.ACTIVE)
# Test that PIP neutron port was created
pip_port_filter = {
'name': ['pip_' + vip['id']],
}
plugin = manager.NeutronManager.get_plugin()
num_ports = plugin.get_ports_count(
context.get_admin_context(), filters=pip_port_filter)
self.assertTrue(num_ports > 0)
# Delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
# Test deletion REST calls
calls = [
mock.call('DELETE', u'/api/workflow/' +
pool['pool']['id'], None, None)
]
self.driver_rest_call_mock.assert_has_calls(calls)
def test_update_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
no_delete=True,
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
vip_data['status'] = constants.PENDING_UPDATE
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['id'], {'vip': vip_data})
# Test REST calls
calls = [
mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
updated_vip = self.plugin_instance.get_vip(
context.get_admin_context(), vip['id'])
self.assertEqual(updated_vip['status'], constants.ACTIVE)
# delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
def test_update_vip_2_leg(self):
"""Test update of a VIP where Alteon VIP and PIP are different."""
with self.subnet(cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as pool_subnet:
with self.pool(provider='radware',
subnet_id=pool_subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['id'], {'vip': vip_data})
# Test REST calls
calls = [
mock.call('POST', '/api/workflow/' +
pool['pool']['id'] + '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
]
self.driver_rest_call_mock.assert_has_calls(calls)
updated_vip = self.plugin_instance.get_vip(
context.get_admin_context(), vip['id'])
self.assertEqual(updated_vip['status'], constants.ACTIVE)
# delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
def test_delete_vip_failure(self):
plugin = self.plugin_instance
with self.network(do_delete=False) as network:
with self.subnet(network=network, do_delete=False) as subnet:
with self.pool(no_delete=True,
provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
with contextlib.nested(
self.member(pool_id=pool['pool']['id'],
no_delete=True),
self.member(pool_id=pool['pool']['id'],
address='192.168.1.101',
no_delete=True),
self.health_monitor(no_delete=True),
self.vip(pool=pool, subnet=subnet, no_delete=True)
) as (mem1, mem2, hm, vip):
plugin.create_pool_health_monitor(
context.get_admin_context(), hm, pool['pool']['id']
)
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': True})
plugin.delete_vip(
context.get_admin_context(), vip['vip']['id'])
u_vip = plugin.get_vip(
context.get_admin_context(), vip['vip']['id'])
u_pool = plugin.get_pool(
context.get_admin_context(), pool['pool']['id'])
u_mem1 = plugin.get_member(
context.get_admin_context(), mem1['member']['id'])
u_mem2 = plugin.get_member(
context.get_admin_context(), mem2['member']['id'])
u_phm = plugin.get_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'], pool['pool']['id'])
self.assertEqual(u_vip['status'], constants.ERROR)
self.assertEqual(u_pool['status'], constants.ACTIVE)
self.assertEqual(u_mem1['status'], constants.ACTIVE)
self.assertEqual(u_mem2['status'], constants.ACTIVE)
self.assertEqual(u_phm['status'], constants.ACTIVE)
def test_delete_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
no_delete=True,
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
calls = [
mock.call('DELETE', '/api/workflow/' + pool['pool']['id'],
None, None)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
self.assertRaises(loadbalancer.VipNotFound,
self.plugin_instance.get_vip,
context.get_admin_context(), vip['id'])
def test_delete_vip_2_leg(self):
"""Test deletion of a VIP where Alteon VIP and PIP are different."""
self.driver_rest_call_mock.reset_mock()
with self.subnet(cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as pool_subnet:
with self.pool(provider='radware',
no_delete=True,
subnet_id=pool_subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
calls = [
mock.call('DELETE', '/api/workflow/' +
pool['pool']['id'], None, None)
]
self.driver_rest_call_mock.assert_has_calls(calls)
# Test that PIP neutron port was deleted
pip_port_filter = {
'name': ['pip_' + vip['id']],
}
plugin = manager.NeutronManager.get_plugin()
num_ports = plugin.get_ports_count(
context.get_admin_context(), filters=pip_port_filter)
self.assertTrue(num_ports == 0)
self.assertRaises(loadbalancer.VipNotFound,
self.plugin_instance.get_vip,
context.get_admin_context(), vip['id'])
def test_update_pool(self):
with self.subnet():
with self.pool() as pool:
del pool['pool']['provider']
del pool['pool']['status']
self.plugin_instance.update_pool(
context.get_admin_context(),
pool['pool']['id'], pool)
pool_db = self.plugin_instance.get_pool(
context.get_admin_context(), pool['pool']['id'])
self.assertEqual(pool_db['status'], constants.PENDING_UPDATE)
def test_delete_pool_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
no_delete=True,
subnet_id=subnet['subnet']['id']) as pool:
with self.vip(pool=pool, subnet=subnet):
self.assertRaises(loadbalancer.PoolInUse,
self.plugin_instance.delete_pool,
context.get_admin_context(),
pool['pool']['id'])
def test_create_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.vip(pool=p, subnet=subnet):
with self.member(pool_id=p['pool']['id']):
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
def test_update_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.member(pool_id=p['pool']['id']) as member:
with self.vip(pool=p, subnet=subnet):
self.plugin_instance.update_member(
context.get_admin_context(),
member['member']['id'], member
)
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
updated_member = self.plugin_instance.get_member(
context.get_admin_context(),
member['member']['id']
)
updated_member = self.plugin_instance.get_member(
context.get_admin_context(),
member['member']['id']
)
self.assertEqual(updated_member['status'],
constants.ACTIVE)
def test_update_member_without_vip(self):
with self.subnet():
with self.pool(provider='radware') as pool:
with self.member(pool_id=pool['pool']['id']) as member:
member['member']['status'] = constants.PENDING_UPDATE
updated_member = self.plugin_instance.update_member(
context.get_admin_context(),
member['member']['id'], member
)
self.assertEqual(updated_member['status'],
constants.PENDING_UPDATE)
def test_delete_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.member(pool_id=p['pool']['id'],
no_delete=True) as m:
with self.vip(pool=p, subnet=subnet):
                        # Reset the mock and wait until the member
                        # status has changed from PENDING_CREATE
                        # to ACTIVE
self.plugin_instance.delete_member(
context.get_admin_context(),
m['member']['id']
)
name, args, kwargs = (
self.driver_rest_call_mock.mock_calls[-2]
)
deletion_post_graph = str(args[2])
self.assertTrue(re.search(
r'.*\'member_address_array\': \[\].*',
deletion_post_graph
))
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
context.get_admin_context(),
m['member']['id'])
def test_delete_member_without_vip(self):
with self.subnet():
with self.pool(provider='radware') as p:
with self.member(pool_id=p['pool']['id'], no_delete=True) as m:
self.plugin_instance.delete_member(
context.get_admin_context(), m['member']['id']
)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
context.get_admin_context(),
m['member']['id'])
def test_create_hm_with_vip(self):
with self.subnet() as subnet:
with self.health_monitor() as hm:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
with self.vip(pool=pool, subnet=subnet):
self.plugin_instance.create_pool_health_monitor(
context.get_admin_context(),
hm, pool['pool']['id']
)
# Test REST calls
calls = [
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
phm = self.plugin_instance.get_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'], pool['pool']['id']
)
self.assertEqual(phm['status'], constants.ACTIVE)
def test_delete_pool_hm_with_vip(self):
with self.subnet() as subnet:
with self.health_monitor(no_delete=True) as hm:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
with self.vip(pool=pool, subnet=subnet):
self.plugin_instance.create_pool_health_monitor(
context.get_admin_context(),
hm, pool['pool']['id']
)
self.plugin_instance.delete_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'],
pool['pool']['id']
)
name, args, kwargs = (
self.driver_rest_call_mock.mock_calls[-2]
)
deletion_post_graph = str(args[2])
self.assertTrue(re.search(
r'.*\'hm_uuid_array\': \[\].*',
deletion_post_graph
))
calls = [
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
self.assertRaises(
loadbalancer.PoolMonitorAssociationNotFound,
self.plugin_instance.get_pool_health_monitor,
context.get_admin_context(),
hm['health_monitor']['id'],
pool['pool']['id']
)
|
|
"""Games, or Adversarial Search. (Chapters 6)
"""
from utils import *
import random
#______________________________________________________________________________
# Minimax Search
def minimax_decision(state, game):
"""Given a state in a game, calculate the best move by searching
forward all the way to the terminal states. [Fig. 6.4]"""
player = game.to_move(state)
def max_value(state):
if game.terminal_test(state):
return game.utility(state, player)
v = -infinity
for (a, s) in game.successors(state):
v = max(v, min_value(s))
return v
def min_value(state):
if game.terminal_test(state):
return game.utility(state, player)
v = infinity
for (a, s) in game.successors(state):
v = min(v, max_value(s))
return v
# Body of minimax_decision starts here:
action, state = argmax(game.successors(state),
lambda ((a, s)): min_value(s))
return action
#______________________________________________________________________________
def alphabeta_full_search(state, game):
"""Search game to determine best action; use alpha-beta pruning.
As in [Fig. 6.7], this version searches all the way to the leaves."""
player = game.to_move(state)
def max_value(state, alpha, beta):
if game.terminal_test(state):
return game.utility(state, player)
v = -infinity
for (a, s) in game.successors(state):
v = max(v, min_value(s, alpha, beta))
if v >= beta:
return v
alpha = max(alpha, v)
return v
def min_value(state, alpha, beta):
if game.terminal_test(state):
return game.utility(state, player)
v = infinity
for (a, s) in game.successors(state):
v = min(v, max_value(s, alpha, beta))
if v <= alpha:
return v
beta = min(beta, v)
return v
    # Body of alphabeta_full_search starts here:
action, state = argmax(game.successors(state),
lambda ((a, s)): min_value(s, -infinity, infinity))
return action
def alphabeta_search(state, game, d=4, cutoff_test=None, eval_fn=None):
"""Search game to determine best action; use alpha-beta pruning.
This version cuts off search and uses an evaluation function."""
player = game.to_move(state)
def max_value(state, alpha, beta, depth):
if cutoff_test(state, depth):
return eval_fn(state)
v = -infinity
for (a, s) in game.successors(state):
v = max(v, min_value(s, alpha, beta, depth+1))
if v >= beta:
return v
alpha = max(alpha, v)
return v
def min_value(state, alpha, beta, depth):
if cutoff_test(state, depth):
return eval_fn(state)
v = infinity
for (a, s) in game.successors(state):
v = min(v, max_value(s, alpha, beta, depth+1))
if v <= alpha:
return v
beta = min(beta, v)
return v
# Body of alphabeta_search starts here:
# The default test cuts off at depth d or at a terminal state
cutoff_test = (cutoff_test or
(lambda state,depth: depth>d or game.terminal_test(state)))
eval_fn = eval_fn or (lambda state: game.utility(state, player))
action, state = argmax(game.successors(state),
lambda ((a, s)): min_value(s, -infinity, infinity, 0))
return action
#______________________________________________________________________________
# Players for Games
def query_player(game, state):
"Make a move by querying standard input."
game.display(state)
return num_or_str(raw_input('Your move? '))
def random_player(game, state):
"A player that chooses a legal move at random."
    return random.choice(game.legal_moves(state))
def alphabeta_player(game, state):
return alphabeta_search(state, game)
def play_game(game, *players):
"Play an n-person, move-alternating game."
state = game.initial
while True:
for player in players:
move = player(game, state)
state = game.make_move(move, state)
if game.terminal_test(state):
return game.utility(state, players[0])
#______________________________________________________________________________
# Some Sample Games
class Game:
"""A game is similar to a problem, but it has a utility for each
state and a terminal test instead of a path cost and a goal
test. To create a game, subclass this class and implement
legal_moves, make_move, utility, and terminal_test. You may
override display and successors or you can inherit their default
methods. You will also need to set the .initial attribute to the
initial state; this can be done in the constructor."""
def legal_moves(self, state):
"Return a list of the allowable moves at this point."
abstract
def make_move(self, move, state):
"Return the state that results from making a move from a state."
abstract
def utility(self, state, player):
"Return the value of this final state to player."
abstract
def terminal_test(self, state):
"Return True if this is a final state for the game."
return not self.legal_moves(state)
def to_move(self, state):
"Return the player whose move it is in this state."
return state.to_move
def display(self, state):
"Print or otherwise display the state."
print state
def successors(self, state):
"Return a list of legal (move, state) pairs."
return [(move, self.make_move(move, state))
for move in self.legal_moves(state)]
def __repr__(self):
return '<%s>' % self.__class__.__name__
class Fig62Game(Game):
"""The game represented in [Fig. 6.2]. Serves as a simple test case.
>>> g = Fig62Game()
>>> minimax_decision('A', g)
'a1'
>>> alphabeta_full_search('A', g)
'a1'
>>> alphabeta_search('A', g)
'a1'
"""
succs = {'A': [('a1', 'B'), ('a2', 'C'), ('a3', 'D')],
'B': [('b1', 'B1'), ('b2', 'B2'), ('b3', 'B3')],
'C': [('c1', 'C1'), ('c2', 'C2'), ('c3', 'C3')],
'D': [('d1', 'D1'), ('d2', 'D2'), ('d3', 'D3')]}
utils = Dict(B1=3, B2=12, B3=8, C1=2, C2=4, C3=6, D1=14, D2=5, D3=2)
initial = 'A'
def successors(self, state):
return self.succs.get(state, [])
def utility(self, state, player):
if player == 'MAX':
return self.utils[state]
else:
return -self.utils[state]
def terminal_test(self, state):
return state not in ('A', 'B', 'C', 'D')
def to_move(self, state):
return if_(state in 'BCD', 'MIN', 'MAX')
class TicTacToe(Game):
"""Play TicTacToe on an h x v board, with Max (first player) playing 'X'.
A state has the player to move, a cached utility, a list of moves in
the form of a list of (x, y) positions, and a board, in the form of
a dict of {(x, y): Player} entries, where Player is 'X' or 'O'."""
def __init__(self, h=3, v=3, k=3):
update(self, h=h, v=v, k=k)
moves = [(x, y) for x in range(1, h+1)
for y in range(1, v+1)]
self.initial = Struct(to_move='X', utility=0, board={}, moves=moves)
def legal_moves(self, state):
"Legal moves are any square not yet taken."
return state.moves
def make_move(self, move, state):
if move not in state.moves:
return state # Illegal move has no effect
board = state.board.copy(); board[move] = state.to_move
moves = list(state.moves); moves.remove(move)
return Struct(to_move=if_(state.to_move == 'X', 'O', 'X'),
utility=self.compute_utility(board, move, state.to_move),
board=board, moves=moves)
def utility(self, state, player):
"Return the value to X; 1 for win, -1 for loss, 0 otherwise."
return if_(player == 'X', state.utility, -state.utility)
def terminal_test(self, state):
"A state is terminal if it is won or there are no empty squares."
return state.utility != 0 or len(state.moves) == 0
def display(self, state):
board = state.board
for x in range(1, self.h+1):
for y in range(1, self.v+1):
print board.get((x, y), '.'),
print
def compute_utility(self, board, move, player):
"If X wins with this move, return 1; if O return -1; else return 0."
if (self.k_in_row(board, move, player, (0, 1)) or
self.k_in_row(board, move, player, (1, 0)) or
self.k_in_row(board, move, player, (1, -1)) or
self.k_in_row(board, move, player, (1, 1))):
return if_(player == 'X', +1, -1)
else:
return 0
def k_in_row(self, board, move, player, (delta_x, delta_y)):
"Return true if there is a line through move on board for player."
x, y = move
n = 0 # n is number of moves in row
while board.get((x, y)) == player:
n += 1
x, y = x + delta_x, y + delta_y
x, y = move
while board.get((x, y)) == player:
n += 1
x, y = x - delta_x, y - delta_y
n -= 1 # Because we counted move itself twice
return n >= self.k
class ConnectFour(TicTacToe):
"""A TicTacToe-like game in which you can only make a move on the bottom
row, or in a square directly above an occupied square. Traditionally
played on a 7x6 board and requiring 4 in a row."""
def __init__(self, h=7, v=6, k=4):
TicTacToe.__init__(self, h, v, k)
def legal_moves(self, state):
"Legal moves are any square not yet taken."
return [(x, y) for (x, y) in state.moves
if y == 0 or (x, y-1) in state.board]
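# A minimal usage sketch (not part of the original module; it only uses the
# definitions above). The Fig62Game calls mirror the class's doctests; the
# TicTacToe line simply plays the two canned players against each other and
# prints whatever utility value play_game reports.
if __name__ == '__main__':
    g = Fig62Game()
    print 'minimax_decision from A:     ', minimax_decision('A', g)
    print 'alphabeta_full_search from A:', alphabeta_full_search('A', g)
    print 'alphabeta_search from A:     ', alphabeta_search('A', g)
    print 'TicTacToe, alphabeta vs random, play_game utility:', \
        play_game(TicTacToe(), alphabeta_player, random_player)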
|
|
import json
import io
import freezegun
import pytest
from FeedMandiant import MandiantClient
def mock_client():
MandiantClient._get_token = lambda x: 'token'
client = MandiantClient('url', 'username', 'password', False, False, 60, '90 days', 1, ['Malware'])
return client
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_generate_token(mocker):
"""
Given -
client
When -
generating a token
Then -
Validate the result is as expected
"""
client = mock_client()
mocker.patch.object(client, '_http_request', return_value={'access_token': 'token'})
res = client._generate_token()
assert res == 'token'
@freezegun.freeze_time('2020-11-25T11:57:28Z')
def test_get_token():
"""
Given -
client
When -
getting a token
Then -
Validate the result is as expected
"""
from FeedMandiant import MandiantClient
MandiantClient._generate_token = lambda x: 'token'
client = MandiantClient('url', 'username', 'password', False, False, 60, 'x_app_name', 'first_fetch', 1, [])
res = client._get_token()
assert res == 'token'
@pytest.mark.parametrize('info_type, response, result',
[('info-type', {'info-type': 'res'}, 'res'),
('', {'info-type': 'res'}, {'info-type': 'res'}),
('attack-pattern', {'attack-patterns': 'redacted'}, []),
('attack-pattern', {'attack-patterns': 'res'}, 'res')])
def test_get_indicator_additional_info(mocker, info_type, response, result):
client = mock_client()
mocker.patch.object(client, '_http_request', return_value=response)
res = client.get_indicator_additional_info('identifier', 'Malware', info_type)
assert res == result
def test_get_indicators_valid(mocker):
client = mock_client()
mocker.patch.object(client, '_http_request', return_value={'malware': ['list']})
res = client.get_indicators('Malware')
assert res == ['list']
def test_get_indicators_invalid(mocker):
from FeedMandiant import DemistoException
client = mock_client()
mocker.patch.object(client, '_http_request', side_effect=DemistoException('exception'))
res = client.get_indicators('Malware')
assert res == []
INDICATOR_LIST = [{'last_updated': '2020-11-23T11:57:28Z'}, {'last_updated': '2020-11-24T11:57:28Z'}]
@pytest.mark.parametrize('indicator_type, result',
[('Indicators', INDICATOR_LIST),
('Malware', INDICATOR_LIST[::-1])])
@freezegun.freeze_time('2020-11-25T11:57:28Z')
def test_get_new_indicators(mocker, indicator_type, result):
from FeedMandiant import get_new_indicators
client = mock_client()
mocker.patch.object(client, 'get_indicators', return_value=INDICATOR_LIST)
res = get_new_indicators(client, '90 days ago', indicator_type, 10)
assert res == result
@pytest.mark.parametrize('mscore, res', [(None, 0), ('1', 1), ('22', 0), ('52', 2), ('82', 3), ('101', 0)])
def test_get_verdict(mscore, res):
"""
Given -
mscore
When -
get_verdict
Then -
receive valid verdict for each mscore
"""
from FeedMandiant import get_verdict
assert get_verdict(mscore) == res
def test_get_indicator_relationships():
from FeedMandiant import get_indicator_relationships, EntityRelationship
res = get_indicator_relationships({'field_indicator': [{'entity_b_field': 'value_b'}],
'entity_a_field': 'value_a'}, 'field_indicator',
'entity_a_field', 'entity_a_type', 'entity_b_field', 'entity_b_type',
EntityRelationship.Relationships.RELATED_TO,
EntityRelationship.Relationships.RELATED_TO)
assert len(res) == 1
assert res[0]._entity_a == 'value_a'
assert res[0]._entity_a_type == 'entity_a_type'
assert res[0]._entity_b == 'value_b'
assert res[0]._entity_b_type == 'entity_b_type'
assert res[0]._name == 'related-to'
assert res[0]._reverse_name == 'related-to'
BASIC_INDICATOR = {
'operating_systems': 'operatingsystemrefs',
'aliases': 'redacted',
'capabilities': 'capabilities',
'industries': [{'name': 'tags'}],
'detections': 'mandiantdetections',
'yara': [{'name': 'name', 'id': 'id'}],
'roles': 'roles',
'id': 'stixid',
'name': 'name',
'description': 'description',
'last_updated': 'updateddate',
'last_activity_time': 'lastseenbysource',
'actors': [],
'cve': [],
'mscore': 100,
'motivations': 'primarymotivation',
'locations': {'target': [{'name': 'target'}]}
}
def test_create_malware_indicator():
from FeedMandiant import create_malware_indicator
client = mock_client()
res = create_malware_indicator(client, BASIC_INDICATOR)
assert res['value'] == 'name'
assert res['type'] == 'Malware'
assert len(res['fields']) == 11
def test_create_actor_indicator():
from FeedMandiant import create_actor_indicator
client = mock_client()
res = create_actor_indicator(client, BASIC_INDICATOR)
assert res['value'] == 'name'
assert res['type'] == 'Threat Actor'
assert len(res['fields']) == 7
@freezegun.freeze_time('2020-11-25T11:57:28Z')
def test_fetch_indicators(mocker):
from FeedMandiant import fetch_indicators
client = mock_client()
mocker.patch.object(client, 'get_indicators', return_value=INDICATOR_LIST)
res = fetch_indicators(client, update_context=False)
assert len(res) == 1
@pytest.mark.parametrize('command', ['test-module', 'feed-mandiant-get-indicators'])
def test_main(mocker, command):
from FeedMandiant import main, MandiantClient
import demistomock as demisto
params = {'auth': {'identifier': 'identifier', 'password': 'password'},
'insecure': True,
'url': 'url',
'first_fetch': "89 days ago",
'indicatorMetadata': True,
'limit': 10,
'indicatorRelationships': True,
'type': []}
mocker.patch.object(demisto, 'params', return_value=params)
mocker.patch.object(MandiantClient, '_generate_token', return_value='token')
mocker.patch.object(demisto, 'command', return_value=command)
main()
def test_get_indicator_list():
"""
Given -
client
When -
getting new indicators
Then -
receive list of indicators
"""
import FeedMandiant
client = mock_client()
res_indicators = util_load_json('./test_data/result_indicators.json')
def get_new_indicators_mock(a, b, c, d):
return res_indicators['new_indicators']
FeedMandiant.get_new_indicators = get_new_indicators_mock
res = FeedMandiant.get_indicator_list(client, 2, '90 days ago', 'Indicators')
assert res == res_indicators['new_indicators']
|
|
#!/usr/bin/env python
# Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This is a script to publish a notebook containing Ipython graphs
The static data is published as an anonymous gist. GitHub does not
allow easy deletions of anonymous gists.
"""
import json
import os
import re
import requests
import argparse
import urlparse
from IPython.nbformat.sign import TrustNotebookApp
from requests.auth import HTTPBasicAuth
from argparse import RawTextHelpFormatter
from ConfigParser import ConfigParser
# Logging Configuration
import logging
from trappy.plotter import IPythonConf
logging.basicConfig(level=logging.INFO)
RAWGIT = "rawgit.com"
GITHUB_API_URL = "https://api.github.com/gists"
def change_resource_paths(txt):
"""Change the resource paths from local to
Web URLs
"""
# Replace the path for d3-tip
txt = txt.replace(
IPythonConf.add_web_base("plotter_scripts/EventPlot/d3.tip.v0.6.3"),
"http://labratrevenge.com/d3-tip/javascripts/d3.tip.v0.6.3")
txt = txt.replace(
IPythonConf.add_web_base("plotter_scripts/EventPlot/d3.v3.min"),
"http://d3js.org/d3.v3.min")
txt = txt.replace(
IPythonConf.add_web_base("plotter_scripts/EventPlot/EventPlot"),
"https://rawgit.com/sinkap/7f89de3e558856b81f10/raw/46144f8f8c5da670c54f826f0c634762107afc66/EventPlot")
txt = txt.replace(
IPythonConf.add_web_base("plotter_scripts/ILinePlot/synchronizer"),
"http://dygraphs.com/extras/synchronizer")
txt = txt.replace(
IPythonConf.add_web_base("plotter_scripts/ILinePlot/dygraph-combined"),
"http://cdnjs.cloudflare.com/ajax/libs/dygraph/1.1.1/dygraph-combined")
txt = txt.replace(
IPythonConf.add_web_base("plotter_scripts/ILinePlot/ILinePlot"),
"https://rawgit.com/sinkap/648927dfd6985d4540a9/raw/69d6f1f9031ae3624c15707315ce04be1a9d1ac3/ILinePlot")
txt = txt.replace(
IPythonConf.add_web_base("plotter_scripts/ILinePlot/underscore-min"),
"https://cdnjs.cloudflare.com/ajax/libs/underscore.js/1.8.3/underscore-min")
logging.info("Updated Library Paths...")
return txt
def get_url_from_response(response, file_name):
"""Get the URL of gist from GitHub API response"""
resp_data = response.json()
url = resp_data["files"][file_name]["raw_url"]
url = list(urlparse.urlsplit(url))
url[1] = RAWGIT
url = urlparse.urlunsplit(url)
logging.info("gist created at: %s", url)
return url
def fig_to_json(fig, profile):
"""Get the underlying data file from figure name"""
data_dir = IPythonConf.get_data_path(profile)
return os.path.expanduser(
os.path.join(
data_dir,
fig +
".json"))
def create_new_gist(fig, profile, login):
"""Create a new gist for the data of the figure"""
path = fig_to_json(fig, profile)
file_name = os.path.basename(path)
with open(path) as file_h:
content = file_h.read()
data = {}
data["description"] = "Gist Data: {}".format(file_name)
data["public"] = True
data["files"] = {}
data["files"][file_name] = {}
data["files"][file_name]["content"] = content
response = requests.post(GITHUB_API_URL, data=json.dumps(data), auth=login)
return get_url_from_response(response, file_name)
def publish(source, target, profile, login):
"""Publish the notebook for globally viewable interactive
plots
"""
regex = r"(ILinePlot|EventPlot)\.generate\(\'(fig_.{32})\', '\/(nbextensions|static)\/'\)"
txt = ""
with open(source, 'r') as file_fh:
for line in file_fh:
match = re.search(regex, line)
if match:
plot = match.group(1)
fig = match.group(2)
logging.info("Publishing %s : %s", plot, fig)
line = re.sub(
regex,
plot + ".generate('" + fig + "', '" +
create_new_gist(fig, profile, login) + "')",
line)
txt += line
txt = change_resource_paths(txt)
with open(target, 'w') as file_fh:
file_fh.write(txt)
trust = TrustNotebookApp()
trust.sign_notebook(target)
logging.info("Signed and Saved: %s", target)
def read_login_config(config_file):
"""returns an HTTPBasicAuth object if the
config exists"""
if not config_file:
logging.debug("Anonymous gists will be created")
return None
with open(config_file, 'r') as c_fh:
config = ConfigParser()
config.readfp(c_fh)
username = config.get("login", "username")
token = config.get("login", "token")
logging.info("Received Login info for: %s", username)
return HTTPBasicAuth(username, token)
def main():
"""Command Line Invocation Routine"""
parser = argparse.ArgumentParser(description="""
The data for the interactive plots is stored in the ipython profile.
In order to make it accessible when the notebook is published or shared,
a github gist of the data is created and the links in the notebook are
updated. The library links are also updated to their corresponding publicly
accessible URLs.
The login credentials can be added to a config file as follows
1. Go to settings in your github profile and create a 'Personal Access Token'
2. This token can be used in place of your password for BasicAuth APIs
3. Create a config file:
[login]
username=<your github username>
token=<personal access token>
and pass the path to the file as -c <config>.
The gists can then be viewed in the corresponding github account.
The absence of this will create an anonymous gist which cannot be deleted/managed.""",
prog="publish_interactive_plots.py", formatter_class=RawTextHelpFormatter)
parser.add_argument(
"-p",
"--profile",
help="ipython profile",
default="default",
type=str)
parser.add_argument(
"-o",
"--outfile",
help="name of the output notebook",
default="",
type=str)
parser.add_argument(
"-c",
"--config",
help="The path to a config file containing github login credentials",
default=None,
type=str)
parser.add_argument("notebook")
args = parser.parse_args()
profile = args.profile
notebook = args.notebook
outfile = args.outfile
config = args.config
login = read_login_config(config)
if outfile == "":
outfile = "published_" + os.path.basename(notebook)
logging.info("Setting outfile as %s", outfile)
elif not outfile.endswith(".ipynb"):
outfile += ".ipynb"
publish(notebook, outfile, profile, login)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
#
# pip documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 22 22:08:49 2008
#
# This file is execfile()d with the current directory set to its containing dir
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import os
import re
import sys
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
docs_dir = os.path.dirname(os.path.dirname(__file__))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, docs_dir)
# sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# extensions = ['sphinx.ext.autodoc']
extensions = ['sphinx.ext.extlinks', 'pip_sphinxext', 'sphinx.ext.intersphinx']
# intersphinx
intersphinx_cache_limit = 0
intersphinx_mapping = {
'pypug': ('https://packaging.python.org/', None),
'pypa': ('https://www.pypa.io/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pip'
copyright = '2008-2017, PyPA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = release = 'dev'
# Readthedocs seems to install pip as an egg (via setup.py install) which
# is somehow resulting in "import pip" picking up an older copy of pip.
# Rather than trying to force RTD to install pip properly, we'll simply
# read the version direct from the __init__.py file. (Yes, this is
# fragile, but it works...)
pip_init = os.path.join(docs_dir, '..', 'src', 'pip', '__init__.py')
with open(pip_init) as f:
for line in f:
m = re.match(r'__version__ = "(.*)"', line)
if m:
__version__ = m.group(1)
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
break
# We have this here because readthedocs plays tricks sometimes and there seems
# to be a heisenbug, related to the version of pip discovered. This is here to
# help debug that if someone decides to do that in the future.
print(version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_patterns = ['build/']
# The reST default role (used for this markup: `text`) to use for all documents
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
extlinks = {
'issue': ('https://github.com/pypa/pip/issues/%s', '#'),
'pull': ('https://github.com/pypa/pip/pull/%s', 'PR #'),
'pypi': ('https://pypi.org/project/%s', ''),
}
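# With the extlinks roles above, reST sources can write e.g. :issue:`4567`,
# :pull:`6890`, or :pypi:`setuptools` (numbers and names are illustrative) and
# Sphinx expands them to the corresponding GitHub or PyPI URLs.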
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = "pypa_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'collapsiblesidebar': True,
'externalrefs': True,
'navigation_depth': 3,
'issues_url': 'https://github.com/pypa/pip/issues'
}
# Add any paths that contain custom themes here, relative to this directory.
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = '_static/piplogo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, the Docutils Smart Quotes transform (originally based on
# SmartyPants) will be used to convert characters like quotes and dashes
# to typographically correct entities. The default is True.
smartquotes = True
# This string, for use with Docutils 0.14 or later, customizes the
# SmartQuotes transform. The default of "qDe" converts normal quote
# characters ('"' and "'"), en and em dashes ("--" and "---"), and
# ellipses "...".
# For now, we disable the conversion of dashes so that long options
# like "--find-links" won't render as "-find-links" if included in the
# text in places where monospaced type can't be used. For example, backticks
# can't be used inside roles like :ref:`--no-index <--no-index>` because
# of nesting.
smartquotes_action = "qe"
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'relations.html'],
'index': ['localtoc.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pipdocs'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
(
'index',
'pip.tex',
u'pip Documentation',
u'pip developers',
'manual',
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
# -- Options for Manual Pages -------------------------------------------------
# List of manual pages generated
man_pages = [
(
'index',
'pip',
u'package manager for Python packages',
u'pip developers',
1
)
]
# Here, we crawl the entire man/commands/ directory and list every file with
# appropriate name and details
man_dir = os.path.join(docs_dir, 'man/')
raw_subcommands = glob.glob(os.path.join(man_dir, 'commands/*.rst'))
if not raw_subcommands:
raise FileNotFoundError(
'The individual subcommand manpages could not be found!'
)
for fname in raw_subcommands:
fname_base = fname[len(man_dir):-4]
outname = 'pip-' + fname_base[9:]
description = u'description of {} command'.format(
outname.replace('-', ' ')
)
man_pages.append((fname_base, outname, description, u'pip developers', 1))
|
|
# vim: expandtab:tabstop=4:shiftwidth=4
"""This module comprises Aws specific utility functions."""
import os
import re
import subprocess
from distutils.version import LooseVersion
import ruamel.yaml as yaml
# Buildbot does not have multi_inventory installed
#pylint: disable=no-name-in-module
from openshift_tools.inventory_clients import multi_inventory
class ArgumentError(Exception):
"""This class is raised when improper arguments are passed."""
def __init__(self, message):
"""Initialize an ArgumentError.
Keyword arguments:
message -- the exact error message being raised
"""
super(ArgumentError, self).__init__()
self.message = message
# pylint: disable=too-many-public-methods
class InventoryUtil(object):
"""This class contains the Inventory utility functions."""
def __init__(self, host_type_aliases=None, use_cache=True):
"""Initialize the Inventory utility class.
Keyword arguments:
host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
use_cache -- rely on cached inventory instead of querying for new inventory
"""
self.cached = use_cache
self.alias_lookup = {}
host_type_aliases = host_type_aliases or {}
self.host_type_aliases = host_type_aliases
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
self.setup_host_type_alias_lookup()
self._inventory = None
@property
def inventory(self):
""" Sets up a class property named inventory, this is the getter
It will pull in the file if the property is empty, otherwise just returns the variable
"""
if self._inventory is None:
self._inventory = multi_inventory.MultiInventory(None).run()
return self._inventory
def get_cluster(self, name):
""" return a cluster object """
return Cluster(name, self.inventory)
def setup_host_type_alias_lookup(self):
"""Sets up the alias to host-type lookup table."""
for key, values in self.host_type_aliases.iteritems():
for value in values:
self.alias_lookup[value] = key
def _get_tags_(self, regex):
""" Searches for tags in the inventory and returns all of the tags
found.
Param: a compiled regular expression
Returns: a List of tags
"""
tags = []
for key in self.inventory:
matched = regex.match(key)
if matched:
tags.append(matched.group(1))
tags.sort()
return tags
def get_clusters(self):
"""Searches for cluster tags in the inventory and returns all of the clusters found."""
pattern = re.compile(r'^oo_clusterid_(.*)')
return self._get_tags_(pattern)
def get_environments(self):
"""Searches for env tags in the inventory and returns all of the envs found."""
pattern = re.compile(r'^oo_environment_(.*)')
return self._get_tags_(pattern)
def get_host_types(self):
"""Searches for host-type tags in the inventory and returns all host-types found."""
pattern = re.compile(r'^oo_hosttype_(.*)')
return self._get_tags_(pattern)
def get_sub_host_types(self):
"""Searches for sub-host-type tags in the inventory and returns all sub-host-types found."""
pattern = re.compile(r'^oo_subhosttype_(.*)')
return self._get_tags_(pattern)
def get_security_groups(self):
"""Searches for security_groups in the inventory and returns all SGs found."""
pattern = re.compile(r'^security_group_(.*)')
return self._get_tags_(pattern)
def build_host_dict_by_env(self, args=None):
"""Searches the inventory for hosts in an env and returns their hostvars."""
args = args or []
inst_by_env = {}
for _, host in self.inventory['_meta']['hostvars'].items():
# If you don't have an environment tag, we're going to ignore you
if 'oo_environment' not in host:
continue
if host['oo_environment'] not in inst_by_env:
inst_by_env[host['oo_environment']] = {}
host_id = "%s:%s" % (host['oo_name'], host['oo_id'])
inst_by_env[host['oo_environment']][host_id] = host
return inst_by_env
def print_host_types(self):
"""Gets the list of host types and aliases and outputs them in columns."""
host_types = self.get_host_types()
ht_format_str = "%35s"
alias_format_str = "%-20s"
combined_format_str = ht_format_str + " " + alias_format_str
print
print combined_format_str % ('Host Types', 'Aliases')
print combined_format_str % ('----------', '-------')
for host_type in host_types:
aliases = []
if host_type in self.host_type_aliases:
aliases = self.host_type_aliases[host_type]
print combined_format_str % (host_type, ", ".join(aliases))
else:
print ht_format_str % host_type
print
def print_cluster_list(self):
"""Gets the list of clusters and outputs them"""
clusters = self.get_clusters()
for cluster in clusters:
print cluster
def resolve_host_type(self, host_type):
"""Converts a host-type alias into a host-type.
Keyword arguments:
host_type -- The alias or host_type to look up.
Example (depends on aliases defined in config file):
host_type = ex-node
returns: openshift-node
"""
        if host_type in self.alias_lookup:
            return self.alias_lookup[host_type]
return host_type
@staticmethod
def gen_version_tag(ver):
"""Generate the version tag
"""
return "oo_version_%s" % ver
@staticmethod
def gen_clusterid_tag(clu):
"""Generate the clusterid tag
"""
return "oo_clusterid_%s" % clu
@staticmethod
def gen_env_tag(env):
"""Generate the environment tag
"""
return "oo_environment_%s" % env
def gen_host_type_tag(self, host_type, version):
"""Generate the host type tag
"""
if version == '2':
host_type = self.resolve_host_type(host_type)
return "oo_hosttype_%s" % host_type
@staticmethod
def gen_sub_host_type_tag(sub_host_type):
"""Generate the host type tag
"""
return "oo_subhosttype_%s" % sub_host_type
    # This function uses all of these params to perform filtering on our host inventory.
# pylint: disable=too-many-arguments
def get_host_list(self, clusters=None, host_type=None, sub_host_type=None, envs=None, version=None):
"""Get the list of hosts from the inventory using host-type and environment
"""
retval = set([])
envs = envs or []
retval.update(self.inventory.get('all_hosts', []))
if clusters:
cluster_hosts = set([])
if len(clusters) > 1:
for cluster in clusters:
clu_tag = InventoryUtil.gen_clusterid_tag(cluster)
cluster_hosts.update(self.inventory.get(clu_tag, []))
else:
cluster_hosts.update(self.inventory.get(InventoryUtil.gen_clusterid_tag(clusters[0]), []))
retval.intersection_update(cluster_hosts)
if envs:
env_hosts = set([])
if len(envs) > 1:
for env in envs:
env_tag = InventoryUtil.gen_env_tag(env)
env_hosts.update(self.inventory.get(env_tag, []))
else:
env_hosts.update(self.inventory.get(InventoryUtil.gen_env_tag(envs[0]), []))
retval.intersection_update(env_hosts)
if host_type:
retval.intersection_update(self.inventory.get(self.gen_host_type_tag(host_type, version), []))
if sub_host_type:
retval.intersection_update(self.inventory.get(self.gen_sub_host_type_tag(sub_host_type), []))
if version != 'all':
retval.intersection_update(self.inventory.get(InventoryUtil.gen_version_tag(version), []))
return list(retval)
def convert_to_ip(self, hosts):
"""convert a list of host names to ip addresses"""
if not isinstance(hosts, list):
hosts = [hosts]
ips = []
for host in hosts:
ips.append(self.inventory['_meta']['hostvars'][host]['oo_public_ip'])
return ips
def get_cluster_variable(self, cluster, variable):
""" return an inventory variable that is common to a cluster"""
cluster = self.get_cluster(cluster)
return cluster.get_variable(variable)
def get_node_variable(self, host, variable):
""" return an inventory variable from a host"""
if host in self.inventory['_meta']['hostvars'] and variable in self.inventory['_meta']['hostvars'][host]:
return self.inventory['_meta']['hostvars'][host][variable]
return None
class Cluster(object):
""" This is a class to acces data about an Ops cluster """
def __init__(self, name, inventory=None):
""" Init the cluster class """
if inventory is None:
inventory = multi_inventory.MultiInventory(None).run()
self._name = name
self._openshift_version = None
self._docker_version = None
self.inventory = inventory
self._master_config = None
def __str__(self):
""" str representation of Cluster """
return self._name
def __repr__(self):
""" repr representation of Cluster """
return self._name
@property
def name(self):
""" cluster name property """
return self._name
@property
def location(self):
""" cluster location property """
return self.get_variable('oo_location')
@property
def sublocation(self):
""" cluster sublocation property """
return self.get_variable('oo_sublocation')
@property
def environment(self):
""" cluster environment property """
return self.get_variable('oo_environment')
@property
def deployment(self):
""" cluster deployment property """
return self.get_variable('oo_deployment')
@property
def account(self):
""" cluster account property """
return self.get_variable('oo_account')
@property
def accountid(self):
""" cluster account property """
return self.get_variable('oo_accountid')
@property
def test_cluster(self):
""" cluster cluster property """
return bool(self.get_variable('oo_test_cluster'))
@property
def primary_master(self):
""" return the first master """
primary_master = list(set(self.inventory["oo_master_primary"]) &
set(self.inventory["oo_clusterid_" + self._name]))[0]
return primary_master
@property
def cluster_nodes(self):
""" return the number of nodes - infra and compute """
cluster_nodes = self.inventory["oo_clusterid_" + self._name]
return cluster_nodes
@property
def node_count(self):
""" return the number of nodes - infra and compute """
cluster_compute_nodes = list(set(self.inventory["oo_hosttype_node"]) &
set(self.inventory["oo_clusterid_" + self._name]))
return len(cluster_compute_nodes)
@property
def scalegroup_node_count(self):
""" return the number of scalegroup nodes - infra and compute """
sg_cluster_nodes = list(set(self.inventory["oo_hosttype_node"]) &
set(self.inventory["oo_scalegroup_True"]) &
set(self.inventory["oo_clusterid_" + self._name]))
return len(sg_cluster_nodes)
@property
def master_config(self):
""" This is the master_config.yaml file stored as a dict """
if not self._master_config:
master_config_yaml = self.run_cmd_on_master("/usr/bin/cat /etc/origin/master/master-config.yaml",
strip=False)
self._master_config = yaml.safe_load(master_config_yaml)
return self._master_config
@staticmethod
def set_version(version):
""" manipulate the version variable """
os_version = {}
version_split = version.split('.')
os_version['version_release'] = version
os_version['version'] = version.split('-')[0]
os_version['short'] = '.'.join(version_split[0:2])
os_version['short_underscore'] = os_version['short'].replace(".", "_")
os_version['release'] = version.split('-')[1]
os_version['version_release_no_git'] = version.split('.git')[0]
        # Begin the weirdness that sets the full version
if LooseVersion(os_version['short']) < LooseVersion('3.6'):
os_version['full'] = os_version['short']
elif os_version['short'] == '3.6':
os_version['full'] = os_version['version']
else:
if os_version['release'].startswith('0'):
if "git" in os_version['version_release']:
os_version['full'] = os_version['version_release_no_git']
else:
os_version['full'] = os_version['version_release']
else:
os_version['full'] = os_version['version']
os_version['vfull'] = 'v' + os_version['full']
return os_version
@property
def openshift_version(self):
""" return a dict of openshift_version """
if not self._openshift_version:
self._openshift_version = {}
version = self.run_cmd_on_master("rpm -q --queryformat '%{VERSION}-%{RELEASE}' \
atomic-openshift")
self._openshift_version = Cluster.set_version(version)
return self._openshift_version
def get_variable(self, variable):
""" return an inventory variable that is common to a cluster"""
variables = []
for host in self.inventory['oo_clusterid_' + self.name]:
if variable in self.inventory['_meta']['hostvars'][host]:
variables.append(self.inventory['_meta']['hostvars'][host][variable])
if len(list(set(variables))) == 1:
return variables[0]
return None
def run_cmd_on_master(self, command, strip=True):
"""
Run a command on the primary master and return the output
command: string of the command to run on the primary master
e.g: command = "echo 'hello world'"
"""
command_output = subprocess.check_output(["/usr/bin/ossh", "root@" + self.primary_master, "-c", command])
if strip:
return command_output.strip()
return command_output
def oc_get(self, os_object, namespace=None, selector=None, json=False):
"""
Run an oc command on the primary master and return the output
os_object: pod, node, ns
namespace: project namespace
selector: label selector
json: return in json
"""
cmd = "oc get {}".format(os_object)
if namespace:
cmd += " -n {}".format(namespace)
if selector:
cmd += " -l {}".format(selector)
if json:
cmd += " -o json"
else:
cmd += " -o yaml"
command_output = self.run_cmd_on_master(cmd)
return command_output
def convert_inv_to_os_name(self, hostname):
""" convert ops hostname to the openshift hostname
example: free-stg-master-03fb6 -> ip-172-31-78-254.us-east-2.compute.internal
"""
if hostname not in self.inventory["_meta"]["hostvars"]:
return None
host_vars = self.inventory["_meta"]["hostvars"][hostname]
if host_vars['oo_location'] == 'gcp':
return hostname
elif host_vars['oo_location'] == 'aws':
return host_vars['ec2_private_dns_name']
def convert_list_to_os_names(self, hostnames):
""" convert a list of ops hostname to the openshift hostname
example: ['free-stg-node-infra-a18ed', 'free-stg-node-compute-006ef'] ->
[u'ip-172-31-73-38.us-east-2.compute.internal',
u'ip-172-31-74-247.us-east-2.compute.internal']
"""
if hostnames:
converted_hosts = []
for host in hostnames:
os_host = self.convert_inv_to_os_name(host)
converted_hosts.append(os_host)
return converted_hosts
def convert_os_to_inv_name(self, hostname):
""" convert openshift name to inventory name:
example: 'ip-172-31-69-53.us-east-2.compute.internal' => free-stg-node-infra-70a4e
"""
if hostname in self.cluster_nodes:
return hostname
for node in self.cluster_nodes:
if hostname == self.inventory["_meta"]["hostvars"][node]["ec2_private_dns_name"]:
return node
return None
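# A minimal usage sketch (assumes a populated multi_inventory backend; the
# cluster name is illustrative):
#
#   util = InventoryUtil()
#   masters = util.get_host_list(clusters=['mycluster'], host_type='master', version='3')
#   cluster = util.get_cluster('mycluster')
#   print cluster.openshift_version['full'], cluster.node_count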
|
|
#!/usr/bin/env python
# standard library
import numbers
import shutil
import sys
# third party
import numpy as np
# treeCl
from .partition import Partition
from .tree import Tree
from . import errors
from .utils import fileIO, print_and_return
class Simulator(object):
"""
Simulate alignments from several trees.
Args:
class_list = a list with an entry for each class, which is the
(integer) number of genes in that class
permutations_list = a list with an entry for each class, which is the
(integer) number of permutations the class tree has
relative to the master tree (see master_tree)
        nspecies = number of leaves on the master tree
datatype = 'dna' or 'protein'
"""
def __init__(
self,
class_list,
permutations_list,
nspecies,
subst_model,
rate_model,
master_tree_generator_method='yule',
master_tree=None,
class_tree_permuter='nni',
gene_length_kappa=1.7719,
gene_length_theta=279.9,
gene_length_min=10,
            gamma_rate_param=None,
            datatype='protein',  # assumed default for the datatype stored below
            tmpdir='/tmp',  # assumed default for the working directory created below
            outdir='./',
autocorrelated_relaxed_clock=False,
uncorrelated_relaxed_clock=False,
scale_rates=False,
verbosity=0,
):
# default
errors.optioncheck(master_tree_generator_method, ['yule', 'coal',
'rtree', 'custom'])
errors.optioncheck(class_tree_permuter, ['nni', 'spr', 'lgt', 'genetree'])
if master_tree is None and master_tree_generator_method == 'custom':
raise Exception('No custom tree was specified')
self.num_classes = len(class_list)
self.num_genes = sum(class_list)
self.class_list = class_list
self._master_tree = None
self.verbosity = verbosity
self.autocorrelated_relaxed_clock = autocorrelated_relaxed_clock
self.uncorrelated_relaxed_clock = uncorrelated_relaxed_clock
self.scale_rates = scale_rates
self.gene_trees = list()
if master_tree is None:
tree = self.generate_master_tree(master_tree_generator_method,
nspecies)
self.master_tree = tree
self.num_species = nspecies
else:
self.master_tree = master_tree
if len(master_tree) != nspecies:
msg = [
'Warning: supplied tree has {0} taxa. '.format(
len(master_tree)),
'Required number is {0}.\n'.format(nspecies),
'Resetting number of species to match the supplied tree.'
]
print(''.join(msg))
self.num_species = len(master_tree)
self.set_gene_lengths(gene_length_kappa, gene_length_theta,
gene_length_min)
self.gamma_rate_param = gamma_rate_param
self.permuter = class_tree_permuter
self.permutations_list = permutations_list
self.datatype = datatype
self.tmpdir = errors.directorymake(tmpdir)
self.outdir = outdir
self.generate_class_trees() # sets self.class_trees dict
self.make_alf_dirs() # sets self.alf_dirs dict
self.write_alf_params()
self.get_true_partition()
@property
def master_tree(self):
return self._master_tree
@master_tree.setter
def master_tree(self, tree):
self._master_tree = tree
def generate_master_tree(self, method, nspecies):
if method == 'yule':
tree = Tree.new_yule(nspecies)
tree.name = '{}_master_tree'.format(method)
return tree
elif method == 'coal':
tree = Tree.new_coal(nspecies)
tree.name = '{}_master_tree'.format(method)
return tree
elif method == 'rtree':
tree = Tree.new_rtree(nspecies)
tree.name = '{}_master_tree'.format(method)
return tree
def set_gene_lengths(
self,
kappa,
theta,
min_,
):
self.gene_length_kappa = kappa
self.gene_length_theta = theta
self.gene_length_min = min_
def generate_class_trees(self):
class_trees = {}
if self.permuter == 'genetree':
for k in range(self.num_classes):
class_trees[k + 1] = self.master_tree.sample_gene_tree(
scale_to=self.permutations_list[k])
else:
# Base trees for each class
for k in range(self.num_classes):
if self.permuter == 'nni':
t = self.master_tree.rnni(times=self.permutations_list[k])
t.name = 'class{}'.format(k + 1)
class_trees[k + 1] = t
elif self.permuter == 'spr':
t = self.master_tree.rspr(times=self.permutations_list[k],
disallow_sibling_sprs=True, keep_entire_edge=True)
t.name = 'class{}'.format(k + 1)
class_trees[k + 1] = t
elif self.permuter == 'lgt':
t = self.master_tree.rlgt(times=self.permutations_list[k],
disallow_sibling_lgts=True)
t.name = 'class{}'.format(k + 1)
class_trees[k + 1] = t
# Expand base class trees into individual trees
gene_trees = list()
for k in range(self.num_classes):
num_genes = self.class_list[k]
trees = list()
# populate the trees list
for _ in range(num_genes):
class_tree = class_trees[k + 1]
tree = Tree(class_tree.newick)
tree.name = class_tree.name
trees.append(tree)
# do per-tree rates/branch length adjustments
for i, tree in enumerate(trees, start=1):
if self.autocorrelated_relaxed_clock:
tree.autocorrelated_relaxed_clock(1, 0.01)
for node in tree.postorder_node_iter():
node.edge_length *= node.rate
tree.name += '_{}'.format(i)
elif self.uncorrelated_relaxed_clock:
tree.uncorrelated_relaxed_clock(1, 0.01 * tree.length())
for node in tree.postorder_node_iter():
node.edge_length *= node.rate
tree.name += '_{}'.format(i)
elif self.scale_rates:
coeff = np.random.uniform(0.666, 1.333)
tree.scale(coeff, inplace=True)
tree.name += '_{}'.format(i)
else:
tree.name += '_{}'.format(i)
gene_trees.extend(trees)
self.class_trees = class_trees
self.gene_trees = gene_trees
def make_alf_dirs(self):
alf_dirs = {}
for i, g in enumerate(self.gene_trees, start=1):
dirname = fileIO.join_path(self.tmpdir, g.name)
alf_dirs[i] = errors.directorymake(dirname)
self.alf_dirs = alf_dirs
def make_alf_dirs_(self):
""" DEPRECATED """
alf_dirs = {}
for k in range(self.num_classes):
dirname = fileIO.join_path(self.tmpdir, 'class{0:0>1}'.format(
k + 1))
alf_dirs[k + 1] = errors.directorymake(dirname)
self.alf_dirs = alf_dirs
def write_alf_params(self):
if not hasattr(self, 'alf_dirs'):
self.make_alf_dirs()
if not hasattr(self, 'class_trees'):
self.generate_class_trees()
alf_params = {}
for i, tree in enumerate(self.gene_trees, start=1):
alfdir = self.alf_dirs[i]
datatype = self.datatype
name = tree.name
num_genes = 1
seqlength = self.gene_length_min
gene_length_kappa = self.gene_length_kappa
gene_length_theta = self.gene_length_theta
alf_obj = ALF(tree=tree,
datatype=datatype, num_genes=num_genes,
seqlength=seqlength, gene_length_kappa=gene_length_kappa,
gene_length_theta=gene_length_theta, name=name, tmpdir=alfdir)
if isinstance(self.gamma_rate_param, numbers.Number):
alf_obj.params.rate_variation(self.gamma_rate_param)
if datatype == 'protein':
alf_obj.params.one_word_model('WAG')
else:
alf_obj.params.jc_model()
alf_params[i] = alf_obj
self.alf_params = alf_params
def write_alf_params_(self):
""" DEPRECATED """
if not hasattr(self, 'alf_dirs'):
self.make_alf_dirs()
if not hasattr(self, 'class_trees'):
self.generate_class_trees()
alf_params = {}
for k in range(self.num_classes):
alfdir = self.alf_dirs[k + 1]
tree = self.class_trees[k + 1]
datatype = self.datatype
name = 'class{0}'.format(k + 1)
num_genes = self.class_list[k]
seqlength = self.gene_length_min
gene_length_kappa = self.gene_length_kappa
gene_length_theta = self.gene_length_theta
alf_obj = ALF(tree=tree,
datatype=datatype, num_genes=num_genes,
seqlength=seqlength, gene_length_kappa=gene_length_kappa,
gene_length_theta=gene_length_theta, name=name, tmpdir=alfdir)
if datatype == 'protein':
alf_obj.params.one_word_model('WAG')
else:
alf_obj.params.jc_model()
alf_params[k + 1] = alf_obj
self.alf_params = alf_params
def clean(self):
if not hasattr(self, 'alf_dirs'):
return
for directory in self.alf_dirs.values():
shutil.rmtree(directory)
def run(self):
all_records = []
total_jobs = len(self.gene_trees)
for i, tree in enumerate(self.gene_trees, start=1):
if self.verbosity > 0:
print_and_return('Simulating {} ({:.1f}%)'.format(tree.name,
100 * i / total_jobs),
sys.stderr)
simulated_record = self.alf_params[i].run()[0]
simulated_record.name = tree.name
all_records.append(simulated_record)
self.result = all_records
self.clean()
return all_records
def run_(self):
""" DEPRECATED """
all_records = []
for k in range(self.num_classes):
simulated_records = self.alf_params[k + 1].run()
names = ['class{0}_{1:0>{2}}'.format(k + 1, i, len(str(self.class_list[k])))
for i in range(1, len(simulated_records) + 1)]
for (rec, name) in zip(simulated_records, names):
rec.name = name
all_records.extend(simulated_records)
self.result = all_records
self.clean()
return all_records
def write(self):
if hasattr(self, 'result'):
errors.directorymake(self.outdir)
errors.directorymake(fileIO.join_path(self.outdir,
'base_class_trees'))
errors.directorymake(fileIO.join_path(self.outdir,
'gene_trees'))
for rec in self.result:
filename = fileIO.join_path(self.outdir, rec.name) + '.phy'
rec.write_phylip(filename, interleaved=True)
for i in range(self.num_classes):
tree = self.class_trees[i + 1]
name = 'base_tree_class{0:0>{1}}.nwk'.format(i + 1,
len(str(self.num_classes)))
filename = fileIO.join_path(self.outdir, 'base_class_trees',
name)
tree.write_to_file(filename)
for i, tree in enumerate(self.gene_trees, start=1):
filename = fileIO.join_path(self.outdir, 'gene_trees',
tree.name + '.nwk')
tree.write_to_file(filename)
self.master_tree.write_to_file(fileIO.join_path(self.outdir,
'master_tree.nwk'))
filename = fileIO.join_path(self.outdir, 'true_partition.txt')
with open(filename, 'w') as partition_file:
partition_file.write(repr(self.true_partition))
def get_true_partition(self):
l = []
for k in range(len(self.class_list)):
l.extend([k + 1] * self.class_list[k])
self.true_partition = Partition(l)
return self.true_partition
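# Minimal programmatic usage sketch (parameter values are illustrative; assumes
# the ALF wrapper referenced in write_alf_params and the other treeCl
# dependencies are importable in this environment):
# sim = Simulator(class_list=[5, 5], permutations_list=[1, 1], nspecies=12,
#                 datatype='protein', class_tree_permuter='nni',
#                 tmpdir='/tmp', outdir='./sim_out')
# records = sim.run()
# sim.write()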
if __name__ == '__main__':
import argparse
prog = fileIO.basename(__file__)
parser = argparse.ArgumentParser(description='{0}'.format(prog))
parser.add_argument('classes', type=int, nargs='+')
parser.add_argument('-p', '--permutations', type=int, nargs='+')
parser.add_argument('-s', '--species', type=int, default=12)
parser.add_argument('-d', '--datatype', type=str, default='protein')
parser.add_argument('-g', '--tree_generator', type=str, default='yule')
parser.add_argument('-t', '--tree', type=str)
parser.add_argument('--permuter', type=str, default='lgt')
parser.add_argument('-l', '--gamma_params', type=float, nargs=2,
default=(1.7719, 279.9))
parser.add_argument('-m', '--min_length', type=int, default=10)
parser.add_argument('--tmp', type=str, default='/tmp')
parser.add_argument('-o', '--output', type=str)
args = parser.parse_args()
if args.permutations is None:
args.permutations = [1 for _ in args.classes]
sim = Simulator(
class_list=args.classes,
permutations_list=args.permutations,
nspecies=args.species,
datatype=args.datatype,
master_tree_generator_method=args.tree_generator,
master_tree=args.tree,
class_tree_permuter=args.permuter,
gene_length_kappa=args.gamma_params[0],
gene_length_theta=args.gamma_params[1],
gene_length_min=args.min_length,
tmpdir=args.tmp,
outdir=args.output)
sim.run()
recs = sim.result
if args.output is not None:
sim.write()
|
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy import or_
from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from sqlalchemy import sql
from neutron.common import constants
from neutron.common import utils as n_utils
from neutron import context as n_ctx
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import l3_attrs_db
from neutron.db import model_base
from neutron.extensions import l3agentscheduler
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
LOG = logging.getLogger(__name__)
L3_AGENTS_SCHEDULER_OPTS = [
cfg.StrOpt('router_scheduler_driver',
default='neutron.scheduler.l3_agent_scheduler.ChanceScheduler',
help=_('Driver to use for scheduling '
'router to a default L3 agent')),
cfg.BoolOpt('router_auto_schedule', default=True,
help=_('Allow auto scheduling of routers to L3 agent.')),
cfg.BoolOpt('allow_automatic_l3agent_failover', default=False,
help=_('Automatically reschedule routers from offline L3 '
'agents to online L3 agents.')),
]
cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS)
class RouterL3AgentBinding(model_base.BASEV2):
"""Represents binding between neutron routers and L3 agents."""
router_id = sa.Column(sa.String(36),
sa.ForeignKey("routers.id", ondelete='CASCADE'),
primary_key=True)
l3_agent = orm.relation(agents_db.Agent)
l3_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id", ondelete='CASCADE'),
primary_key=True)
class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
agentschedulers_db.AgentSchedulerDbMixin):
"""Mixin class to add l3 agent scheduler extension to plugins
using the l3 agent for routing.
"""
router_scheduler = None
def start_periodic_l3_agent_status_check(self):
if not cfg.CONF.allow_automatic_l3agent_failover:
LOG.info(_LI("Skipping period L3 agent status check because "
"automatic router rescheduling is disabled."))
return
self.setup_agent_status_check(
self.reschedule_routers_from_down_agents)
def reschedule_routers_from_down_agents(self):
"""Reschedule routers from down l3 agents if admin state is up."""
agent_dead_limit = self.agent_dead_limit_seconds()
self.wait_down_agents('L3', agent_dead_limit)
cutoff = self.get_cutoff_time(agent_dead_limit)
context = n_ctx.get_admin_context()
down_bindings = (
context.session.query(RouterL3AgentBinding).
join(agents_db.Agent).
filter(agents_db.Agent.heartbeat_timestamp < cutoff,
agents_db.Agent.admin_state_up).
outerjoin(l3_attrs_db.RouterExtraAttributes,
l3_attrs_db.RouterExtraAttributes.router_id ==
RouterL3AgentBinding.router_id).
filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
try:
for binding in down_bindings:
LOG.warn(_LW(
"Rescheduling router %(router)s from agent %(agent)s "
"because the agent did not report to the server in "
"the last %(dead_time)s seconds."),
{'router': binding.router_id,
'agent': binding.l3_agent_id,
'dead_time': agent_dead_limit})
try:
self.reschedule_router(context, binding.router_id)
except (l3agentscheduler.RouterReschedulingFailed,
oslo_messaging.RemoteError):
# Catch individual router rescheduling errors here
# so one broken one doesn't stop the iteration.
LOG.exception(_LE("Failed to reschedule router %s"),
binding.router_id)
except db_exc.DBError:
# Catch DB errors here so a transient DB connectivity issue
# doesn't stop the loopingcall.
LOG.exception(_LE("Exception encountered during router "
"rescheduling."))
def validate_agent_router_combination(self, context, agent, router):
"""Validate if the router can be correctly assigned to the agent.
:raises: RouterL3AgentMismatch if attempting to assign DVR router
to legacy agent, or centralized router to compute's L3 agents.
:raises: InvalidL3Agent if attempting to assign router to an
unsuitable agent (disabled, type != L3, incompatible configuration)
:raises: DVRL3CannotAssignToDvrAgent if attempting to assign DVR
router from one DVR Agent to another.
"""
is_distributed = router.get('distributed')
agent_conf = self.get_configuration_dict(agent)
agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
constants.L3_AGENT_MODE_LEGACY)
router_type = (
'distributed' if is_distributed else
'centralized')
is_agent_router_types_incompatible = (
agent_mode == constants.L3_AGENT_MODE_DVR and not is_distributed
or agent_mode == constants.L3_AGENT_MODE_LEGACY and is_distributed
)
if is_agent_router_types_incompatible:
raise l3agentscheduler.RouterL3AgentMismatch(
router_type=router_type, router_id=router['id'],
agent_mode=agent_mode, agent_id=agent['id'])
if agent_mode == constants.L3_AGENT_MODE_DVR and is_distributed:
raise l3agentscheduler.DVRL3CannotAssignToDvrAgent(
router_type=router_type, router_id=router['id'],
agent_id=agent['id'])
is_wrong_type_or_unsuitable_agent = (
agent['agent_type'] != constants.AGENT_TYPE_L3 or
not agentschedulers_db.services_available(agent['admin_state_up'])
or
not self.get_l3_agent_candidates(context, router, [agent],
ignore_admin_state=True))
if is_wrong_type_or_unsuitable_agent:
raise l3agentscheduler.InvalidL3Agent(id=agent['id'])
def check_agent_router_scheduling_needed(self, context, agent, router):
"""Check if the router scheduling is needed.
:raises: RouterHostedByL3Agent if router is already assigned
to a different agent.
:returns: True if scheduling is needed, otherwise False
"""
router_id = router['id']
agent_id = agent['id']
query = context.session.query(RouterL3AgentBinding)
bindings = query.filter_by(router_id=router_id).all()
if not bindings:
return True
for binding in bindings:
if binding.l3_agent_id == agent_id:
# router already bound to the agent we need
return False
if router.get('distributed'):
return False
# non-dvr case: centralized router is already bound to some agent
raise l3agentscheduler.RouterHostedByL3Agent(
router_id=router_id,
agent_id=bindings[0].l3_agent_id)
def create_router_to_agent_binding(self, context, agent, router):
"""Create router to agent binding."""
router_id = router['id']
agent_id = agent['id']
if self.router_scheduler:
try:
self.router_scheduler.bind_router(context, router_id, agent)
except db_exc.DBError:
raise l3agentscheduler.RouterSchedulingFailed(
router_id=router_id, agent_id=agent_id)
def add_router_to_l3_agent(self, context, agent_id, router_id):
"""Add a l3 agent to host a router."""
with context.session.begin(subtransactions=True):
router = self.get_router(context, router_id)
agent = self._get_agent(context, agent_id)
self.validate_agent_router_combination(context, agent, router)
if self.check_agent_router_scheduling_needed(
context, agent, router):
self.create_router_to_agent_binding(context, agent, router)
else:
return
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
l3_notifier.router_added_to_agent(
context, [router_id], agent.host)
def remove_router_from_l3_agent(self, context, agent_id, router_id):
"""Remove the router from l3 agent.
After removal, the router will be non-hosted until there is update
which leads to re-schedule or be added to another agent manually.
"""
agent = self._get_agent(context, agent_id)
self._unbind_router(context, router_id, agent_id)
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
l3_notifier.router_removed_from_agent(
context, router_id, agent.host)
def _unbind_router(self, context, router_id, agent_id):
with context.session.begin(subtransactions=True):
query = context.session.query(RouterL3AgentBinding)
query = query.filter(
RouterL3AgentBinding.router_id == router_id,
RouterL3AgentBinding.l3_agent_id == agent_id)
query.delete()
def reschedule_router(self, context, router_id, candidates=None):
"""Reschedule router to a new l3 agent
Remove the router from the agent(s) currently hosting it and
schedule it again
"""
cur_agents = self.list_l3_agents_hosting_router(
context, router_id)['agents']
with context.session.begin(subtransactions=True):
for agent in cur_agents:
self._unbind_router(context, router_id, agent['id'])
new_agent = self.schedule_router(context, router_id,
candidates=candidates)
if not new_agent:
raise l3agentscheduler.RouterReschedulingFailed(
router_id=router_id)
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
for agent in cur_agents:
l3_notifier.router_removed_from_agent(
context, router_id, agent['host'])
l3_notifier.router_added_to_agent(
context, [router_id], new_agent.host)
def list_routers_on_l3_agent(self, context, agent_id):
query = context.session.query(RouterL3AgentBinding.router_id)
query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id)
router_ids = [item[0] for item in query]
if router_ids:
return {'routers':
self.get_routers(context, filters={'id': router_ids})}
else:
# Exception will be thrown if the requested agent does not exist.
self._get_agent(context, agent_id)
return {'routers': []}
def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
router_ids):
if n_utils.is_extension_supported(self,
constants.L3_HA_MODE_EXT_ALIAS):
return self.get_ha_sync_data_for_host(context, host,
router_ids=router_ids,
active=True)
return self.get_sync_data(context, router_ids=router_ids, active=True)
def list_active_sync_routers_on_active_l3_agent(
self, context, host, router_ids):
agent = self._get_agent_by_type_and_host(
context, constants.AGENT_TYPE_L3, host)
if not agentschedulers_db.services_available(agent.admin_state_up):
return []
query = context.session.query(RouterL3AgentBinding.router_id)
query = query.filter(
RouterL3AgentBinding.l3_agent_id == agent.id)
if router_ids:
query = query.filter(
RouterL3AgentBinding.router_id.in_(router_ids))
router_ids = [item[0] for item in query]
if router_ids:
return self._get_active_l3_agent_routers_sync_data(context, host,
agent,
router_ids)
return []
def get_l3_agents_hosting_routers(self, context, router_ids,
admin_state_up=None,
active=None):
if not router_ids:
return []
query = context.session.query(RouterL3AgentBinding)
query = query.options(orm.contains_eager(
RouterL3AgentBinding.l3_agent))
query = query.join(RouterL3AgentBinding.l3_agent)
query = query.filter(RouterL3AgentBinding.router_id.in_(router_ids))
if admin_state_up is not None:
query = (query.filter(agents_db.Agent.admin_state_up ==
admin_state_up))
l3_agents = [binding.l3_agent for binding in query]
if active is not None:
l3_agents = [l3_agent for l3_agent in
l3_agents if not
agents_db.AgentDbMixin.is_agent_down(
l3_agent['heartbeat_timestamp'])]
return l3_agents
def _get_l3_bindings_hosting_routers(self, context, router_ids):
if not router_ids:
return []
query = context.session.query(RouterL3AgentBinding)
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id.in_(router_ids))
return query.all()
def list_l3_agents_hosting_router(self, context, router_id):
with context.session.begin(subtransactions=True):
bindings = self._get_l3_bindings_hosting_routers(
context, [router_id])
return {'agents': [self._make_agent_dict(binding.l3_agent) for
binding in bindings]}
def get_l3_agents(self, context, active=None, filters=None):
query = context.session.query(agents_db.Agent)
query = query.filter(
agents_db.Agent.agent_type == constants.AGENT_TYPE_L3)
if active is not None:
query = (query.filter(agents_db.Agent.admin_state_up == active))
if filters:
for key, value in filters.iteritems():
column = getattr(agents_db.Agent, key, None)
if column:
if not value:
return []
query = query.filter(column.in_(value))
agent_modes = filters.get('agent_modes', [])
if agent_modes:
agent_mode_key = '\"agent_mode\": \"'
configuration_filter = (
[agents_db.Agent.configurations.contains('%s%s\"' %
(agent_mode_key, agent_mode))
for agent_mode in agent_modes])
query = query.filter(or_(*configuration_filter))
return [l3_agent
for l3_agent in query
if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent(
active, l3_agent)]
def check_ports_exist_on_l3agent(self, context, l3_agent, router_id):
"""
Check for the existence of DVR serviceable ports on the host
running the given L3 agent.
"""
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
if not subnet_ids:
return False
core_plugin = manager.NeutronManager.get_plugin()
# NOTE(swami):Before checking for existence of dvr
# serviceable ports on the host managed by the l3
# agent, let's verify if at least one subnet has
# dhcp enabled. If so, then the host will have a
# dvr serviceable port, which is in fact the DHCP
# port.
# This optimization is valid assuming that the L3
# DVR_SNAT node will be the one hosting the DHCP
# Agent.
agent_conf = self.get_configuration_dict(l3_agent)
agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
constants.L3_AGENT_MODE_LEGACY)
for subnet_id in subnet_ids:
subnet_dict = core_plugin.get_subnet(context, subnet_id)
if (subnet_dict['enable_dhcp'] and (
agent_mode == constants.L3_AGENT_MODE_DVR_SNAT)):
return True
filter = {'fixed_ips': {'subnet_id': subnet_ids}}
ports = core_plugin.get_ports(context, filters=filter)
for port in ports:
if (n_utils.is_dvr_serviced(port['device_owner']) and
l3_agent['host'] == port['binding:host_id']):
return True
return False
def get_snat_candidates(self, sync_router, l3_agents):
"""Get the valid snat enabled l3 agents for the distributed router."""
candidates = []
is_router_distributed = sync_router.get('distributed', False)
if not is_router_distributed:
return candidates
for l3_agent in l3_agents:
if not l3_agent.admin_state_up:
continue
agent_conf = self.get_configuration_dict(l3_agent)
agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
constants.L3_AGENT_MODE_LEGACY)
if agent_mode != constants.L3_AGENT_MODE_DVR_SNAT:
continue
router_id = agent_conf.get('router_id', None)
use_namespaces = agent_conf.get('use_namespaces', True)
if not use_namespaces and router_id != sync_router['id']:
continue
handle_internal_only_routers = agent_conf.get(
'handle_internal_only_routers', True)
gateway_external_network_id = agent_conf.get(
'gateway_external_network_id', None)
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
'network_id')
if ((not ex_net_id and not handle_internal_only_routers) or
(ex_net_id and gateway_external_network_id and
ex_net_id != gateway_external_network_id)):
continue
candidates.append(l3_agent)
return candidates
def get_l3_agent_candidates(self, context, sync_router, l3_agents,
ignore_admin_state=False):
"""Get the valid l3 agents for the router from a list of l3_agents."""
candidates = []
for l3_agent in l3_agents:
if not ignore_admin_state and not l3_agent.admin_state_up:
# ignore_admin_state True comes from manual scheduling
# where admin_state_up judgement is already done.
continue
agent_conf = self.get_configuration_dict(l3_agent)
router_id = agent_conf.get('router_id', None)
use_namespaces = agent_conf.get('use_namespaces', True)
handle_internal_only_routers = agent_conf.get(
'handle_internal_only_routers', True)
gateway_external_network_id = agent_conf.get(
'gateway_external_network_id', None)
agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
constants.L3_AGENT_MODE_LEGACY)
if not use_namespaces and router_id != sync_router['id']:
continue
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
'network_id')
if ((not ex_net_id and not handle_internal_only_routers) or
(ex_net_id and gateway_external_network_id and
ex_net_id != gateway_external_network_id)):
continue
is_router_distributed = sync_router.get('distributed', False)
if agent_mode in (
constants.L3_AGENT_MODE_LEGACY,
constants.L3_AGENT_MODE_DVR_SNAT) and (
not is_router_distributed):
candidates.append(l3_agent)
elif is_router_distributed and agent_mode.startswith(
constants.L3_AGENT_MODE_DVR) and (
self.check_ports_exist_on_l3agent(
context, l3_agent, sync_router['id'])):
candidates.append(l3_agent)
return candidates
def auto_schedule_routers(self, context, host, router_ids):
if self.router_scheduler:
return self.router_scheduler.auto_schedule_routers(
self, context, host, router_ids)
def schedule_router(self, context, router, candidates=None):
if self.router_scheduler:
return self.router_scheduler.schedule(
self, context, router, candidates=candidates)
def schedule_routers(self, context, routers):
"""Schedule the routers to l3 agents."""
for router in routers:
self.schedule_router(context, router, candidates=None)
def get_l3_agent_with_min_routers(self, context, agent_ids):
"""Return l3 agent with the least number of routers."""
if not agent_ids:
return None
query = context.session.query(
agents_db.Agent,
func.count(
RouterL3AgentBinding.router_id
).label('count')).outerjoin(RouterL3AgentBinding).group_by(
RouterL3AgentBinding.l3_agent_id).order_by('count')
res = query.filter(agents_db.Agent.id.in_(agent_ids)).first()
return res[0]
|
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickformatstop(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.ternary.baxis"
_path_str = "layout.ternary.baxis.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
# dtickrange
# ----------
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
# enabled
# -------
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# value
# -----
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.ternary
.baxis.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super(Tickformatstop, self).__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.ternary.baxis.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.baxis.Tickformatstop`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("dtickrange", None)
_v = dtickrange if dtickrange is not None else _v
if _v is not None:
self["dtickrange"] = _v
_v = arg.pop("enabled", None)
_v = enabled if enabled is not None else _v
if _v is not None:
self["enabled"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
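# Usage sketch (values are illustrative only):
# stop = Tickformatstop(dtickrange=[None, 0.01], value=".3f", enabled=True)
# applies the ".3f" tick format whenever the axis dtick is at most 0.01.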
|
|
#!/usr/bin/env python
##
## Copyright 2009 Adriana Lukas & Alec Muffett
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You
## may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
## implied. See the License for the specific language governing
## permissions and limitations under the License.
##
"""
http://docs.djangoproject.com/en/dev/howto/static-files/ says:
<QUOTE>Django itself doesn't serve static (media) files, such as
images, style sheets, or video. It leaves that job to whichever Web
server you choose. The reasoning here is that standard Web servers,
such as Apache, lighttpd and Cherokee, are much more fine-tuned at
serving static files than a Web application framework.</QUOTE>
...which is fine, but which doesn't actually help us when Django is
being used to implement potentially hundreds of mini-websites with
their own novel forms of authentication and where you don't want
management overhead of keeping (documenting?) how to synchronise their
authentication needs with [INSERT NAME OF PREFERRED WEBSERVER DU
JOUR].
See also: http://code.djangoproject.com/ticket/2131#comment:2
<QUOTE>Django isn't meant to serve static files, so I'm marking this
as a wontfix.</QUOTE>
So in the face of those wanting to nanny us into "proper behaviour",
regrettably we have to roll our own.
We are allowed to take the performance hit, because the point is to
have "one mine per user spattered all over the world" rather than
"bazillion mines all at one hosting company which subsequently suffers
performance issues".
"""
from django.conf import settings
from django.http import HttpResponseNotFound, HttpResponse, HttpResponseRedirect
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import render_to_response
import os
import re
import pymine.util.mimestuff as mimestuff
##################################################################
def cleanpath(old):
# fasttrack root
if not old or (old == '/'):
return ''
# fault anything with a "?" - we don't do those
if old.find('?') >= 0:
raise RuntimeError, "string contains query data: %s" % old
# is there a trailing slash?
if old.endswith('/'):
has_trailing = True
else:
has_trailing = False
# split on slash/multislash
elements = re.split('/+', old)
if not elements: return ''
# delete any trailing empty element
if elements[-1] == '':
elements.pop(-1) # explicit
if not elements: return ''
# delete any element which is "."
elements = [ x for x in elements if x != '.' ]
if not elements: return ''
# for any element which is "..", delete it and its previous
output = []
while elements:
x = elements.pop(0)
if x == '..':
if output:
output.pop()
else:
output.append(x)
elements = output
if not elements: return ''
# result
new = "/".join(elements)
# reappend trailing slash
if has_trailing:
new += '/'
# join
return new
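# A few concrete examples of the normalisation above (derived from the code,
# not from upstream docs):
# cleanpath('a//b') == 'a/b'
# cleanpath('x/./y/') == 'x/y/'
# cleanpath('a/../b') == 'b'
# cleanpath('/') == ''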
##################################################################
def httpserve_error(url_path):
return HttpResponseNotFound(url_path)
##################################################################
def httpserve_file(file_path, content_type):
fw = FileWrapper(file(file_path), 65536)
if not content_type:
content_type, encoding = mimestuff.lookup.guess_type(file_path)
response = HttpResponse(fw, content_type=content_type)
response['Content-Length'] = os.path.getsize(file_path)
return response
##################################################################
def httpserve_directory(file_path):
s = {
'name' : 'insert dirname here',
'list' : [],
}
list = s['list']
files = os.listdir(file_path)
files.sort()
for file in files:
if file.startswith("."):
continue
if os.path.isdir(os.path.join(file_path, file)):
list.append(dict(name=file,
link='%s/' % file,
description='[dir]'))
else:
list.append(dict(name=file,
link=file,
description=''))
return render_to_response('list/directory.html', s)
##################################################################
def httpserve_path(request, orig_path):
# clean the url
url_path = cleanpath(orig_path)
# if the cleaning has changed anything, force user to go around again
if url_path != orig_path:
redirect_path = "/pub/%s" % url_path
return HttpResponseRedirect(redirect_path)
# if there's a url_path
if url_path == '':
# there's no orig_path
file_path = settings.MEDIA_ROOT
else:
# split the url_path into components
elements = url_path.split('/')
# compose a file path
file_path = os.path.join(settings.MEDIA_ROOT, *elements)
# if it's a directory
if os.path.isdir(file_path):
# redirect if a trailing slash is missing
if not (url_path == '' or url_path.endswith('/')):
redirect_path = "/pub/%s/" % url_path
return HttpResponseRedirect(redirect_path)
# look for an index
for i in ('index.html', 'index.htm', 'INDEX.HTML', 'INDEX.HTM'):
ifile = os.path.join(file_path, i)
if os.path.isfile(ifile):
return httpserve_file(ifile, None)
# else raw directory
return httpserve_directory(file_path)
# if it's a file
elif os.path.isfile(file_path):
return httpserve_file(file_path, None)
# if it's an other
return httpserve_error(url_path)
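# This view is normally wired up from urls.py under the /pub/ prefix that the
# redirects above assume, e.g. (assumption; exact module path depends on the
# project's URLconf):
# url(r'^pub/(?P<orig_path>.*)$', httpserve_path)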
##################################################################
|
|
import select
import urllib.parse
import socket
import http.client
import time
import os
import utils
import ssl
class Connection():
def __init__(self, sock):
self.sock = sock
self.create_time = time.time()
def close(self):
self.sock.close()
class BaseResponse(object):
def __init__(self, status=601, reason=b"", headers={}, body=b""):
self.status = status
self.reason = reason
self.headers = {}
for key in headers:
if isinstance(key, tuple):
key, value = key
else:
value = headers[key]
key = key.title()
self.headers[key] = value
self.text = body
def getheader(self, key, default_value=b""):
key = key.title()
if key in self.headers:
return self.headers[key]
else:
return default_value
class TxtResponse(BaseResponse):
def __init__(self, buffer):
BaseResponse.__init__(self)
if isinstance(buffer, memoryview):
self.view = buffer
self.read_buffer = buffer.tobytes()
elif isinstance(buffer, str):
self.read_buffer = utils.to_bytes(buffer)
self.view = memoryview(self.read_buffer)
elif isinstance(buffer, bytes):
self.read_buffer = buffer
self.view = memoryview(buffer)
else:
raise Exception("TxtResponse error")
self.buffer_start = 0
self.parse()
def read_line(self):
n1 = self.read_buffer.find(b"\r\n", self.buffer_start)
if n1 == -1:
raise Exception("read_line fail")
line = self.read_buffer[self.buffer_start:n1]
self.buffer_start = n1 + 2
return line
def read_headers(self):
n1 = self.read_buffer.find(b"\r\n\r\n", self.buffer_start)
if n1 == -1:
raise Exception("read_headers fail")
block = self.read_buffer[self.buffer_start:n1]
self.buffer_start = n1 + 4
return block
def parse(self):
requestline = self.read_line()
words = requestline.split()
if len(words) < 2:
raise Exception("status line:%s" % requestline)
self.version = words[0]
self.status = int(words[1])
self.info = b" ".join(words[2:])
self.headers = {}
header_block = self.read_headers()
lines = header_block.split(b"\r\n")
for line in lines:
p = line.find(b":")
key = line[0:p]
value = line[p+2:]
key = key.title()
self.headers[key] = value
self.body = self.view[self.buffer_start:]
self.read_buffer = b""
self.buffer_start = 0
class Response(BaseResponse):
def __init__(self, ssl_sock):
BaseResponse.__init__(self)
self.connection = ssl_sock
ssl_sock.settimeout(1)
self.read_buffer = b""
self.buffer_start = 0
self.chunked = False
def read_line(self, timeout=60):
start_time = time.time()
sock = self.connection
sock.setblocking(0)
try:
while True:
n1 = self.read_buffer.find(b"\r\n", self.buffer_start)
if n1 > -1:
line = self.read_buffer[self.buffer_start:n1]
self.buffer_start = n1 + 2
return line
if time.time() - start_time > timeout:
raise socket.timeout()
time.sleep(0.001)
try:
data = sock.recv(8192)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
#time.sleep(0.1)
time_left = start_time + timeout - time.time()
r, w, e = select.select([sock], [], [], time_left)
continue
else:
raise e
if isinstance(data, int):
continue
if data and len(data):
self.read_buffer += data
finally:
sock.setblocking(1)
def read_headers(self, timeout=60):
start_time = time.time()
sock = self.connection
sock.setblocking(0)
try:
while True:
n1 = self.read_buffer.find(b"\r\n\r\n", self.buffer_start)
if n1 > -1:
block = self.read_buffer[self.buffer_start:n1]
self.buffer_start = n1 + 4
return block
if time.time() - start_time > timeout:
raise socket.timeout()
time.sleep(0.001)
try:
data = sock.recv(8192)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
time.sleep(0.1)
continue
else:
raise e
self.read_buffer += data
except Exception as e:
print(e)
finally:
sock.setblocking(1)
def begin(self, timeout=60):
start_time = time.time()
line = self.read_line(500)
requestline = line.rstrip(b'\r\n')
words = requestline.split()
if len(words) < 2:
raise Exception("status line:%s" % requestline)
self.version = words[0]
self.status = int(words[1])
self.reason = b" ".join(words[2:])
self.headers = {}
timeout -= time.time() - start_time
timeout = max(timeout, 0.1)
header_block = self.read_headers(timeout)
lines = header_block.split(b"\r\n")
for line in lines:
p = line.find(b":")
key = line[0:p]
value = line[p+2:]
key = key.title()
self.headers[key] = value
self.content_length = self.getheader(b"content-length", b"")
if b"chunked" in self.getheader(b"Transfer-Encoding", b""):
self.chunked = True
self.chunk_list = []
if b"gzip" in self.getheader(b"Transfer-Encoding", b""):
print("not work")
def _read_plain(self, read_len, timeout):
if read_len == 0:
return ""
#elif read_len > 0:
# return self._read_size(read_len, timeout)
if read_len is not None and len(self.read_buffer) - self.buffer_start > read_len:
out_str = self.read_buffer[self.buffer_start:self.buffer_start + read_len]
self.buffer_start += read_len
if len(self.read_buffer) == self.buffer_start:
self.read_buffer = b""
self.buffer_start = 0
return out_str
self.connection.setblocking(0)
start_time = time.time()
out_len = len(self.read_buffer) - self.buffer_start
out_list = [ self.read_buffer[self.buffer_start:] ]
self.read_buffer = b""
self.buffer_start = 0
while time.time() - start_time < timeout:
if not read_len and out_len > 0:
break
if read_len and out_len >= read_len:
break
if read_len:
to_read = read_len - out_len
to_read = min(to_read, 65535)
else:
to_read = 65535
try:
data = self.connection.recv(to_read)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
#time.sleep(0.1)
time_left = start_time + timeout - time.time()
r, w, e = select.select([self.connection], [], [], time_left)
continue
else:
raise e
if data:
out_list.append(data)
out_len += len(data)
if read_len is not None and out_len < read_len:
raise socket.timeout()
return b"".join(out_list)
def _read_size(self, read_len, timeout):
if len(self.read_buffer) - self.buffer_start > read_len:
buf = memoryview(self.read_buffer)
out_str = buf[self.buffer_start:self.buffer_start + read_len]
self.buffer_start += read_len
if len(self.read_buffer) == self.buffer_start:
self.read_buffer = b""
self.buffer_start = 0
return out_str
self.connection.setblocking(0)
start_time = time.time()
out_len = len(self.read_buffer) - self.buffer_start
out_bytes = bytearray(read_len)
view = memoryview(out_bytes)
view[0:out_len] = self.read_buffer[self.buffer_start:]
self.read_buffer = b""
self.buffer_start = 0
while time.time() - start_time < timeout:
if out_len >= read_len:
break
to_read = read_len - out_len
to_read = min(to_read, 65535)
try:
nbytes = self.connection.recv_into(view[out_len:], to_read)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
# time.sleep(0.1)
time_left = start_time + timeout - time.time()
r, w, e = select.select([self.connection], [], [], time_left)
continue
else:
raise e
out_len += nbytes
if out_len < read_len:
raise socket.timeout()
return out_bytes
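# HTTP chunked transfer encoding: each chunk is "<hex size>\r\n<data>\r\n" and a
# zero-sized chunk terminates the body. _read_chunked below returns one chunk's
# payload with the trailing CRLF stripped, so an empty return means end of body.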
def _read_chunked(self, timeout):
line = self.read_line(timeout)
chunk_size = int(line, 16)
dat = self._read_plain(chunk_size + 2, timeout)
return dat[:-2]
def read(self, read_len=None, timeout=60):
#if not read_len and self.content_length is not None:
# read_len = int(self.content_length)
if not self.chunked:
data = self._read_plain(read_len, timeout)
else:
data = self._read_chunked(timeout)
return data
def readall(self, timeout=60):
start_time = time.time()
if self.chunked:
out_list = []
while True:
time_left = timeout - (time.time() - start_time)
if time_left < 0:
raise socket.timeout()
dat = self._read_chunked(time_left)
if not dat:
break
out_list.append(dat)
return b"".join(out_list)
else:
return self._read_plain(int(self.content_length), timeout=timeout)
class Client(object):
def __init__(self, proxy=None, timeout=60, cert=""):
self.timeout = timeout
self.cert = cert
self.connection = None
self.host = None
self.port = None
self.tls = None
if isinstance(proxy, str):
proxy_sp = urllib.parse.urlsplit(proxy)
self.proxy = {
"type": proxy_sp.scheme,
"host": proxy_sp.hostname,
"port": proxy_sp.port,
"user": proxy_sp.username,
"pass": proxy_sp.password
}
elif isinstance(proxy, dict):
self.proxy = proxy
else:
self.proxy = None
def direct_connect(self, host, port):
connect_timeout = 30
if b':' in host:
info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, "", (host, port, 0, 0))]
elif utils.check_ip_valid4(host):
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", (host, port))]
else:
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM)
except socket.gaierror:
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", (host, port))]
for res in info:
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32 * 1024)
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
s.settimeout(connect_timeout)
s.connect((host, port))
return s
except socket.error:
if s:
s.close()
return None
def connect(self, host, port, tls):
if self.connection and host == self.host and port == self.port and self.tls == tls:
return self.connection
if not self.proxy:
sock = self.direct_connect(host, port)
else:
connect_timeout = 5
import socks
sock = socks.socksocket(socket.AF_INET)
sock.set_proxy(proxy_type=self.proxy["type"],
addr=self.proxy["host"],
port=self.proxy["port"], rdns=True,
username=self.proxy["user"],
password=self.proxy["pass"])
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32*1024)
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
sock.settimeout(connect_timeout)
sock.connect((host, port))
# conn_time = time.time() - start_time
# xlog.debug("proxy:%s tcp conn:%s time:%d", proxy["host"], host, conn_time * 1000)
if tls:
if os.path.isfile(self.cert):
sock = ssl.wrap_socket(sock, ca_certs=self.cert)
else:
sock = ssl.wrap_socket(sock)
self.connection = sock
self.host = host
self.port = port
self.tls = tls
return sock
def request(self, method, url, headers={}, body=b"", read_payload=True):
method = utils.to_bytes(method)
url = utils.to_bytes(url)
upl = urllib.parse.urlsplit(url)
headers[b"Content-Length"] = str(len(body))
headers[b"Host"] = upl.netloc
port = upl.port
if not port:
if upl.scheme == b"http":
port = 80
elif upl.scheme == b"https":
port = 443
else:
raise Exception("unknown method:%s" % upl.scheme)
path = upl.path
if not path:
path = b"/"
if upl.query:
path += b"?" + upl.query
sock = self.connect(upl.hostname, port, upl.scheme == b"https")
if not sock:
return None
request_data = b'%s %s HTTP/1.1\r\n' % (method, path)
for k, v in headers.items():
if isinstance(v, int):
request_data += b'%s: %d\r\n' % (utils.to_bytes(k), v)
else:
request_data += b'%s: %s\r\n' % (utils.to_bytes(k), utils.to_bytes(v))
request_data += b'\r\n'
body = utils.to_bytes(body)
if len(request_data) + len(body) < 1300:
body = request_data + body
else:
sock.send(request_data)
payload_len = len(body)
start = 0
while start < payload_len:
send_size = min(payload_len - start, 65535)
sended = sock.send(body[start:start + send_size])
start += sended
sock.settimeout(self.timeout)
response = Response(sock)
response.begin(timeout=self.timeout)
if response.status != 200:
#logging.warn("status:%r", response.status)
return response
if not read_payload:
return response
if b'Transfer-Encoding' in response.headers:
data_buffer = []
while True:
try:
data = response.read(8192, timeout=self.timeout)
except http.client.IncompleteRead as e:
data = e.partial
except Exception as e:
raise e
if not data:
break
else:
data_buffer.append(data)
response.text = b"".join(data_buffer)
return response
else:
content_length = int(response.getheader(b'Content-Length', 0))
if content_length:
response.text = response.read(content_length, timeout=self.timeout)
return response
def request(method="GET", url=None, headers={}, body="", proxy=None, timeout=60, read_payload=True):
if not url:
raise Exception("no url")
client = Client(proxy, timeout=timeout)
return client.request(method, url, headers, body, read_payload)
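# Usage sketch (URL is a placeholder; requires the local `utils` helper module
# and, when a proxy is given, the PySocks package):
# resp = request("GET", "http://example.com/", timeout=15)
# if resp and resp.status == 200:
#     print(resp.getheader(b"Content-Type"), len(resp.text))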
|
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:5432")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:5432")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Renamedcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Renamedcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import Permission, Group, User
from django.contrib.auth import authenticate, login as django_login, logout as django_logout
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import render, redirect
from django.db.models import Q
from django.views.generic import View
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseForbidden
from .forms import PermissionForm, GroupForm, UserGroupForm
from .utils import AdminLTEBaseView, AdminMenu, Pager
class IndexView(View):
template_name = 'adminlte/index.html'
login_required = True
menu = None
permission = None
@staticmethod
def _default_is_login_func(request):
return not isinstance(request.user, AnonymousUser) and request.user.is_staff
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if getattr(self, 'login_required', True):
is_login = getattr(settings, 'ADMINLTE_IS_LOGIN_FUNC', self._default_is_login_func)
login_view = getattr(settings, 'ADMINLTE_LOGIN_VIEW', 'adminlte.login')
if not is_login(request):
return redirect(login_view)
if self.permission:
if not request.user.has_perm(self.permission):
return HttpResponseForbidden()
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return render(request, self.template_name)
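# A minimal, hypothetical subclass (illustrative only, not part of this
# module) showing the pattern the views below follow: `login_required`
# redirects anonymous or non-staff users to ADMINLTE_LOGIN_VIEW, and
# `permission` is checked with request.user.has_perm() before the handler
# runs (see dispatch() above). The class name and template path are made up.
#
#   class ReportView(AdminLTEBaseView):
#       permission = 'admin'
#       template_name = 'adminlte/reports/index.html'
#
#       def get(self, request, *args, **kwargs):
#           return render(request, self.template_name)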
class LogoutView(AdminLTEBaseView):
def get(self, request, *args, **kwargs):
django_logout(request)
return redirect(getattr(settings, 'ADMINLTE_LOGIN_VIEW', 'adminlte.login'))
class LoginView(AdminLTEBaseView):
login_required = False
def get(self, request, *args, **kwargs):
return render(request, 'adminlte/login.html')
def post(self, request, *args, **kwargs):
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password, type=1)
if not user or not user.is_staff:
message = 'user name or password error'
return render(request, 'adminlte/login.html', context={
"message": message
})
django_login(request, user)
return redirect('adminlte.index')
permission_group_menu = AdminMenu(name="Permissions", icon_classes='fa-lock')
class PermissionsView(AdminLTEBaseView):
permission = 'admin'
menu = AdminMenu('Permissions', parent_menu=permission_group_menu)
def get(self, request, *args, **kwargs):
search = request.GET.get('search', '')
query = Permission.objects.all()
if search:
query = query.filter(
Q(name__contains=search) | Q(codename__contains=search))
pager = Pager.from_request(query, request)
return render(request, 'adminlte/permissions/index.html', context={
"pager": pager,
"search": search,
})
class PermissionsCreateView(AdminLTEBaseView):
permission = 'admin'
menu = AdminMenu('Add Permission', parent_menu=permission_group_menu)
template_name = 'adminlte/permissions/edit.html'
def post(self, request, *args, **kwargs):
form = PermissionForm(request.POST)
if form.is_valid():
Permission.objects.create(codename=form.cleaned_data['codename'],
name=form.cleaned_data['name'],
content_type=ContentType.objects.get(model='permission'))
messages.add_message(request, messages.SUCCESS, 'add success')
return redirect('adminlte.permissions')
messages.add_message(request, messages.ERROR, 'params error')
return render(request, self.template_name)
class PermissionDeleteView(AdminLTEBaseView):
permission = 'admin'
_regex_name = '^permissions/(?P<pk>[0-9]+)/delete$'
def get(self, request, pk, *args, **kwargs):
permission = Permission.objects.get(pk=pk)
permission.delete()
messages.add_message(request, messages.SUCCESS, 'delete success')
return redirect('adminlte.permissions')
class PermissionEditView(AdminLTEBaseView):
permission = 'admin'
_regex_name = '^permissions/(?P<pk>[0-9]+)/edit$'
template_name = 'adminlte/permissions/edit.html'
def get(self, request, pk, *args, **kwargs):
permission = Permission.objects.get(pk=pk)
return render(request, self.template_name, context={
"permission": permission
})
def post(self, request, pk, *args, **kwargs):
form = PermissionForm(request.POST)
if form.is_valid():
permission = Permission.objects.get(pk=pk)
permission.codename = form.cleaned_data['codename']
permission.name = form.cleaned_data['name']
permission.save()
messages.add_message(request, messages.SUCCESS, 'edit success')
return redirect('adminlte.permissions')
messages.add_message(request, messages.ERROR, 'params error')
return render(request, self.template_name)
class GroupsView(AdminLTEBaseView):
permission = 'admin'
menu = AdminMenu('Groups', parent_menu=permission_group_menu)
def get(self, request, *args, **kwargs):
search = request.GET.get('search', '')
query = Group.objects.all()
if search:
query = query.filter(name__contains=search)
pager = Pager.from_request(query, request)
return render(request, 'adminlte/groups/index.html', context={
"pager": pager,
"search": search
})
class GroupCreateView(AdminLTEBaseView):
permission = 'admin'
menu = AdminMenu('Add Group', parent_menu=permission_group_menu)
template_name = 'adminlte/groups/edit.html'
def post(self, request, *args, **kwargs):
form = GroupForm(request.POST)
if form.is_valid():
Group.objects.create(name=form.cleaned_data['name'])
messages.add_message(request, messages.SUCCESS, 'add success')
return redirect('adminlte.groups')
messages.add_message(request, messages.ERROR, 'params error')
return render(request, self.template_name)
class GroupDeleteView(AdminLTEBaseView):
permission = 'admin'
_regex_name = '^groups/(?P<pk>[0-9]+)/delete$'
def get(self, request, pk, *args, **kwargs):
group = Group.objects.get(pk=pk)
group.delete()
messages.add_message(request, messages.SUCCESS, 'delete success')
return redirect('adminlte.groups')
class GroupEditView(AdminLTEBaseView):
permission = 'admin'
_regex_name = '^groups/(?P<pk>[0-9]+)/edit$'
template_name = 'adminlte/groups/edit.html'
def get(self, request, pk, *args, **kwargs):
group = Group.objects.get(pk=pk)
user_permissions = group.permissions.all()
permissions = Permission.objects.all()
return render(request, self.template_name, context={
"group": group,
"permissions": permissions,
"user_permissions": user_permissions,
})
def post(self, request, pk, *args, **kwargs):
form = GroupForm(request.POST)
if form.is_valid():
group = Group.objects.get(pk=pk)
group.name = form.cleaned_data['name']
permissions = form.cleaned_data['permissions']
delete_permissions = set(group.permissions.all()) - set(permissions)
add_permissions = set(permissions) - set(group.permissions.all())
for permission in delete_permissions:
group.permissions.remove(permission)
for permission in add_permissions:
group.permissions.add(permission)
group.save()
messages.add_message(request, messages.SUCCESS, 'edit success')
return redirect('adminlte.group.edit', pk=group.id)
messages.add_message(request, messages.ERROR, 'params error')
return render(request, self.template_name)
class UserGroupsView(AdminLTEBaseView):
permission = 'admin'
menu = AdminMenu('User groups', parent_menu=permission_group_menu)
def get(self, request, *args, **kwargs):
search = request.GET.get('search', '')
query = User.objects.all()
if search:
query = query.filter(
Q(username__contains=search) | Q(email__contains=search))
pager = Pager.from_request(query, request)
return render(request, 'adminlte/user_groups/index.html', context={
"pager": pager,
"search": search
})
class UserGroupEditView(AdminLTEBaseView):
permission = 'admin'
_regex_name = '^user_groups/(?P<pk>[0-9]+)/edit$'
template_name = 'adminlte/user_groups/edit.html'
def get(self, request, pk, *args, **kwargs):
user = User.objects.get(pk=pk)
user_groups = user.groups.all()
groups = Group.objects.all()
return render(request, self.template_name, context={
"user": user,
"user_groups": user_groups,
"groups": groups,
})
def post(self, request, pk, *args, **kwargs):
form = UserGroupForm(request.POST)
user = User.objects.get(pk=pk)
if form.is_valid():
groups = form.cleaned_data['groups']
delete_groups = set(user.groups.all()) - set(groups)
add_groups = set(groups) - set(user.groups.all())
for group in delete_groups:
user.groups.remove(group)
for group in add_groups:
user.groups.add(group)
user.save()
messages.add_message(request, messages.SUCCESS, 'edit success')
return redirect('adminlte.user.group.edit', pk=user.id)
messages.add_message(request, messages.ERROR, 'params error')
return redirect('adminlte.user.group.edit', pk=user.id)
|
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
r"""
autosummary_generate.py OPTIONS FILES
Generate automatic RST source files for items referred to in
autosummary:: directives.
Each generated RST file contains a single auto*:: directive which
extracts the docstring of the referred item.
Example Makefile rule::
generate:
./ext/autosummary_generate.py -o source/generated source/*.rst
"""
from __future__ import print_function
import glob
import re
import inspect
import os
import optparse
import pydoc
from autosummary import import_by_name
try:
from phantom_import import import_phantom_module
except ImportError:
import_phantom_module = lambda x: x
def main():
p = optparse.OptionParser(__doc__.strip())
p.add_option("-p", "--phantom", action="store", type="string",
dest="phantom", default=None,
help="Phantom import modules from a file")
p.add_option("-o", "--output-dir", action="store", type="string",
dest="output_dir", default=None,
help=("Write all output files to the given directory (instead "
"of writing them as specified in the autosummary:: "
"directives)"))
options, args = p.parse_args()
if len(args) == 0:
p.error("wrong number of arguments")
if options.phantom and os.path.isfile(options.phantom):
import_phantom_module(options.phantom)
# read
names = {}
for name, loc in list(get_documented(args).items()):
for (filename, sec_title, keyword, toctree) in loc:
if toctree is not None:
path = os.path.join(os.path.dirname(filename), toctree)
names[name] = os.path.abspath(path)
# write
for name, path in sorted(names.items()):
if options.output_dir is not None:
path = options.output_dir
if not os.path.isdir(path):
os.makedirs(path)
try:
obj, name = import_by_name(name)
except ImportError as e:
print("Failed to import '%s': %s" % (name, e))
continue
fn = os.path.join(path, '%s.rst' % name)
if os.path.exists(fn):
# skip
continue
f = open(fn, 'w')
try:
f.write('%s\n%s\n\n' % (name, '=' * len(name)))
if inspect.isclass(obj):
if issubclass(obj, Exception):
f.write(format_modulemember(name, 'autoexception'))
else:
f.write(format_modulemember(name, 'autoclass'))
elif inspect.ismodule(obj):
f.write(format_modulemember(name, 'automodule'))
elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj):
f.write(format_classmember(name, 'automethod'))
elif callable(obj):
f.write(format_modulemember(name, 'autofunction'))
elif hasattr(obj, '__get__'):
f.write(format_classmember(name, 'autoattribute'))
else:
f.write(format_modulemember(name, 'autofunction'))
finally:
f.close()
def format_modulemember(name, directive):
parts = name.split('.')
mod, name = '.'.join(parts[:-1]), parts[-1]
return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name)
def format_classmember(name, directive):
parts = name.split('.')
mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:])
return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name)
def get_documented(filenames):
"""
Find out what items are documented in source/*.rst
See `get_documented_in_lines`.
"""
documented = {}
for filename in filenames:
f = open(filename, 'r')
lines = f.read().splitlines()
documented.update(get_documented_in_lines(lines, filename=filename))
f.close()
return documented
def get_documented_in_docstring(name, module=None, filename=None):
"""
Find out what items are documented in the given object's docstring.
See `get_documented_in_lines`.
"""
try:
obj, real_name = import_by_name(name)
lines = pydoc.getdoc(obj).splitlines()
return get_documented_in_lines(lines, module=name, filename=filename)
except AttributeError:
pass
except ImportError as e:
print("Failed to import '%s': %s" % (name, e))
return {}
def get_documented_in_lines(lines, module=None, filename=None):
"""
Find out what items are documented in the given lines
Returns
-------
documented : dict of list of (filename, title, keyword, toctree)
Dictionary whose keys are documented names of objects.
The value is a list of locations where the object was documented.
Each location is a tuple of filename, the current section title,
the name of the directive, and the value of the :toctree: argument
(if present) of the directive.
"""
title_underline_re = re.compile(r"^[-=*_^#]{3,}\s*$")
autodoc_re = re.compile(r".. auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$")
autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*')
module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
documented = {}
current_title = []
last_line = None
toctree = None
current_module = module
in_autosummary = False
for line in lines:
try:
if in_autosummary:
m = toctree_arg_re.match(line)
if m:
toctree = m.group(1)
continue
if line.strip().startswith(':'):
continue # skip options
m = autosummary_item_re.match(line)
if m:
name = m.group(1).strip()
if current_module and not name.startswith(current_module + '.'):
name = "%s.%s" % (current_module, name)
documented.setdefault(name, []).append(
(filename, current_title, 'autosummary', toctree))
continue
if line.strip() == '':
continue
in_autosummary = False
m = autosummary_re.match(line)
if m:
in_autosummary = True
continue
m = autodoc_re.search(line)
if m:
name = m.group(2).strip()
if m.group(1) == "module":
current_module = name
documented.update(get_documented_in_docstring(
name, filename=filename))
elif current_module and not name.startswith(current_module + '.'):
name = "%s.%s" % (current_module, name)
documented.setdefault(name, []).append(
(filename, current_title, "auto" + m.group(1), None))
continue
m = title_underline_re.match(line)
if m and last_line:
current_title = last_line.strip()
continue
m = module_re.match(line)
if m:
current_module = m.group(2)
continue
finally:
last_line = line
return documented
if __name__ == "__main__":
main()
|
|
"""
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# example summary provider for NSArray
# the real summary is now C++ code built into LLDB
import lldb
import ctypes
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
# much less functional than the other two cases below
# just runs code to get to the count and then returns
# no children
class NSArrayKVC_SynthProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, dict, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def num_children(self):
logger = lldb.formatters.Logger.Logger()
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
num_children_vo = self.valobj.CreateValueFromExpression(
"count", "(int)[" + stream.GetData() + " count]")
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return "<variable is not NSArray>"
# much less functional than the other two cases below
# just runs code to get to the count and then returns
# no children
class NSArrayCF_SynthProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, dict, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not (self.sys_params.types_cache.ulong):
self.sys_params.types_cache.ulong = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def num_children(self):
logger = lldb.formatters.Logger.Logger()
num_children_vo = self.valobj.CreateChildAtOffset(
"count", self.sys_params.cfruntime_size, self.sys_params.types_cache.ulong)
return num_children_vo.GetValueAsUnsigned(0)
class NSArrayI_SynthProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, dict, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.long):
self.sys_params.types_cache.long = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeLong)
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
# skip the isa pointer and get at the size
def num_children(self):
logger = lldb.formatters.Logger.Logger()
count = self.valobj.CreateChildAtOffset(
"count",
self.sys_params.pointer_size,
self.sys_params.types_cache.long)
return count.GetValueAsUnsigned(0)
class NSArrayM_SynthProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, dict, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.long):
self.sys_params.types_cache.long = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeLong)
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
# skip the isa pointer and get at the size
def num_children(self):
logger = lldb.formatters.Logger.Logger()
count = self.valobj.CreateChildAtOffset(
"count",
self.sys_params.pointer_size,
self.sys_params.types_cache.long)
return count.GetValueAsUnsigned(0)
# this is the actual synth provider, but is just a wrapper that checks
# whether valobj is an instance of __NSArrayI or __NSArrayM and sets up an
# appropriate backend layer to do the computations
class NSArray_SynthProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, dict):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.adjust_for_architecture()
self.error = False
self.wrapper = self.make_wrapper()
self.invalid = (self.wrapper is None)
def num_children(self):
logger = lldb.formatters.Logger.Logger()
if self.wrapper is None:
return 0
return self.wrapper.num_children()
def update(self):
logger = lldb.formatters.Logger.Logger()
if self.wrapper is None:
return
self.wrapper.update()
# this code acts as our defense against NULL and uninitialized
# NSArray pointers, which makes it much longer than it would be otherwise
def make_wrapper(self):
logger = lldb.formatters.Logger.Logger()
if self.valobj.GetValueAsUnsigned() == 0:
self.error = True
return lldb.runtime.objc.objc_runtime.InvalidPointer_Description(
True)
else:
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
self.valobj, statistics)
if wrapper:
self.error = True
return wrapper
name_string = class_data.class_name()
logger >> "Class name is " + str(name_string)
if name_string == '__NSArrayI':
wrapper = NSArrayI_SynthProvider(
self.valobj, dict, class_data.sys_params)
statistics.metric_hit('code_notrun', self.valobj.GetName())
elif name_string == '__NSArrayM':
wrapper = NSArrayM_SynthProvider(
self.valobj, dict, class_data.sys_params)
statistics.metric_hit('code_notrun', self.valobj.GetName())
elif name_string == '__NSCFArray':
wrapper = NSArrayCF_SynthProvider(
self.valobj, dict, class_data.sys_params)
statistics.metric_hit('code_notrun', self.valobj.GetName())
else:
wrapper = NSArrayKVC_SynthProvider(
self.valobj, dict, class_data.sys_params)
statistics.metric_hit(
'unknown_class', str(
self.valobj.GetName()) + " seen as " + name_string)
return wrapper
def CFArray_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
provider = NSArray_SynthProvider(valobj, dict)
if not provider.invalid:
if provider.error:
return provider.wrapper.message()
try:
summary = int(provider.num_children())
except:
summary = None
logger >> "provider gave me " + str(summary)
if summary is None:
summary = '<variable is not NSArray>'
elif isinstance(summary, basestring):
pass
else:
# we format it like it were a CFString to make it look the same as
# the summary from Xcode
summary = '@"' + str(summary) + \
(" objects" if summary != 1 else " object") + '"'
return summary
return 'Summary Unavailable'
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F CFArray.CFArray_SummaryProvider NSArray CFArrayRef CFMutableArrayRef")
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For miscellaneous util methods used with volume."""
import datetime
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import throttling
from cinder.volume import utils as volume_utils
CONF = cfg.CONF
class NotifyUsageTestCase(test.TestCase):
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_volume_usage(self, mock_rpc, mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_volume_usage(mock.sentinel.context,
mock.sentinel.volume,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume)
mock_rpc.get_notifier.assert_called_once_with('volume', 'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'volume.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_volume_usage_with_kwargs(self, mock_rpc, mock_conf,
mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_volume_usage(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume, a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('volume', 'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'volume.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_usage(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_replication_usage(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume)
mock_rpc.get_notifier.assert_called_once_with('replication', 'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'replication.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_usage_with_kwargs(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_replication_usage(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume,
a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('replication', 'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'replication.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_error(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_replication_error(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume)
mock_rpc.get_notifier.assert_called_once_with('replication', 'host1')
mock_rpc.get_notifier.return_value.error.assert_called_once_with(
mock.sentinel.context,
'replication.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_error_with_kwargs(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_replication_error(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix',
extra_error_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume,
a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('replication', 'host2')
mock_rpc.get_notifier.return_value.error.assert_called_once_with(
mock.sentinel.context,
'replication.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_snapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_snapshot_usage(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_snapshot_usage(
mock.sentinel.context,
mock.sentinel.snapshot,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.snapshot)
mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'snapshot.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_snapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_snapshot_usage_with_kwargs(self, mock_rpc, mock_conf,
mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_snapshot_usage(
mock.sentinel.context,
mock.sentinel.snapshot,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.snapshot,
a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'snapshot.test_suffix',
mock_usage.return_value)
def test_usage_from_snapshot(self):
raw_snapshot = {
'project_id': '12b0330ec2584a',
'user_id': '158cba1b8c2bb6008e',
'volume': {'availability_zone': 'nova'},
'volume_id': '55614621',
'volume_size': 1,
'id': '343434a2',
'display_name': '11',
'created_at': '2014-12-11T10:10:00',
'status': 'pause',
'deleted': '',
'metadata': {'fake_snap_meta_key': 'fake_snap_meta_value'},
}
usage_info = volume_utils._usage_from_snapshot(raw_snapshot)
expected_snapshot = {
'tenant_id': '12b0330ec2584a',
'user_id': '158cba1b8c2bb6008e',
'availability_zone': 'nova',
'volume_id': '55614621',
'volume_size': 1,
'snapshot_id': '343434a2',
'display_name': '11',
'created_at': '2014-12-11T10:10:00',
'status': 'pause',
'deleted': '',
'metadata': "{'fake_snap_meta_key': 'fake_snap_meta_value'}",
}
self.assertEqual(expected_snapshot, usage_info)
@mock.patch('cinder.db.volume_glance_metadata_get')
@mock.patch('cinder.db.volume_attachment_get_used_by_volume_id')
def test_usage_from_volume(self, mock_attachment, mock_image_metadata):
mock_image_metadata.return_value = {'image_id': 'fake_image_id'}
mock_attachment.return_value = [{'instance_uuid': 'fake_instance_id'}]
raw_volume = {
'project_id': '12b0330ec2584a',
'user_id': '158cba1b8c2bb6008e',
'host': 'fake_host',
'availability_zone': 'nova',
'volume_type_id': 'fake_volume_type_id',
'id': 'fake_volume_id',
'size': 1,
'display_name': 'test_volume',
'created_at': datetime.datetime(2015, 1, 1, 1, 1, 1),
'launched_at': datetime.datetime(2015, 1, 1, 1, 1, 1),
'snapshot_id': None,
'replication_status': None,
'replication_extended_status': None,
'replication_driver_data': None,
'status': 'available',
'volume_metadata': {'fake_metadata_key': 'fake_metadata_value'},
}
usage_info = volume_utils._usage_from_volume(
mock.sentinel.context,
raw_volume)
expected_volume = {
'tenant_id': '12b0330ec2584a',
'user_id': '158cba1b8c2bb6008e',
'host': 'fake_host',
'availability_zone': 'nova',
'volume_type': 'fake_volume_type_id',
'volume_id': 'fake_volume_id',
'size': 1,
'display_name': 'test_volume',
'created_at': '2015-01-01T01:01:01',
'launched_at': '2015-01-01T01:01:01',
'snapshot_id': None,
'replication_status': None,
'replication_extended_status': None,
'replication_driver_data': None,
'status': 'available',
'metadata': {'fake_metadata_key': 'fake_metadata_value'},
'glance_metadata': {'image_id': 'fake_image_id'},
'volume_attachment': [{'instance_uuid': 'fake_instance_id'}],
}
self.assertEqual(expected_volume, usage_info)
@mock.patch('cinder.volume.utils._usage_from_consistencygroup')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_consistencygroup_usage(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_consistencygroup_usage(
mock.sentinel.context,
mock.sentinel.consistencygroup,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.consistencygroup)
mock_rpc.get_notifier.assert_called_once_with('consistencygroup',
'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'consistencygroup.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_consistencygroup')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_consistencygroup_usage_with_kwargs(self, mock_rpc,
mock_conf,
mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_consistencygroup_usage(
mock.sentinel.context,
mock.sentinel.consistencygroup,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.consistencygroup,
a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('consistencygroup',
'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'consistencygroup.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_cgsnapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_cgsnapshot_usage(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_cgsnapshot_usage(
mock.sentinel.context,
mock.sentinel.cgsnapshot,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.cgsnapshot)
mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'cgsnapshot.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_cgsnapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_cgsnapshot_usage_with_kwargs(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_cgsnapshot_usage(
mock.sentinel.context,
mock.sentinel.cgsnapshot,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.cgsnapshot,
a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'cgsnapshot.test_suffix',
mock_usage.return_value)
def test_usage_from_backup(self):
raw_backup = {
'project_id': '12b0330ec2584a',
'user_id': '158cba1b8c2bb6008e',
'availability_zone': 'nova',
'id': 'fake_id',
'host': 'fake_host',
'display_name': 'test_backup',
'created_at': '2014-12-11T10:10:00',
'status': 'available',
'volume_id': 'fake_volume_id',
'size': 1,
'service_metadata': None,
'service': 'cinder.backup.drivers.swift',
'fail_reason': None,
'parent_id': 'fake_parent_id',
'num_dependent_backups': 0,
}
# Make it easier to find out differences between raw and expected.
expected_backup = raw_backup.copy()
expected_backup['tenant_id'] = expected_backup.pop('project_id')
expected_backup['backup_id'] = expected_backup.pop('id')
usage_info = volume_utils._usage_from_backup(raw_backup)
self.assertEqual(expected_backup, usage_info)
class LVMVolumeDriverTestCase(test.TestCase):
def test_convert_blocksize_option(self):
# Test valid volume_dd_blocksize
bs, count = volume_utils._calculate_count(1024, '10M')
self.assertEqual('10M', bs)
self.assertEqual(103, count)
bs, count = volume_utils._calculate_count(1024, '1xBBB')
self.assertEqual('1M', bs)
self.assertEqual(1024, count)
# Test 'volume_dd_blocksize' with fraction
bs, count = volume_utils._calculate_count(1024, '1.3M')
self.assertEqual('1M', bs)
self.assertEqual(1024, count)
# Test zero-size 'volume_dd_blocksize'
bs, count = volume_utils._calculate_count(1024, '0M')
self.assertEqual('1M', bs)
self.assertEqual(1024, count)
# Test negative 'volume_dd_blocksize'
bs, count = volume_utils._calculate_count(1024, '-1M')
self.assertEqual('1M', bs)
self.assertEqual(1024, count)
# Test non-digital 'volume_dd_blocksize'
bs, count = volume_utils._calculate_count(1024, 'ABM')
self.assertEqual('1M', bs)
self.assertEqual(1024, count)
class OdirectSupportTestCase(test.TestCase):
@mock.patch('cinder.utils.execute')
def test_check_for_odirect_support(self, mock_exec):
output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def')
self.assertTrue(output)
mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc',
'of=/dev/def', 'oflag=direct',
run_as_root=True)
mock_exec.reset_mock()
output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def',
'iflag=direct')
self.assertTrue(output)
mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc',
'of=/dev/def', 'iflag=direct',
run_as_root=True)
@mock.patch('cinder.utils.execute',
side_effect=processutils.ProcessExecutionError)
def test_check_for_odirect_support_error(self, mock_exec):
output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def')
self.assertFalse(output)
mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc',
'of=/dev/def', 'oflag=direct',
run_as_root=True)
class ClearVolumeTestCase(test.TestCase):
@mock.patch('cinder.volume.utils.copy_volume', return_value=None)
@mock.patch('cinder.volume.utils.CONF')
def test_clear_volume_conf(self, mock_conf, mock_copy):
mock_conf.volume_clear = 'zero'
mock_conf.volume_clear_size = 0
mock_conf.volume_dd_blocksize = '1M'
mock_conf.volume_clear_ionice = '-c3'
output = volume_utils.clear_volume(1024, 'volume_path')
self.assertIsNone(output)
mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1024,
'1M', sync=True,
execute=utils.execute, ionice='-c3',
throttle=None, sparse=False)
@mock.patch('cinder.volume.utils.copy_volume', return_value=None)
@mock.patch('cinder.volume.utils.CONF')
def test_clear_volume_args(self, mock_conf, mock_copy):
mock_conf.volume_clear = 'shred'
mock_conf.volume_clear_size = 0
mock_conf.volume_dd_blocksize = '1M'
mock_conf.volume_clear_ionice = '-c3'
output = volume_utils.clear_volume(1024, 'volume_path', 'zero', 1,
'-c0')
self.assertIsNone(output)
mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1,
'1M', sync=True,
execute=utils.execute, ionice='-c0',
throttle=None, sparse=False)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.utils.CONF')
def test_clear_volume_shred(self, mock_conf, mock_exec):
mock_conf.volume_clear = 'shred'
mock_conf.volume_clear_size = 1
mock_conf.volume_clear_ionice = None
output = volume_utils.clear_volume(1024, 'volume_path')
self.assertIsNone(output)
mock_exec.assert_called_once_with(
'shred', '-n3', '-s1MiB', "volume_path", run_as_root=True)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.utils.CONF')
def test_clear_volume_shred_not_clear_size(self, mock_conf, mock_exec):
mock_conf.volume_clear = 'shred'
mock_conf.volume_clear_size = None
mock_conf.volume_clear_ionice = None
output = volume_utils.clear_volume(1024, 'volume_path')
self.assertIsNone(output)
mock_exec.assert_called_once_with(
'shred', '-n3', "volume_path", run_as_root=True)
@mock.patch('cinder.volume.utils.CONF')
def test_clear_volume_invalid_opt(self, mock_conf):
mock_conf.volume_clear = 'non_existent_volume_clearer'
mock_conf.volume_clear_size = 0
mock_conf.volume_clear_ionice = None
self.assertRaises(exception.InvalidConfigurationValue,
volume_utils.clear_volume,
1024, "volume_path")
class CopyVolumeTestCase(test.TestCase):
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=True)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.utils.CONF')
def test_copy_volume_dd_iflag_and_oflag(self, mock_conf, mock_exec,
mock_support, mock_count):
fake_throttle = throttling.Throttle(['fake_throttle'])
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
ionice=None, throttle=fake_throttle)
self.assertIsNone(output)
mock_exec.assert_called_once_with('fake_throttle', 'dd',
'if=/dev/zero',
'of=/dev/null', 'count=5678',
'bs=1234', 'iflag=direct',
'oflag=direct', run_as_root=True)
mock_exec.reset_mock()
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=False, execute=utils.execute,
ionice=None, throttle=fake_throttle)
self.assertIsNone(output)
mock_exec.assert_called_once_with('fake_throttle', 'dd',
'if=/dev/zero',
'of=/dev/null', 'count=5678',
'bs=1234', 'iflag=direct',
'oflag=direct', run_as_root=True)
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=False)
@mock.patch('cinder.utils.execute')
def test_copy_volume_dd_no_iflag_or_oflag(self, mock_exec,
mock_support, mock_count):
fake_throttle = throttling.Throttle(['fake_throttle'])
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
ionice=None, throttle=fake_throttle)
self.assertIsNone(output)
mock_exec.assert_called_once_with('fake_throttle', 'dd',
'if=/dev/zero',
'of=/dev/null', 'count=5678',
'bs=1234', 'conv=fdatasync',
run_as_root=True)
mock_exec.reset_mock()
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=False, execute=utils.execute,
ionice=None, throttle=fake_throttle)
self.assertIsNone(output)
mock_exec.assert_called_once_with('fake_throttle', 'dd',
'if=/dev/zero',
'of=/dev/null', 'count=5678',
'bs=1234', run_as_root=True)
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=False)
@mock.patch('cinder.utils.execute')
def test_copy_volume_dd_no_throttle(self, mock_exec, mock_support,
mock_count):
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
ionice=None)
self.assertIsNone(output)
mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null',
'count=5678', 'bs=1234',
'conv=fdatasync', run_as_root=True)
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=False)
@mock.patch('cinder.utils.execute')
def test_copy_volume_dd_with_ionice(self, mock_exec,
mock_support, mock_count):
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
ionice='-c3')
self.assertIsNone(output)
mock_exec.assert_called_once_with('ionice', '-c3', 'dd',
'if=/dev/zero', 'of=/dev/null',
'count=5678', 'bs=1234',
'conv=fdatasync', run_as_root=True)
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=False)
@mock.patch('cinder.utils.execute')
def test_copy_volume_dd_with_sparse(self, mock_exec,
mock_support, mock_count):
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
sparse=True)
self.assertIsNone(output)
mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null',
'count=5678', 'bs=1234',
'conv=fdatasync,sparse',
run_as_root=True)
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=True)
@mock.patch('cinder.utils.execute')
def test_copy_volume_dd_with_sparse_iflag_and_oflag(self, mock_exec,
mock_support,
mock_count):
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
sparse=True)
self.assertIsNone(output)
mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null',
'count=5678', 'bs=1234',
'iflag=direct', 'oflag=direct',
'conv=sparse', run_as_root=True)
class VolumeUtilsTestCase(test.TestCase):
def test_null_safe_str(self):
self.assertEqual('', volume_utils.null_safe_str(None))
self.assertEqual('', volume_utils.null_safe_str(False))
self.assertEqual('', volume_utils.null_safe_str(0))
self.assertEqual('', volume_utils.null_safe_str([]))
self.assertEqual('', volume_utils.null_safe_str(()))
self.assertEqual('', volume_utils.null_safe_str({}))
self.assertEqual('', volume_utils.null_safe_str(set()))
self.assertEqual('a', volume_utils.null_safe_str('a'))
self.assertEqual('1', volume_utils.null_safe_str(1))
self.assertEqual('True', volume_utils.null_safe_str(True))
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning')
def test_supports_thin_provisioning(self, mock_supports_thin, mock_helper):
self.assertEqual(mock_supports_thin.return_value,
volume_utils.supports_thin_provisioning())
mock_helper.assert_called_once_with()
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
def test_get_all_physical_volumes(self, mock_get_vols, mock_helper):
self.assertEqual(mock_get_vols.return_value,
volume_utils.get_all_physical_volumes())
mock_helper.assert_called_once_with()
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_volume_groups')
def test_get_all_volume_groups(self, mock_get_groups, mock_helper):
self.assertEqual(mock_get_groups.return_value,
volume_utils.get_all_volume_groups())
mock_helper.assert_called_once_with()
def test_generate_password(self):
password = volume_utils.generate_password()
self.assertTrue(any(c for c in password if c in '23456789'))
self.assertTrue(any(c for c in password
if c in 'abcdefghijkmnopqrstuvwxyz'))
self.assertTrue(any(c for c in password
if c in 'ABCDEFGHJKLMNPQRSTUVWXYZ'))
self.assertEqual(16, len(password))
self.assertEqual(10, len(volume_utils.generate_password(10)))
@mock.patch('cinder.volume.utils.generate_password')
def test_generate_username(self, mock_gen_pass):
output = volume_utils.generate_username()
self.assertEqual(mock_gen_pass.return_value, output)
def test_extract_host(self):
host = 'Host'
# default level is 'backend'
self.assertEqual(host,
volume_utils.extract_host(host))
self.assertEqual(host,
volume_utils.extract_host(host, 'host'))
self.assertEqual(host,
volume_utils.extract_host(host, 'backend'))
# default_pool_name doesn't work for level other than 'pool'
self.assertEqual(host,
volume_utils.extract_host(host, 'host', True))
self.assertEqual(host,
volume_utils.extract_host(host, 'host', False))
self.assertEqual(host,
volume_utils.extract_host(host, 'backend', True))
self.assertEqual(host,
volume_utils.extract_host(host, 'backend', False))
self.assertEqual(None,
volume_utils.extract_host(host, 'pool'))
self.assertEqual('_pool0',
volume_utils.extract_host(host, 'pool', True))
host = 'Host@Backend'
self.assertEqual('Host@Backend',
volume_utils.extract_host(host))
self.assertEqual('Host',
volume_utils.extract_host(host, 'host'))
self.assertEqual(host,
volume_utils.extract_host(host, 'backend'))
self.assertEqual(None,
volume_utils.extract_host(host, 'pool'))
self.assertEqual('_pool0',
volume_utils.extract_host(host, 'pool', True))
host = 'Host@Backend#Pool'
pool = 'Pool'
self.assertEqual('Host@Backend',
volume_utils.extract_host(host))
self.assertEqual('Host',
volume_utils.extract_host(host, 'host'))
self.assertEqual('Host@Backend',
volume_utils.extract_host(host, 'backend'))
self.assertEqual(pool,
volume_utils.extract_host(host, 'pool'))
self.assertEqual(pool,
volume_utils.extract_host(host, 'pool', True))
host = 'Host#Pool'
self.assertEqual('Host',
volume_utils.extract_host(host))
self.assertEqual('Host',
volume_utils.extract_host(host, 'host'))
self.assertEqual('Host',
volume_utils.extract_host(host, 'backend'))
self.assertEqual(pool,
volume_utils.extract_host(host, 'pool'))
self.assertEqual(pool,
volume_utils.extract_host(host, 'pool', True))
def test_append_host(self):
host = 'Host'
pool = 'Pool'
expected = 'Host#Pool'
self.assertEqual(expected,
volume_utils.append_host(host, pool))
pool = None
expected = 'Host'
self.assertEqual(expected,
volume_utils.append_host(host, pool))
host = None
pool = 'pool'
expected = None
self.assertEqual(expected,
volume_utils.append_host(host, pool))
host = None
pool = None
expected = None
self.assertEqual(expected,
volume_utils.append_host(host, pool))
def test_compare_hosts(self):
host_1 = 'fake_host@backend1'
host_2 = 'fake_host@backend1#pool1'
self.assertTrue(volume_utils.hosts_are_equivalent(host_1, host_2))
host_2 = 'fake_host@backend1'
self.assertTrue(volume_utils.hosts_are_equivalent(host_1, host_2))
host_2 = 'fake_host2@backend1'
self.assertFalse(volume_utils.hosts_are_equivalent(host_1, host_2))
def test_check_managed_volume_already_managed(self):
mock_db = mock.Mock()
result = volume_utils.check_already_managed_volume(
mock_db, 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1')
self.assertTrue(result)
@mock.patch('cinder.volume.utils.CONF')
def test_check_already_managed_with_vol_id_vol_pattern(self, conf_mock):
mock_db = mock.Mock()
conf_mock.volume_name_template = 'volume-%s-volume'
result = volume_utils.check_already_managed_volume(
mock_db, 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1-volume')
self.assertTrue(result)
@mock.patch('cinder.volume.utils.CONF')
def test_check_already_managed_with_id_vol_pattern(self, conf_mock):
mock_db = mock.Mock()
conf_mock.volume_name_template = '%s-volume'
result = volume_utils.check_already_managed_volume(
mock_db, 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1-volume')
self.assertTrue(result)
def test_check_managed_volume_not_managed_cinder_like_name(self):
mock_db = mock.Mock()
mock_db.volume_get = mock.Mock(
side_effect=exception.VolumeNotFound(
'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'))
result = volume_utils.check_already_managed_volume(
mock_db, 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1')
self.assertFalse(result)
def test_check_managed_volume_not_managed(self):
mock_db = mock.Mock()
result = volume_utils.check_already_managed_volume(
mock_db, 'test-volume')
self.assertFalse(result)
def test_check_managed_volume_not_managed_id_like_uuid(self):
mock_db = mock.Mock()
result = volume_utils.check_already_managed_volume(
mock_db, 'volume-d8cd1fe')
self.assertFalse(result)
def test_convert_config_string_to_dict(self):
test_string = "{'key-1'='val-1' 'key-2'='val-2' 'key-3'='val-3'}"
expected_dict = {'key-1': 'val-1', 'key-2': 'val-2', 'key-3': 'val-3'}
self.assertEqual(
expected_dict,
volume_utils.convert_config_string_to_dict(test_string))
def test_process_reserve_over_quota(self):
ctxt = context.get_admin_context()
ctxt.project_id = 'fake'
overs_one = ['gigabytes']
over_two = ['snapshots']
usages = {'gigabytes': {'reserved': 1, 'in_use': 9},
'snapshots': {'reserved': 1, 'in_use': 9}}
quotas = {'gigabytes': 10, 'snapshots': 10}
size = 1
self.assertRaises(exception.VolumeSizeExceedsAvailableQuota,
volume_utils.process_reserve_over_quota,
ctxt, overs_one, usages, quotas, size)
self.assertRaises(exception.SnapshotLimitExceeded,
volume_utils.process_reserve_over_quota,
ctxt, over_two, usages, quotas, size)
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Exploration-related jobs."""
from core import jobs_registry
from core.domain import exp_domain
from core.domain import exp_jobs_one_off
from core.domain import exp_services
from core.domain import rights_manager
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(job_models, exp_models,) = models.Registry.import_models([
models.NAMES.job, models.NAMES.exploration])
search_services = models.Registry.import_search_services()
class ExpSummariesCreationOneOffJobTest(test_utils.GenericTestBase):
"""Tests for ExpSummary aggregations."""
ONE_OFF_JOB_MANAGERS_FOR_TESTS = [
exp_jobs_one_off.ExpSummariesCreationOneOffJob]
# Specify explorations that will be used in the test.
EXP_SPECS = [{
'category': 'Category A',
'title': 'Title 1'
}, {
'category': 'Category B',
'title': 'Title 2'
}, {
'category': 'Category C',
'title': 'Title 3'
}, {
'category': 'Category A',
'title': 'Title 4'
}, {
'category': 'Category C',
'title': 'Title 5'
}]
def test_all_exps_publicized(self):
"""Test exploration summary batch job if all explorations are
publicized.
"""
self._run_batch_job_once_and_verify_output(
self.EXP_SPECS,
default_status=rights_manager.ACTIVITY_STATUS_PUBLICIZED)
def test_all_exps_public(self):
"""Test summary batch job if all explorations are public
but not publicized."""
self._run_batch_job_once_and_verify_output(
self.EXP_SPECS,
default_status=rights_manager.ACTIVITY_STATUS_PUBLIC)
def test_exps_some_publicized(self):
"""Test summary batch job if some explorations are publicized."""
exp_specs = [{
'category': 'Category A',
'status': rights_manager.ACTIVITY_STATUS_PUBLIC,
'title': 'Title 1'
}, {
'category': 'Category B',
'status': rights_manager.ACTIVITY_STATUS_PUBLICIZED,
'title': 'Title 2'
}, {
'category': 'Category C',
'status': rights_manager.ACTIVITY_STATUS_PRIVATE,
'title': 'Title 3'
}, {
'category': 'Category A',
'status': rights_manager.ACTIVITY_STATUS_PUBLICIZED,
'title': 'Title 4'
}, {
'category': 'Category C',
'status': rights_manager.ACTIVITY_STATUS_PUBLICIZED,
'title': 'Title 5'
}]
self._run_batch_job_once_and_verify_output(exp_specs)
def _run_batch_job_once_and_verify_output(
self, exp_specs,
default_title='A title',
default_category='A category',
default_status=rights_manager.ACTIVITY_STATUS_PUBLICIZED):
"""Run batch job for creating exploration summaries once and verify its
output. exp_specs is a list of dicts with exploration specifications.
Allowed keys are category, status, title. If a key is not specified,
the default value is used.
"""
with self.swap(
jobs_registry, 'ONE_OFF_JOB_MANAGERS',
self.ONE_OFF_JOB_MANAGERS_FOR_TESTS
):
default_spec = {
'title': default_title,
'category': default_category,
'status': default_status
}
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.login(self.ADMIN_EMAIL)
admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
# Create and delete an exploration (to make sure job handles
# deleted explorations correctly).
exp_id = '100'
self.save_new_valid_exploration(
exp_id,
admin_id,
title=default_spec['title'],
category=default_spec['category'])
exploration = exp_services.get_exploration_by_id(exp_id)
exp_services.delete_exploration(admin_id, exp_id)
# Get dummy explorations.
num_exps = len(exp_specs)
expected_job_output = {}
for ind in range(num_exps):
exp_id = str(ind)
spec = default_spec
spec.update(exp_specs[ind])
self.save_new_valid_exploration(
exp_id,
admin_id,
title=spec['title'],
category=spec['category'])
exploration = exp_services.get_exploration_by_id(exp_id)
# Publish or publicize exploration.
if spec['status'] == rights_manager.ACTIVITY_STATUS_PUBLIC:
rights_manager.publish_exploration(admin_id, exp_id)
elif (
spec['status'] ==
rights_manager.ACTIVITY_STATUS_PUBLICIZED):
rights_manager.publish_exploration(admin_id, exp_id)
rights_manager.publicize_exploration(admin_id, exp_id)
# Do not include user_id here, so that no explorations are
# editable for now (this will be updated depending on user_id
# in galleries).
exp_rights_model = exp_models.ExplorationRightsModel.get(
exp_id)
exploration = exp_services.get_exploration_by_id(exp_id)
exploration_model_last_updated = exploration.last_updated
exploration_model_created_on = exploration.created_on
# Manually create the expected summary specifying title,
# category, etc.
expected_job_output[exp_id] = exp_domain.ExplorationSummary(
exp_id,
spec['title'],
spec['category'],
exploration.objective,
exploration.language_code,
exploration.tags,
feconf.get_empty_ratings(),
spec['status'],
exp_rights_model.community_owned,
exp_rights_model.owner_ids,
exp_rights_model.editor_ids,
exp_rights_model.viewer_ids,
[admin_id],
{admin_id: 1},
exploration.version,
exploration_model_created_on,
exploration_model_last_updated)
# Note: these optional fields cannot simply be passed to the
# constructor when they are unset, because they would then be
# stored as empty lists in expected_job_output while remaining
# unspecified in actual_job_output.
if exploration.tags:
expected_job_output[exp_id].tags = exploration.tags
if exp_rights_model.owner_ids:
expected_job_output[exp_id].owner_ids = (
exp_rights_model.owner_ids)
if exp_rights_model.editor_ids:
expected_job_output[exp_id].editor_ids = (
exp_rights_model.editor_ids)
if exp_rights_model.viewer_ids:
expected_job_output[exp_id].viewer_ids = (
exp_rights_model.viewer_ids)
if exploration.version:
expected_job_output[exp_id].version = (
exploration.version)
# Run batch job.
job_id = (
exp_jobs_one_off.ExpSummariesCreationOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesCreationOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Get and check job output.
actual_job_output = exp_services.get_all_exploration_summaries()
self.assertEqual(
actual_job_output.keys(), expected_job_output.keys())
# Note: 'exploration_model_last_updated' is not expected to be the
# same, because it is now read from the version model representing
# the exploration's history snapshot, and not the ExplorationModel.
simple_props = ['id', 'title', 'category', 'objective',
'language_code', 'tags', 'ratings', 'status',
'community_owned', 'owner_ids',
'editor_ids', 'viewer_ids',
'contributor_ids', 'contributors_summary',
'version', 'exploration_model_created_on']
for exp_id in actual_job_output:
for prop in simple_props:
self.assertEqual(
getattr(actual_job_output[exp_id], prop),
getattr(expected_job_output[exp_id], prop))
class OneOffExplorationFirstPublishedJobTest(test_utils.GenericTestBase):
EXP_ID = 'exp_id'
def setUp(self):
super(OneOffExplorationFirstPublishedJobTest, self).setUp()
def test_first_published_time_of_exploration_that_is_unpublished(self):
"""This tests that, if an exploration is published, unpublished, and
then published again, the job uses the first publication time as the
value for first_published_msec.
"""
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.EXP_ID, owner_id, end_state_name='End')
rights_manager.publish_exploration(owner_id, self.EXP_ID)
job_class = exp_jobs_one_off.ExplorationFirstPublishedOneOffJob
job_id = job_class.create_new()
exp_jobs_one_off.ExplorationFirstPublishedOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
exploration_rights = rights_manager.get_exploration_rights(self.EXP_ID)
# Test to see whether first_published_msec was correctly updated.
exp_first_published = exploration_rights.first_published_msec
exp_rights_model = exp_models.ExplorationRightsModel.get(self.EXP_ID)
last_updated_time_msec = utils.get_time_in_millisecs(
exp_rights_model.last_updated)
self.assertLess(
exp_first_published, last_updated_time_msec)
rights_manager.unpublish_exploration(admin_id, self.EXP_ID)
rights_manager.publish_exploration(owner_id, self.EXP_ID)
job_id = job_class.create_new()
exp_jobs_one_off.ExplorationFirstPublishedOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Test to see whether first_published_msec remains the same despite the
# republication.
exploration_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertEqual(
exp_first_published, exploration_rights.first_published_msec)
class ExpSummariesContributorsOneOffJobTest(test_utils.GenericTestBase):
ONE_OFF_JOB_MANAGERS_FOR_TESTS = [
exp_jobs_one_off.ExpSummariesContributorsOneOffJob]
EXP_ID = 'exp_id'
USERNAME_A = 'usernamea'
USERNAME_B = 'usernameb'
EMAIL_A = '[email protected]'
EMAIL_B = '[email protected]'
def setUp(self):
super(ExpSummariesContributorsOneOffJobTest, self).setUp()
    def test_contributors_for_valid_contribution(self):
        """Test that if only one commit is made, the contributor
list consists of that contributor's user id.
"""
self.signup(self.EMAIL_A, self.USERNAME_A)
user_a_id = self.get_user_id_from_email(self.EMAIL_A)
exploration = self.save_new_valid_exploration(
self.EXP_ID, user_a_id)
job_id = (
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
exploration_summary = exp_services.get_exploration_summary_by_id(
exploration.id)
self.assertEqual(
[user_a_id], exploration_summary.contributor_ids)
def test_repeat_contributors(self):
"""Test that if the same user makes more than one commit that changes
the content of an exploration, the user is only represented once in the
list of contributors for that exploration.
"""
self.signup(self.EMAIL_A, self.USERNAME_A)
user_a_id = self.get_user_id_from_email(self.EMAIL_A)
# Have one user make two commits.
exploration = self.save_new_valid_exploration(
self.EXP_ID, user_a_id, title='Original Title')
exploration_model = exp_models.ExplorationModel.get(
self.EXP_ID, strict=True, version=None)
exploration_model.title = 'New title'
exploration_model.commit(
user_a_id, 'Changed title.', [])
# Run the job to compute the contributor ids.
job_id = (
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Verify that the length of the contributor list is one, and that
# the list contains the user who made these commits.
exploration_summary = exp_services.get_exploration_summary_by_id(
exploration.id)
self.assertEqual(
[user_a_id], exploration_summary.contributor_ids)
def test_contributors_with_only_reverts_not_counted(self):
"""Test that contributors who have only done reverts do not
have their user id appear in the contributor list.
"""
# Sign up two users.
self.signup(self.EMAIL_A, self.USERNAME_A)
user_a_id = self.get_user_id_from_email(self.EMAIL_A)
self.signup(self.EMAIL_B, self.USERNAME_B)
user_b_id = self.get_user_id_from_email(self.EMAIL_B)
# Have one user make two commits.
exploration = self.save_new_valid_exploration(
self.EXP_ID, user_a_id, title='Original Title')
exploration_model = exp_models.ExplorationModel.get(
self.EXP_ID, strict=True, version=None)
exploration_model.title = 'New title'
exploration_model.commit(
user_a_id, 'Changed title.', [])
# Have the second user revert version 2 to version 1
exp_services.revert_exploration(user_b_id, self.EXP_ID, 2, 1)
# Run the job to compute the contributor ids.
job_id = (
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Verify that the committer list does not contain the user
# who only reverted.
exploration_summary = exp_services.get_exploration_summary_by_id(
exploration.id)
self.assertEqual([user_a_id], exploration_summary.contributor_ids)
def test_nonhuman_committers_not_counted(self):
"""Test that only human committers are counted as contributors.
"""
# Create a commit with the system user id.
exploration = self.save_new_valid_exploration(
self.EXP_ID, feconf.SYSTEM_COMMITTER_ID, title='Original Title')
# Run the job to compute the contributor ids.
job_id = (
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Check that the system id was not added to the exploration's
# contributor ids.
exploration_summary = exp_services.get_exploration_summary_by_id(
exploration.id)
self.assertNotIn(
feconf.SYSTEM_COMMITTER_ID,
exploration_summary.contributor_ids)
# Create a commit with the migration bot user id.
exploration_model = exp_models.ExplorationModel.get(
self.EXP_ID, strict=True, version=None)
exploration_model.title = 'New title'
exploration_model.commit(
feconf.MIGRATION_BOT_USERNAME, 'Changed title.', [])
# Run the job to compute the contributor ids.
job_id = (
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Check that the migration bot id was not added to the exploration's
# contributor ids.
exploration_summary = exp_services.get_exploration_summary_by_id(
exploration.id)
self.assertNotIn(
feconf.MIGRATION_BOT_USERNAME, exploration_summary.contributor_ids)
class OneOffReindexExplorationsJobTest(test_utils.GenericTestBase):
EXP_ID = 'exp_id'
def setUp(self):
super(OneOffReindexExplorationsJobTest, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
explorations = [exp_domain.Exploration.create_default_exploration(
'%s%s' % (self.EXP_ID, i), 'title %d' % i, 'category%d' % i
) for i in xrange(5)]
for exp in explorations:
exp_services.save_new_exploration(self.owner_id, exp)
rights_manager.publish_exploration(self.owner_id, exp.id)
self.process_and_flush_pending_tasks()
def test_standard_operation(self):
job_id = (exp_jobs_one_off.IndexAllExplorationsJobManager.create_new())
exp_jobs_one_off.IndexAllExplorationsJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
indexed_docs = []
def add_docs_mock(docs, index):
indexed_docs.extend(docs)
self.assertEqual(index, exp_services.SEARCH_INDEX_EXPLORATIONS)
add_docs_swap = self.swap(
search_services, 'add_documents_to_index', add_docs_mock)
with add_docs_swap:
self.process_and_flush_pending_tasks()
ids = [doc['id'] for doc in indexed_docs]
titles = [doc['title'] for doc in indexed_docs]
categories = [doc['category'] for doc in indexed_docs]
for index in xrange(5):
self.assertIn("%s%s" % (self.EXP_ID, index), ids)
self.assertIn('title %d' % index, titles)
self.assertIn('category%d' % index, categories)
class ExplorationMigrationJobTest(test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
VALID_EXP_ID = 'exp_id0'
NEW_EXP_ID = 'exp_id1'
EXP_TITLE = 'title'
def setUp(self):
super(ExplorationMigrationJobTest, self).setUp()
# Setup user who will own the test explorations.
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.process_and_flush_pending_tasks()
def test_migration_job_does_not_convert_up_to_date_exp(self):
"""Tests that the exploration migration job does not convert an
exploration that is already the latest states schema version.
"""
# Create a new, default exploration that should not be affected by the
# job.
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, 'title', 'category')
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('EndExploration')
init_state.interaction.default_outcome = None
exp_services.save_new_exploration(self.albert_id, exploration)
self.assertEqual(
exploration.states_schema_version,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
yaml_before_migration = exploration.to_yaml()
# Start migration job on sample exploration.
job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Verify the exploration is exactly the same after migration.
updated_exp = exp_services.get_exploration_by_id(self.VALID_EXP_ID)
self.assertEqual(
updated_exp.states_schema_version,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
after_converted_yaml = updated_exp.to_yaml()
self.assertEqual(after_converted_yaml, yaml_before_migration)
def test_migration_job_does_not_have_validation_fail_on_default_exp(self):
"""Tests that the exploration migration job does not have a validation
failure for a default exploration (of states schema version 0), due to
the exploration having a null interaction ID in its initial state.
"""
self.save_new_exp_with_states_schema_v0(
self.NEW_EXP_ID, self.albert_id, self.EXP_TITLE)
# Start migration job on sample exploration.
job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Verify the new exploration has been migrated by the job.
updated_exp = exp_services.get_exploration_by_id(self.NEW_EXP_ID)
self.assertEqual(
updated_exp.states_schema_version,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
# Ensure the states structure within the exploration was changed.
self.assertNotEqual(
updated_exp.to_dict()['states'], self.VERSION_0_STATES_DICT)
def test_migration_job_skips_deleted_explorations(self):
"""Tests that the exploration migration job skips deleted explorations
and does not attempt to migrate.
"""
self.save_new_exp_with_states_schema_v0(
self.NEW_EXP_ID, self.albert_id, self.EXP_TITLE)
# Note: This creates a summary based on the upgraded model (which is
# fine). A summary is needed to delete the exploration.
exp_services.create_exploration_summary(
self.NEW_EXP_ID, None)
# Delete the exploration before migration occurs.
exp_services.delete_exploration(self.albert_id, self.NEW_EXP_ID)
# Ensure the exploration is deleted.
with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
exp_services.get_exploration_by_id(self.NEW_EXP_ID)
# Start migration job on sample exploration.
job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
        # The job running without errors indicates that the deleted
        # exploration is being ignored, since otherwise
        # exp_services.get_exploration_by_id (used within the job) would
        # raise an error.
self.process_and_flush_pending_tasks()
# Ensure the exploration is still deleted.
with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
exp_services.get_exploration_by_id(self.NEW_EXP_ID)
|
|
import math
import warnings
from fractions import Fraction
from typing import List, Tuple, Dict, Optional, Union
import torch
from ..extension import _load_library
try:
_load_library("video_reader")
_HAS_VIDEO_OPT = True
except (ImportError, OSError):
_HAS_VIDEO_OPT = False
default_timebase = Fraction(0, 1)
# simple class for torch scripting
# the complex Fraction class from fractions module is not scriptable
class Timebase:
__annotations__ = {"numerator": int, "denominator": int}
__slots__ = ["numerator", "denominator"]
def __init__(
self,
numerator: int,
denominator: int,
) -> None:
self.numerator = numerator
self.denominator = denominator
class VideoMetaData:
__annotations__ = {
"has_video": bool,
"video_timebase": Timebase,
"video_duration": float,
"video_fps": float,
"has_audio": bool,
"audio_timebase": Timebase,
"audio_duration": float,
"audio_sample_rate": float,
}
__slots__ = [
"has_video",
"video_timebase",
"video_duration",
"video_fps",
"has_audio",
"audio_timebase",
"audio_duration",
"audio_sample_rate",
]
def __init__(self) -> None:
self.has_video = False
self.video_timebase = Timebase(0, 1)
self.video_duration = 0.0
self.video_fps = 0.0
self.has_audio = False
self.audio_timebase = Timebase(0, 1)
self.audio_duration = 0.0
self.audio_sample_rate = 0.0
def _validate_pts(pts_range: Tuple[int, int]) -> None:
if pts_range[1] > 0:
assert (
            pts_range[0] <= pts_range[1]
        ), """Start pts should not be larger than end pts, got
start pts: {:d} and end pts: {:d}""".format(
pts_range[0],
pts_range[1],
)
def _fill_info(
vtimebase: torch.Tensor,
vfps: torch.Tensor,
vduration: torch.Tensor,
atimebase: torch.Tensor,
asample_rate: torch.Tensor,
aduration: torch.Tensor,
) -> VideoMetaData:
"""
    Build and return a VideoMetaData struct with info about the video.
"""
meta = VideoMetaData()
if vtimebase.numel() > 0:
meta.video_timebase = Timebase(int(vtimebase[0].item()), int(vtimebase[1].item()))
timebase = vtimebase[0].item() / float(vtimebase[1].item())
if vduration.numel() > 0:
meta.has_video = True
meta.video_duration = float(vduration.item()) * timebase
if vfps.numel() > 0:
meta.video_fps = float(vfps.item())
if atimebase.numel() > 0:
meta.audio_timebase = Timebase(int(atimebase[0].item()), int(atimebase[1].item()))
timebase = atimebase[0].item() / float(atimebase[1].item())
if aduration.numel() > 0:
meta.has_audio = True
meta.audio_duration = float(aduration.item()) * timebase
if asample_rate.numel() > 0:
meta.audio_sample_rate = float(asample_rate.item())
return meta
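# Illustrative sketch (not part of the original module): _fill_info receives the
# decoder's metadata as small tensors. The numbers below are invented for this
# example: a 1/30000 video time base, 29.97 fps, and a duration of 300300 ticks
# (10.01 s), with no audio stream.
def _example_fill_info() -> VideoMetaData:
    return _fill_info(
        torch.tensor([1, 30000]),  # video timebase numerator / denominator
        torch.tensor([29.97]),     # video fps
        torch.tensor([300300]),    # video duration in timebase units
        torch.tensor([]),          # no audio timebase
        torch.tensor([]),          # no audio sample rate
        torch.tensor([]),          # no audio duration
    )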
def _align_audio_frames(
aframes: torch.Tensor, aframe_pts: torch.Tensor, audio_pts_range: Tuple[int, int]
) -> torch.Tensor:
start, end = aframe_pts[0], aframe_pts[-1]
num_samples = aframes.size(0)
step_per_aframe = float(end - start + 1) / float(num_samples)
s_idx = 0
e_idx = num_samples
if start < audio_pts_range[0]:
s_idx = int((audio_pts_range[0] - start) / step_per_aframe)
if audio_pts_range[1] != -1 and end > audio_pts_range[1]:
e_idx = int((audio_pts_range[1] - end) / step_per_aframe)
return aframes[s_idx:e_idx, :]
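# Illustrative sketch (not part of the original module): shows how
# _align_audio_frames trims decoded samples to a pts window. The numbers are
# made up purely to demonstrate the indexing; they describe no real stream.
def _example_align_audio_frames() -> torch.Tensor:
    aframes = torch.arange(10, dtype=torch.float32).reshape(10, 1)  # 10 mono samples
    aframe_pts = torch.tensor([0, 9])  # pts of the first and last sample, 1 pts per sample
    # Keep samples from pts 2 onwards; an end value of -1 means "to the end of the stream".
    return _align_audio_frames(aframes, aframe_pts, (2, -1))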
def _read_video_from_file(
filename: str,
seek_frame_margin: float = 0.25,
read_video_stream: bool = True,
video_width: int = 0,
video_height: int = 0,
video_min_dimension: int = 0,
video_max_dimension: int = 0,
video_pts_range: Tuple[int, int] = (0, -1),
video_timebase: Fraction = default_timebase,
read_audio_stream: bool = True,
audio_samples: int = 0,
audio_channels: int = 0,
audio_pts_range: Tuple[int, int] = (0, -1),
audio_timebase: Fraction = default_timebase,
) -> Tuple[torch.Tensor, torch.Tensor, VideoMetaData]:
"""
Reads a video from a file, returning both the video frames as well as
the audio frames
Args:
filename (str): path to the video file
seek_frame_margin (double, optional): seeking frame in the stream is imprecise. Thus,
when video_start_pts is specified, we seek the pts earlier by seek_frame_margin seconds
        read_video_stream (int, optional): whether to read the video stream. If yes, set to 1. Otherwise, 0
video_width/video_height/video_min_dimension/video_max_dimension (int): together decide
the size of decoded frames:
- When video_width = 0, video_height = 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the original frame resolution
- When video_width = 0, video_height = 0, video_min_dimension != 0,
and video_max_dimension = 0, keep the aspect ratio and resize the
frame so that shorter edge size is video_min_dimension
- When video_width = 0, video_height = 0, video_min_dimension = 0,
and video_max_dimension != 0, keep the aspect ratio and resize
the frame so that longer edge size is video_max_dimension
- When video_width = 0, video_height = 0, video_min_dimension != 0,
and video_max_dimension != 0, resize the frame so that shorter
edge size is video_min_dimension, and longer edge size is
video_max_dimension. The aspect ratio may not be preserved
- When video_width = 0, video_height != 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the aspect ratio and resize
the frame so that frame video_height is $video_height
- When video_width != 0, video_height == 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the aspect ratio and resize
the frame so that frame video_width is $video_width
- When video_width != 0, video_height != 0, video_min_dimension = 0,
and video_max_dimension = 0, resize the frame so that frame
video_width and video_height are set to $video_width and
$video_height, respectively
video_pts_range (list(int), optional): the start and end presentation timestamp of video stream
video_timebase (Fraction, optional): a Fraction rational number which denotes timebase in video stream
        read_audio_stream (int, optional): whether to read the audio stream. If yes, set to 1. Otherwise, 0
        audio_samples (int, optional): audio sampling rate
        audio_channels (int, optional): audio channels
audio_pts_range (list(int), optional): the start and end presentation timestamp of audio stream
audio_timebase (Fraction, optional): a Fraction rational number which denotes time base in audio stream
    Returns:
vframes (Tensor[T, H, W, C]): the `T` video frames
aframes (Tensor[L, K]): the audio frames, where `L` is the number of points and
`K` is the number of audio_channels
info (Dict): metadata for the video and audio. Can contain the fields video_fps (float)
and audio_fps (int)
"""
_validate_pts(video_pts_range)
_validate_pts(audio_pts_range)
result = torch.ops.video_reader.read_video_from_file(
filename,
seek_frame_margin,
0, # getPtsOnly
read_video_stream,
video_width,
video_height,
video_min_dimension,
video_max_dimension,
video_pts_range[0],
video_pts_range[1],
video_timebase.numerator,
video_timebase.denominator,
read_audio_stream,
audio_samples,
audio_channels,
audio_pts_range[0],
audio_pts_range[1],
audio_timebase.numerator,
audio_timebase.denominator,
)
vframes, _vframe_pts, vtimebase, vfps, vduration, aframes, aframe_pts, atimebase, asample_rate, aduration = result
info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
if aframes.numel() > 0:
# when audio stream is found
aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)
return vframes, aframes, info
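# Usage sketch added for illustration; "sample.mp4" is a placeholder path, not a
# file shipped with this module, and the call assumes the optional video_reader
# extension loaded successfully (_HAS_VIDEO_OPT is True).
def _example_read_video_from_file():
    vframes, aframes, info = _read_video_from_file(
        "sample.mp4",
        read_video_stream=True,
        read_audio_stream=True,
    )
    return vframes.shape, aframes.shape, info.video_fps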
def _read_video_timestamps_from_file(filename: str) -> Tuple[List[int], List[int], VideoMetaData]:
"""
Decode all video- and audio frames in the video. Only pts
(presentation timestamp) is returned. The actual frame pixel data is not
copied. Thus, it is much faster than read_video(...)
"""
result = torch.ops.video_reader.read_video_from_file(
filename,
0, # seek_frame_margin
1, # getPtsOnly
1, # read_video_stream
0, # video_width
0, # video_height
0, # video_min_dimension
0, # video_max_dimension
0, # video_start_pts
-1, # video_end_pts
0, # video_timebase_num
1, # video_timebase_den
1, # read_audio_stream
0, # audio_samples
0, # audio_channels
0, # audio_start_pts
-1, # audio_end_pts
0, # audio_timebase_num
1, # audio_timebase_den
)
_vframes, vframe_pts, vtimebase, vfps, vduration, _aframes, aframe_pts, atimebase, asample_rate, aduration = result
info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
vframe_pts = vframe_pts.numpy().tolist()
aframe_pts = aframe_pts.numpy().tolist()
return vframe_pts, aframe_pts, info
def _probe_video_from_file(filename: str) -> VideoMetaData:
"""
Probe a video file and return VideoMetaData with info about the video
"""
result = torch.ops.video_reader.probe_video_from_file(filename)
vtimebase, vfps, vduration, atimebase, asample_rate, aduration = result
info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
return info
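# Usage sketch added for illustration; "sample.mp4" is a placeholder path. Probing
# decodes no frames, it only fills a VideoMetaData struct.
def _example_probe_video_from_file():
    meta = _probe_video_from_file("sample.mp4")
    return meta.has_video, meta.video_fps, meta.has_audio, meta.audio_sample_rate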
def _read_video_from_memory(
video_data: torch.Tensor,
seek_frame_margin: float = 0.25,
read_video_stream: int = 1,
video_width: int = 0,
video_height: int = 0,
video_min_dimension: int = 0,
video_max_dimension: int = 0,
video_pts_range: Tuple[int, int] = (0, -1),
video_timebase_numerator: int = 0,
video_timebase_denominator: int = 1,
read_audio_stream: int = 1,
audio_samples: int = 0,
audio_channels: int = 0,
audio_pts_range: Tuple[int, int] = (0, -1),
audio_timebase_numerator: int = 0,
audio_timebase_denominator: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Reads a video from memory, returning both the video frames as well as
the audio frames
This function is torchscriptable.
Args:
        video_data (data type could be 1) torch.Tensor, dtype=torch.uint8 or 2) python bytes):
compressed video content stored in either 1) torch.Tensor 2) python bytes
seek_frame_margin (double, optional): seeking frame in the stream is imprecise.
Thus, when video_start_pts is specified, we seek the pts earlier by seek_frame_margin seconds
        read_video_stream (int, optional): whether to read the video stream. If yes, set to 1. Otherwise, 0
video_width/video_height/video_min_dimension/video_max_dimension (int): together decide
the size of decoded frames:
- When video_width = 0, video_height = 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the original frame resolution
- When video_width = 0, video_height = 0, video_min_dimension != 0,
and video_max_dimension = 0, keep the aspect ratio and resize the
frame so that shorter edge size is video_min_dimension
- When video_width = 0, video_height = 0, video_min_dimension = 0,
and video_max_dimension != 0, keep the aspect ratio and resize
the frame so that longer edge size is video_max_dimension
- When video_width = 0, video_height = 0, video_min_dimension != 0,
and video_max_dimension != 0, resize the frame so that shorter
edge size is video_min_dimension, and longer edge size is
video_max_dimension. The aspect ratio may not be preserved
- When video_width = 0, video_height != 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the aspect ratio and resize
the frame so that frame video_height is $video_height
- When video_width != 0, video_height == 0, video_min_dimension = 0,
and video_max_dimension = 0, keep the aspect ratio and resize
the frame so that frame video_width is $video_width
- When video_width != 0, video_height != 0, video_min_dimension = 0,
and video_max_dimension = 0, resize the frame so that frame
video_width and video_height are set to $video_width and
$video_height, respectively
video_pts_range (list(int), optional): the start and end presentation timestamp of video stream
video_timebase_numerator / video_timebase_denominator (float, optional): a rational
number which denotes timebase in video stream
        read_audio_stream (int, optional): whether to read the audio stream. If yes, set to 1. Otherwise, 0
        audio_samples (int, optional): audio sampling rate
        audio_channels (int, optional): audio channels
audio_pts_range (list(int), optional): the start and end presentation timestamp of audio stream
audio_timebase_numerator / audio_timebase_denominator (float, optional):
a rational number which denotes time base in audio stream
Returns:
vframes (Tensor[T, H, W, C]): the `T` video frames
aframes (Tensor[L, K]): the audio frames, where `L` is the number of points and
`K` is the number of channels
"""
_validate_pts(video_pts_range)
_validate_pts(audio_pts_range)
if not isinstance(video_data, torch.Tensor):
video_data = torch.frombuffer(video_data, dtype=torch.uint8)
result = torch.ops.video_reader.read_video_from_memory(
video_data,
seek_frame_margin,
0, # getPtsOnly
read_video_stream,
video_width,
video_height,
video_min_dimension,
video_max_dimension,
video_pts_range[0],
video_pts_range[1],
video_timebase_numerator,
video_timebase_denominator,
read_audio_stream,
audio_samples,
audio_channels,
audio_pts_range[0],
audio_pts_range[1],
audio_timebase_numerator,
audio_timebase_denominator,
)
vframes, _vframe_pts, vtimebase, vfps, vduration, aframes, aframe_pts, atimebase, asample_rate, aduration = result
if aframes.numel() > 0:
# when audio stream is found
aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)
return vframes, aframes
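# Usage sketch added for illustration; "sample.mp4" is again a placeholder path.
# It shows that raw bytes can be passed directly: the function converts them to a
# uint8 tensor itself before handing them to the decoder op.
def _example_read_video_from_memory():
    with open("sample.mp4", "rb") as f:
        video_bytes = f.read()
    vframes, aframes = _read_video_from_memory(video_bytes)
    return vframes.shape, aframes.shape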
def _read_video_timestamps_from_memory(
video_data: torch.Tensor,
) -> Tuple[List[int], List[int], VideoMetaData]:
"""
Decode all frames in the video. Only pts (presentation timestamp) is returned.
The actual frame pixel data is not copied. Thus, read_video_timestamps(...)
is much faster than read_video(...)
"""
if not isinstance(video_data, torch.Tensor):
video_data = torch.frombuffer(video_data, dtype=torch.uint8)
result = torch.ops.video_reader.read_video_from_memory(
video_data,
0, # seek_frame_margin
1, # getPtsOnly
1, # read_video_stream
0, # video_width
0, # video_height
0, # video_min_dimension
0, # video_max_dimension
0, # video_start_pts
-1, # video_end_pts
0, # video_timebase_num
1, # video_timebase_den
1, # read_audio_stream
0, # audio_samples
0, # audio_channels
0, # audio_start_pts
-1, # audio_end_pts
0, # audio_timebase_num
1, # audio_timebase_den
)
_vframes, vframe_pts, vtimebase, vfps, vduration, _aframes, aframe_pts, atimebase, asample_rate, aduration = result
info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
vframe_pts = vframe_pts.numpy().tolist()
aframe_pts = aframe_pts.numpy().tolist()
return vframe_pts, aframe_pts, info
def _probe_video_from_memory(
video_data: torch.Tensor,
) -> VideoMetaData:
"""
Probe a video in memory and return VideoMetaData with info about the video
This function is torchscriptable
"""
if not isinstance(video_data, torch.Tensor):
video_data = torch.frombuffer(video_data, dtype=torch.uint8)
result = torch.ops.video_reader.probe_video_from_memory(video_data)
vtimebase, vfps, vduration, atimebase, asample_rate, aduration = result
info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
return info
def _convert_to_sec(
start_pts: Union[float, Fraction], end_pts: Union[float, Fraction], pts_unit: str, time_base: Fraction
) -> Tuple[Union[float, Fraction], Union[float, Fraction], str]:
if pts_unit == "pts":
start_pts = float(start_pts * time_base)
end_pts = float(end_pts * time_base)
pts_unit = "sec"
return start_pts, end_pts, pts_unit
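# Illustrative sketch: converting pts given in stream ticks to seconds with a
# 1/30000 time base (the value is arbitrary and chosen only for this example).
def _example_convert_to_sec():
    start_sec, end_sec, unit = _convert_to_sec(1001, 30030, "pts", Fraction(1, 30000))
    # start_sec == 1001 / 30000, end_sec == 1.001, unit == "sec"
    return start_sec, end_sec, unit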
def _read_video(
filename: str,
start_pts: Union[float, Fraction] = 0,
end_pts: Optional[Union[float, Fraction]] = None,
pts_unit: str = "pts",
) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, float]]:
if end_pts is None:
end_pts = float("inf")
if pts_unit == "pts":
warnings.warn(
"The pts_unit 'pts' gives wrong results and will be removed in a "
+ "follow-up version. Please use pts_unit 'sec'."
)
info = _probe_video_from_file(filename)
has_video = info.has_video
has_audio = info.has_audio
video_pts_range = (0, -1)
video_timebase = default_timebase
audio_pts_range = (0, -1)
audio_timebase = default_timebase
time_base = default_timebase
if has_video:
video_timebase = Fraction(info.video_timebase.numerator, info.video_timebase.denominator)
time_base = video_timebase
if has_audio:
audio_timebase = Fraction(info.audio_timebase.numerator, info.audio_timebase.denominator)
time_base = time_base if time_base else audio_timebase
# video_timebase is the default time_base
start_pts_sec, end_pts_sec, pts_unit = _convert_to_sec(start_pts, end_pts, pts_unit, time_base)
def get_pts(time_base):
start_offset = start_pts_sec
end_offset = end_pts_sec
if pts_unit == "sec":
start_offset = int(math.floor(start_pts_sec * (1 / time_base)))
if end_offset != float("inf"):
end_offset = int(math.ceil(end_pts_sec * (1 / time_base)))
if end_offset == float("inf"):
end_offset = -1
return start_offset, end_offset
if has_video:
video_pts_range = get_pts(video_timebase)
if has_audio:
audio_pts_range = get_pts(audio_timebase)
vframes, aframes, info = _read_video_from_file(
filename,
read_video_stream=True,
video_pts_range=video_pts_range,
video_timebase=video_timebase,
read_audio_stream=True,
audio_pts_range=audio_pts_range,
audio_timebase=audio_timebase,
)
_info = {}
if has_video:
_info["video_fps"] = info.video_fps
if has_audio:
_info["audio_fps"] = info.audio_sample_rate
return vframes, aframes, _info
def _read_video_timestamps(
filename: str, pts_unit: str = "pts"
) -> Tuple[Union[List[int], List[Fraction]], Optional[float]]:
if pts_unit == "pts":
warnings.warn(
"The pts_unit 'pts' gives wrong results and will be removed in a "
+ "follow-up version. Please use pts_unit 'sec'."
)
pts: Union[List[int], List[Fraction]]
pts, _, info = _read_video_timestamps_from_file(filename)
if pts_unit == "sec":
video_time_base = Fraction(info.video_timebase.numerator, info.video_timebase.denominator)
pts = [x * video_time_base for x in pts]
video_fps = info.video_fps if info.has_video else None
return pts, video_fps
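# Usage sketch added for illustration; "sample.mp4" is a placeholder path. With
# pts_unit="sec" the per-frame timestamps come back as Fractions in seconds.
def _example_read_video_timestamps():
    pts, fps = _read_video_timestamps("sample.mp4", pts_unit="sec")
    return len(pts), fps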
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import pytest
from google.api import monitored_resource_pb2
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3.proto import group_pb2
from google.cloud.monitoring_v3.proto import group_service_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
    def __init__(self, responses=None):
        # Avoid the shared-mutable-default-argument pitfall.
        self.responses = responses if responses is not None else []
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
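# Minimal sketch (added for illustration) of how the stubs above cooperate, using
# only the fakes defined in this file; no real gRPC channel or Monitoring API is
# involved, and the method name string is arbitrary.
def _example_channel_stub_roundtrip():
    channel = ChannelStub(responses=['canned-response'])
    call = channel.unary_unary('/google.monitoring.v3.GroupService/GetGroup')
    response = call('fake-request')
    assert response == 'canned-response'
    assert channel.requests == [
        ('/google.monitoring.v3.GroupService/GetGroup', 'fake-request')]
    return response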
class TestGroupServiceClient(object):
def test_list_groups(self):
# Setup Expected Response
next_page_token = ''
group_element = {}
group = [group_element]
expected_response = {
'next_page_token': next_page_token,
'group': group
}
expected_response = group_service_pb2.ListGroupsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup Request
name = client.project_path('[PROJECT]')
paged_list_response = client.list_groups(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.group[0] == resources[0]
assert len(channel.requests) == 1
expected_request = group_service_pb2.ListGroupsRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_groups_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup request
name = client.project_path('[PROJECT]')
paged_list_response = client.list_groups(name)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_group(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
display_name = 'displayName1615086568'
parent_name = 'parentName1015022848'
filter_ = 'filter-1274492040'
is_cluster = False
expected_response = {
'name': name_2,
'display_name': display_name,
'parent_name': parent_name,
'filter': filter_,
'is_cluster': is_cluster
}
expected_response = group_pb2.Group(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup Request
name = client.group_path('[PROJECT]', '[GROUP]')
response = client.get_group(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = group_service_pb2.GetGroupRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_group_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup request
name = client.group_path('[PROJECT]', '[GROUP]')
with pytest.raises(CustomException):
client.get_group(name)
def test_create_group(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
display_name = 'displayName1615086568'
parent_name = 'parentName1015022848'
filter_ = 'filter-1274492040'
is_cluster = False
expected_response = {
'name': name_2,
'display_name': display_name,
'parent_name': parent_name,
'filter': filter_,
'is_cluster': is_cluster
}
expected_response = group_pb2.Group(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup Request
name = client.project_path('[PROJECT]')
group = {}
response = client.create_group(name, group)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = group_service_pb2.CreateGroupRequest(
name=name, group=group)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_group_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup request
name = client.project_path('[PROJECT]')
group = {}
with pytest.raises(CustomException):
client.create_group(name, group)
def test_update_group(self):
# Setup Expected Response
name = 'name3373707'
display_name = 'displayName1615086568'
parent_name = 'parentName1015022848'
filter_ = 'filter-1274492040'
is_cluster = False
expected_response = {
'name': name,
'display_name': display_name,
'parent_name': parent_name,
'filter': filter_,
'is_cluster': is_cluster
}
expected_response = group_pb2.Group(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup Request
group = {}
response = client.update_group(group)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = group_service_pb2.UpdateGroupRequest(group=group)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_group_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup request
group = {}
with pytest.raises(CustomException):
client.update_group(group)
def test_delete_group(self):
channel = ChannelStub()
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup Request
name = client.group_path('[PROJECT]', '[GROUP]')
client.delete_group(name)
assert len(channel.requests) == 1
expected_request = group_service_pb2.DeleteGroupRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_group_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup request
name = client.group_path('[PROJECT]', '[GROUP]')
with pytest.raises(CustomException):
client.delete_group(name)
def test_list_group_members(self):
# Setup Expected Response
next_page_token = ''
total_size = 705419236
members_element = {}
members = [members_element]
expected_response = {
'next_page_token': next_page_token,
'total_size': total_size,
'members': members
}
expected_response = group_service_pb2.ListGroupMembersResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup Request
name = client.group_path('[PROJECT]', '[GROUP]')
paged_list_response = client.list_group_members(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.members[0] == resources[0]
assert len(channel.requests) == 1
expected_request = group_service_pb2.ListGroupMembersRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_group_members_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = monitoring_v3.GroupServiceClient(channel=channel)
# Setup request
name = client.group_path('[PROJECT]', '[GROUP]')
paged_list_response = client.list_group_members(name)
with pytest.raises(CustomException):
list(paged_list_response)
|
|
#!/usr/bin/python
import os
import datetime
import copy
import sys
from lxml import etree as et
import pcbmode.config as config
from . import messages as msg
from . import svg
from . import utils
from . import place
try:
# Python 3
import html.parser as HTMLParser
except:
# Python 2
import HTMLParser
from .shape import Shape
from .style import Style
from .component import Component
from .point import Point
class Module():
    """Builds the board's SVG document from the module and routing definitions."""
def __init__(self,
module_dict,
routing_dict,
asmodule=False):
"""
"""
self._module_dict = module_dict
self._routing_dict = routing_dict
self._outline = self._getOutline()
self._width = self._outline.getWidth()
self._height = self._outline.getHeight()
# Get dictionary of component definitions
components_dict = self._module_dict.get('components') or {}
self._components = self._getComponents(components_dict)
        # Get dictionary of via definitions
vias_dict = self._routing_dict.get('vias') or {}
self._vias = self._getComponents(vias_dict)
        # Get dictionary of shape definitions
shapes_dict = self._module_dict.get('shapes') or {}
self._shapes = self._getComponents(shapes_dict)
sig_dig = config.cfg['significant-digits']
self._transform = 'translate(%s %s)' % (round(self._width/2, sig_dig),
round(self._height/2, sig_dig))
# Create the Inkscape SVG document
self._module = self._getModuleElement()
svg_doc = et.ElementTree(self._module)
# Get a dictionary of SVG layers
self._layers = svg.makeSvgLayers(self._module, self._transform)
# Add a 'defs' element:
# http://www.w3.org/TR/SVG/struct.html#Head
# This is where masking elements that are used for pours are stored
defs = et.SubElement(self._module, 'defs')
self._masks = {}
for pcb_layer in config.stk['layer-names']:
element = et.SubElement(defs, 'mask',
id="mask-%s" % pcb_layer,
transform=self._transform)
# This will identify the masks for each PCB layer when
# the layer is converted to Gerber
element.set('{'+config.cfg['ns']['pcbmode']+'}pcb-layer', pcb_layer)
self._masks[pcb_layer] = element
self._placeOutline()
self._placeOutlineDimensions()
msg.subInfo('Placing components:', newline=False)
self._placeComponents(components=self._components,
component_type='component',
print_refdef=True)
sys.stdout.write("\n")
msg.subInfo('Placing routes')
self._placeRouting()
msg.subInfo('Placing vias')
self._placeComponents(components=self._vias,
component_type='via',
print_refdef=False)
msg.subInfo('Placing shapes')
self._placeComponents(components=self._shapes,
component_type='shape',
print_refdef=False)
if config.tmp['no-docs'] == False:
msg.subInfo('Placing documentation')
self._placeDocs()
if config.tmp['no-drill-index'] == False:
msg.subInfo('Placing drill index')
self._placeDrillIndex()
if config.tmp['no-layer-index'] == False:
msg.subInfo('Placing layer index')
self._placeLayerIndex()
        # This 'cover' ensures that the mask shapes defined in the mask are
        # shown. It *must* be the last element in the mask definition;
        # any mask element placed after it won't show.
for pcb_layer in config.stk['layer-names']:
if utils.checkForPoursInLayer(pcb_layer) is True:
mask_cover = et.SubElement(self._masks[pcb_layer], 'rect',
x="%s" % str(-self._width/2),
y="%s" % str(-self._height/2),
width="%s" % self._width,
height="%s" % self._height,
style="fill:#fff;")
# This tells the Gerber conversion to ignore this shape
mask_cover.set('{'+config.cfg['ns']['pcbmode']+'}type', 'mask-cover')
output_file = os.path.join(config.cfg['base-dir'],
config.cfg['locations']['build'],
config.cfg['name'] + '.svg')
        try:
            with open(output_file, 'wb') as f:
                f.write(et.tostring(svg_doc, pretty_print=True))
        except IOError as e:
            print("I/O error({0}): {1}".format(e.errno, e.strerror))
def _placeOutlineDimensions(self):
"""
Places outline dimension arrows
"""
def makeArrow(width, gap):
"""
Returns a path for an arrow of width 'width' with a center gap of
width 'gap'
"""
# Length of bar perpendicular to the arrow's shaft
base_length = 1.8
# Height of arrow's head
arrow_height = 2.5
# Width of arrow's head
arrow_base = 1.2
# Create path
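            # The relative path below draws the left half of the dimension
            # arrow (shaft from the edge of the text gap to the left end, a
            # perpendicular end bar of height base_length, and a two-stroke
            # arrowhead), then moves across the central gap and draws the
            # mirrored right half.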
path = "m %s,%s %s,%s m %s,%s %s,%s m %s,%s %s,%s m %s,%s %s,%s m %s,%s m %s,%s %s,%s m %s,%s %s,%s m %s,%s %s,%s m %s,%s %s,%s" % (-gap/2,0, -width/2+gap/2,0, 0,base_length/2, 0,-base_length, arrow_height,(base_length-arrow_base)/2, -arrow_height,arrow_base/2, arrow_height,arrow_base/2, -arrow_height,-arrow_base/2, width/2,0, gap/2,0, width/2-gap/2,0, 0,base_length/2, 0,-base_length, -arrow_height,(base_length-arrow_base)/2, arrow_height,arrow_base/2, -arrow_height,arrow_base/2, arrow_height,-arrow_base/2,)
return path
# Create text shapes
shape_dict = {}
shape_dict['type'] = 'text'
style_dict = config.stl['layout']['dimensions'].get('text') or {}
shape_dict['font-family'] = style_dict.get('font-family') or "UbuntuMono-R-webfont"
shape_dict['font-size'] = style_dict.get('font-size') or "1.5mm"
shape_dict['line-height'] = style_dict.get('line-height') or "1mm"
shape_dict['letter-spacing'] = style_dict.get('letter-spacing') or "0mm"
# Locations
arrow_gap = 1.5
width_loc = [0, self._height/2+arrow_gap]
height_loc = [-(self._width/2+arrow_gap), 0]
# Width text
width_text_dict = shape_dict.copy()
width_text_dict['value'] = "%s mm" % round(self._width,2)
width_text_dict['location'] = width_loc
width_text = Shape(width_text_dict)
style = Style(width_text_dict, 'dimensions')
width_text.setStyle(style)
# Height text
height_text_dict = shape_dict.copy()
height_text_dict['value'] = "%s mm" % round(self._height,2)
height_text_dict['rotate'] = -90
height_text_dict['location'] = height_loc
height_text = Shape(height_text_dict)
style = Style(height_text_dict, 'dimensions')
height_text.setStyle(style)
# Width arrow
shape_dict = {}
shape_dict['type'] = 'path'
shape_dict['value'] = makeArrow(self._width, width_text.getWidth()*1.2)
shape_dict['location'] = width_loc
width_arrow = Shape(shape_dict)
style = Style(shape_dict, 'dimensions')
width_arrow.setStyle(style)
# Height arrow
shape_dict = {}
shape_dict['type'] = 'path'
shape_dict['value'] = makeArrow(self._height, height_text.getHeight()*1.2)
shape_dict['rotate'] = -90
shape_dict['location'] = height_loc
height_arrow = Shape(shape_dict)
style = Style(shape_dict, 'dimensions')
height_arrow.setStyle(style)
svg_layer = self._layers['dimensions']['layer']
group = et.SubElement(svg_layer, 'g')
group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'module-shapes')
place.placeShape(width_text, group)
place.placeShape(height_text, group)
place.placeShape(width_arrow, group)
place.placeShape(height_arrow, group)
def _placeComponents(self, components, component_type, print_refdef=False):
"""
Places the component on the board.
        'component_type' is the content of the 'type' field of the
placed group. This is used by the extractor to identify the
type of component ('component', 'via', 'shape')
"""
htmlpar = HTMLParser.HTMLParser()
for component in components:
shapes_dict = component.getShapes()
location = component.getLocation()
rotation = component.getRotation()
refdef = component.getRefdef()
if print_refdef == True:
sys.stdout.write("%s " % refdef)
# If the component is placed on the bottom layer we need
# to invert the shapes AND their 'x' coordinate. This is
# done using the 'invert' indicator set below
placement_layer = component.getPlacementLayer()
if placement_layer == 'bottom':
invert = True
else:
invert = False
for pcb_layer in config.stk['layer-names']:
there_are_pours = utils.checkForPoursInLayer(pcb_layer)
# Copper
shapes = shapes_dict['conductor'].get(pcb_layer) or []
if len(shapes) > 0:
svg_layer = self._layers[pcb_layer]['conductor']['pads']['layer']
transform = "translate(%s,%s)" % (location[0],
config.cfg['invert-y']*location[1])
shape_group = et.SubElement(svg_layer, 'g',
transform=transform)
shape_group.set('{'+config.cfg['ns']['pcbmode']+'}type', component_type)
# Add the reference designator as well if it's a
# 'component'
if component_type == 'component':
shape_group.set('{'+config.cfg['ns']['pcbmode']+'}refdef', component.getRefdef())
style = utils.dictToStyleText(config.stl['layout']['conductor']['pads']['labels'])
label_group = et.SubElement(shape_group, 'g', style=style)
for shape in shapes:
place.placeShape(shape, shape_group, invert)
# Add pin labels
# TODO: This isn't perfect, but good enough for now
label = shape.getLabel()
if label != None:
label_location = shape.getLocation()
label_rotation = shape.getRotation()
label_transform = "rotate(%s)" % label_rotation
t = et.SubElement(label_group, 'text',
x=str(((1,-1)[invert])*label_location.x),
y=str(config.cfg['invert-y']*label_location.y),
transform=label_transform)
t.text = label
if there_are_pours == True:
mask_group = et.SubElement(self._masks[pcb_layer], 'g',
transform=transform)
self._placeMask(mask_group,
shape,
'pad',
original=False,
mirror=invert)
# Pours
shapes = shapes_dict['pours'].get(pcb_layer) or []
try:
svg_layer = self._layers[pcb_layer]['conductor']['pours']['layer']
except:
svg_layer = None
if len(shapes) > 0 and svg_layer != None:
shape_group = et.SubElement(svg_layer, 'g',
mask='url(#mask-%s)' % pcb_layer)
transform = "translate(%s,%s)" % (location[0],
config.cfg['invert-y']*location[1])
group = et.SubElement(shape_group, 'g', transform=transform)
group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'pours')
for shape in shapes:
placed_element = place.placeShape(shape, group, invert)
# Soldermask
shapes = shapes_dict['soldermask'].get(pcb_layer) or []
try:
svg_layer = self._layers[pcb_layer]['soldermask']['layer']
except:
svg_layer = None
if len(shapes) > 0 and svg_layer != None:
transform = "translate(%s,%s)" % (location[0],
config.cfg['invert-y']*location[1])
group = et.SubElement(svg_layer, 'g', transform=transform)
group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'component-shapes')
for shape in shapes:
placed_element = place.placeShape(shape, group, invert)
# Solderpaste
shapes = shapes_dict['solderpaste'].get(pcb_layer) or []
try:
svg_layer = self._layers[pcb_layer]['solderpaste']['layer']
except:
svg_layer = None
if len(shapes) > 0 and svg_layer != None:
transform = "translate(%s,%s)" % (location[0],
config.cfg['invert-y']*location[1])
group = et.SubElement(svg_layer, 'g', transform=transform)
group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'component-shapes')
for shape in shapes:
placed_element = place.placeShape(shape, group, invert)
# Silkscreen
shapes = shapes_dict['silkscreen'].get(pcb_layer) or []
try:
svg_layer = self._layers[pcb_layer]['silkscreen']['layer']
except:
svg_layer = None
if len(shapes) > 0 and svg_layer != None:
transform = "translate(%s,%s)" % (location[0],
config.cfg['invert-y']*location[1])
shape_group = et.SubElement(svg_layer, 'g', transform=transform)
shape_group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'component-shapes')
for shape in shapes:
# Refdefs need to be in their own groups so that their
# location can later be extracted, hence this...
try:
is_refdef = getattr(shape, 'is_refdef')
except:
is_refdef = False
if is_refdef == True:
# Shapes don't need to have silkscreen
# reference designators
if component_type != 'shape':
refdef_group = et.SubElement(svg_layer, 'g', transform=transform)
refdef_group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'refdef')
refdef_group.set('{'+config.cfg['ns']['pcbmode']+'}refdef', refdef)
placed_element = place.placeShape(shape, refdef_group, invert)
else:
placed_element = place.placeShape(shape, shape_group, invert)
# Assembly
shapes = shapes_dict['assembly'].get(pcb_layer) or []
try:
svg_layer = self._layers[pcb_layer]['assembly']['layer']
except:
svg_layer = None
if len(shapes) > 0 and svg_layer != None:
transform = "translate(%s,%s)" % (location[0],
config.cfg['invert-y']*location[1])
group = et.SubElement(svg_layer, 'g', transform=transform)
for shape in shapes:
placed_element = place.placeShape(shape, group, invert)
# Drills
shapes = shapes_dict['drills'].get(pcb_layer) or []
if len(shapes) > 0:
svg_layer = self._layers['drills']['layer']
transform = "translate(%s,%s)" % (location[0],
config.cfg['invert-y']*location[1])
group = et.SubElement(svg_layer, 'g', transform=transform)
group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'component-shapes')
for shape in shapes:
placed_element = place.placeShape(shape, group, invert)
placed_element.set('{'+config.cfg['ns']['pcbmode']+'}diameter',
str(shape.getDiameter()))
# Place component origin marker
svg_layer = self._layers[placement_layer]['placement']['layer']
            # pcb_layer may not exist here for components that define
            # shapes for internal layers when only surface layers are
            # defined in the stackup.
try:
group = et.SubElement(svg_layer, 'g', transform=transform)
except:
return
# Add PCBmodE information, useful for when extracting
group.set('{'+config.cfg['ns']['pcbmode']+'}type', component_type)
group.set('{'+config.cfg['ns']['pcbmode']+'}footprint', component.getFootprintName())
if (component_type == 'component') or (component_type == 'shape'):
group.set('{'+config.cfg['ns']['pcbmode']+'}refdef', refdef)
elif (component_type == 'via'):
group.set('{'+config.cfg['ns']['pcbmode']+'}id', refdef)
else:
pass
path = svg.placementMarkerPath()
transform = "translate(%s,%s)" % (location[0],
config.cfg['invert-y']*location[1])
if placement_layer == 'bottom':
rotation *= -1
marker_element = et.SubElement(group, 'path',
d=path,
transform="rotate(%s)" % rotation)
if (component_type == 'component'):
style = utils.dictToStyleText(config.stl['layout']['placement']['text'])
t = et.SubElement(group, 'text', x="0", y="-0.17", style=style)
ts = et.SubElement(t, 'tspan', x="0", dy="0.1")
ts.text = "%s" % (refdef)
ts = et.SubElement(t, 'tspan', x="0", dy="0.1")
ts.text = htmlpar.unescape("%s°" % (rotation))
ts = et.SubElement(t, 'tspan', x="0", dy="0.1")
ts.text = "[%.2f,%.2f]" % (location[0], location[1])
elif (component_type == 'shape'):
style = utils.dictToStyleText(config.stl['layout']['placement']['text'])
t = et.SubElement(group, 'text', x="0", y="-0.17", style=style)
ts = et.SubElement(t, 'tspan', x="0", dy="0.1")
ts.text = "%s" % (refdef)
ts = et.SubElement(t, 'tspan', x="0", dy="0.1")
ts.text = htmlpar.unescape("%s°" % (rotation))
ts = et.SubElement(t, 'tspan', x="0", dy="0.1")
ts.text = "[%.2f,%.2f]" % (location[0], location[1])
elif (component_type == 'via'):
style = utils.dictToStyleText(config.stl['layout']['placement']['text'])
t = et.SubElement(group, 'text', x="0", y="-0.11", style=style)
ts = et.SubElement(t, 'tspan', x="0", dy="0.1")
ts.text = htmlpar.unescape("%s°" % (rotation))
ts = et.SubElement(t, 'tspan', x="0", dy="0.1")
ts.text = "[%.2f,%.2f]" % (location[0], location[1])
else:
continue
def _placeRouting(self):
"""
"""
routing = config.rte
routes = routing.get('routes') or {}
# Path effects are used for meandering paths, for example
path_effects = routes.get('path_effects')
xpath_expr = "//g[@inkscape:label='%s']//g[@inkscape:label='%s']"
extra_attributes = ['inkscape:connector-curvature', 'inkscape:original-d', 'inkscape:path-effect']
for pcb_layer in config.stk['layer-names']:
# Are there pours in the layer? This makes a difference for whether to place
# masks
there_are_pours = utils.checkForPoursInLayer(pcb_layer)
# Define a group where masks are stored
mask_group = et.SubElement(self._masks[pcb_layer], 'g')
# Place defined routes on this SVG layer
sheet = self._layers[pcb_layer]['conductor']['routing']['layer']
for route_key in (routes.get(pcb_layer) or {}):
shape_dict = routes[pcb_layer][route_key]
shape = Shape(shape_dict)
style = Style(shape_dict, 'conductor')
shape.setStyle(style)
                # Routes are a special case where they are used as-is,
                # counting on Inkscape's 'optimised' setting to modify
                # the path such that placement is reflected in
                # it. Therefore we use the original path, not the
                # transformed one as usual.
use_original_path = True
mirror_path = False
route_element = place.placeShape(shape,
sheet,
mirror_path,
use_original_path)
route_element.set('style', shape.getStyleString())
# Set the key as pcbmode:id of the route. This is used
# when extracting routing to offset the location of a
# modified route
route_element.set('{'+config.cfg['ns']['pcbmode']+'}%s' % ('id'),
route_key)
# Add a custom buffer definition if it exists
custom_buffer = shape_dict.get('buffer-to-pour')
if custom_buffer != None:
route_element.set('{'+config.cfg['namespace']['pcbmode']+'}%s' % "buffer-to-pour", str(custom_buffer))
# TODO: can this be done more elegantly, and "general purpose"?
for extra_attrib in extra_attributes:
ea = shape_dict.get(extra_attrib)
if ea is not None:
                        route_element.set('{'+config.cfg['namespace']['inkscape']+'}%s' % extra_attrib[extra_attrib.index(':')+1:], ea)
# TODO: this needs to eventually go away or be done properly
pcbmode_params = shape_dict.get('pcbmode')
if pcbmode_params is not None:
route_element.set('pcbmode', pcbmode_params)
if ((there_are_pours == True) and (custom_buffer != "0")):
self._placeMask(self._masks[pcb_layer],
shape,
'route',
use_original_path)
# # Due to the limitation of the Gerber format, and the method chosen
# # for applying masks onto pours, it is not possible to have copper
# # pour material inside of paths that have more than a single segment.
            # # In order to make the appearance in the SVG and Gerbers consistent,
# # each path segment is added with a 'fill'. In the future, when the
# # *actual* shape is calculated, it may be possible to avoid this
# # hack. On the other hand, one can argue that having pours inside of
            # # shapes doesn't make sense anyway, because it alters its appearance,
# # and such shapes are stylistic anyway. OK, back to code now...
# gerber_lp = shape.getGerberLP()
# if gerber_lp is not None:
# if len(gerber_lp) > 1:
# path_segments = path.split('m')
# i = 0
# for path_segment in path_segments[1:]:
# # only mask dark bits
# if gerber_lp[i] == 'd':
# mask_element = et.SubElement(mask_group, 'path',
# type="mask_shape",
# style="fill:#000;stroke:none;",
# d='m '+path_segment)
#
# i += 1
def _placeMask(self, svg_layer, shape, kind, original=False, mirror=False):
"""
Places a mask of a shape of type 'Shape' on SVG layer 'svg_layer'.
'kind' : type of shape; used to fetch the correct distance to pour
'original': use the original path, not the transformed one
"""
# Get the desired distance based on 'kind' 'outline', 'drill',
# 'pad', 'route' unless 'pour_buffer' is specified
pour_buffer = shape.getPourBuffer()
        if pour_buffer is None:
try:
pour_buffer = self._module_dict['distances']['from-pour-to'][kind]
except:
pour_buffer = config.brd['distances']['from-pour-to'][kind]
style_template = "fill:%s;stroke:#000;stroke-linejoin:round;stroke-width:%s;stroke-linecap:round;"
style = shape.getStyle()
if (pour_buffer > 0):
mask_element = place.placeShape(shape, svg_layer, mirror, original)
if style.getStyleType() == 'fill':
mask_element.set('style', style_template % ('#000', pour_buffer*2))
else:
# This width provides a distance of 'pour_buffer' from the
# edge of the trace to a pour
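                # For example (values illustrative): a 0.5 mm wide trace with
                # a 0.25 mm pour buffer gets a 1.0 mm mask stroke, leaving
                # 0.25 mm of clearance on each side of the trace.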
width = style.getStrokeWidth() + pour_buffer*2
mask_element.set('style', style_template % ('none', width))
path = shape.getOriginalPath().lower()
segments = path.count('m')
mask_element.set('{'+config.cfg['ns']['pcbmode']+'}gerber-lp', 'c'*segments)
def _placeOutline(self):
"""
"""
# Place shape
shape_group = et.SubElement(self._layers['outline']['layer'], 'g')
shape_group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'module-shapes')
place.placeShape(self._outline, shape_group)
# Place a mask for the board's outline. This creates a buffer between
# the board's edge and pours
try:
pour_buffer = self._module['distances']['from-pour-to']['outline']
except:
pour_buffer = config.brd['distances']['from-pour-to']['outline']
for pcb_layer in config.stk['layer-names']:
if utils.checkForPoursInLayer(pcb_layer) is True:
mask_element = place.placeShape(self._outline, self._masks[pcb_layer])
# Override style so that we get the desired effect
# We stroke the outline with twice the size of the buffer, so
# we get the actual distance between the outline and board
style = "fill:none;stroke:#000;stroke-linejoin:round;stroke-width:%s;" % str(pour_buffer*2)
mask_element.set('style', style)
# Also override mask's gerber-lp and set to all clear
path = self._outline.getOriginalPath().lower()
segments = path.count('m')
mask_element.set('{'+config.cfg['ns']['pcbmode']+'}gerber-lp', 'c'*segments)
def _placeDocs(self):
"""
Places documentation blocks on the documentation layer
"""
try:
docs_dict = config.brd['documentation']
except:
return
for key in docs_dict:
location = utils.toPoint(docs_dict[key]['location'])
docs_dict[key]['location'] = [0, 0]
shape_group = et.SubElement(self._layers['documentation']['layer'], 'g')
shape_group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'module-shapes')
shape_group.set('{'+config.cfg['ns']['pcbmode']+'}doc-key', key)
shape_group.set('transform', "translate(%s,%s)" % (location.x, config.cfg['invert-y']*location.y))
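            # Note: config.cfg['invert-y'] appears to be a sign factor
            # (presumably -1) that flips between the board's y-up coordinates
            # and SVG's y-down coordinates; the same factor multiplies every
            # placed y coordinate in this module.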
location = docs_dict[key]['location']
docs_dict[key]['location'] = [0, 0]
shape = Shape(docs_dict[key])
style = Style(docs_dict[key], 'documentation')
shape.setStyle(style)
element = place.placeShape(shape, shape_group)
def _placeLayerIndex(self):
"""
Adds a layer index
"""
text_dict = config.stl['layout']['layer-index']['text']
text_dict['type'] = 'text'
# Set the height (and width) of the rectangle (square) to the
# size of the text
rect_width = utils.parseDimension(text_dict['font-size'])[0]
rect_height = rect_width
rect_gap = 0.25
# Get location, or generate one
try:
location = config.brd['layer-index']['location']
except:
            # If no location is specified, put the layer index at the
            # top right of the board. The 'gap' defines the extra
            # space between the index and the board's edge
gap = 2
location = [self._width/2+gap, self._height/2-rect_height/2]
location = utils.toPoint(location)
rect_dict = {}
rect_dict['type'] = 'rect'
rect_dict['style'] = 'fill'
rect_dict['width'] = rect_width
rect_dict['height'] = rect_height
# Create group for placing index
for pcb_layer in config.stk['layer-names']:
if pcb_layer in config.stk['surface-layer-names']:
sheets = ['conductor', 'soldermask', 'silkscreen', 'assembly', 'solderpaste']
else:
sheets = ['conductor']
for sheet in sheets:
layer = self._layers[pcb_layer][sheet]['layer']
transform = "translate(%s,%s)" % (location.x, config.cfg['invert-y']*location.y)
group = et.SubElement(layer, 'g',
transform=transform)
group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'layer-index')
rect_shape = Shape(rect_dict)
style = Style(rect_dict, sheet)
rect_shape.setStyle(style)
place.placeShape(rect_shape, group)
text_dict['value'] = "%s %s" % (pcb_layer, sheet)
text_shape = Shape(text_dict)
text_width = text_shape.getWidth()
style = Style(text_dict, sheet)
text_shape.setStyle(style)
element = place.placeShape(text_shape, group)
element.set("transform", "translate(%s,%s)" % (rect_width/2+rect_gap+text_width/2, 0))
location.y += config.cfg['invert-y']*(rect_height+rect_gap)
location.y += config.cfg['invert-y']*(rect_height+rect_gap*1.5)
def _placeDrillIndex(self):
"""
Adds a drill index
"""
# Get the drills sheet / SVG layer
drill_layer = self._layers['drills']['layer']
ns = {'pcbmode':config.cfg['ns']['pcbmode'],
'svg':config.cfg['ns']['svg']}
drills = drill_layer.findall(".//*[@pcbmode:diameter]", namespaces=ns)
drills_dict = {}
longest_text = 0
largest_drill = 0
drill_count = 0
for drill in drills:
diameter = drill.get('{'+config.cfg['ns']['pcbmode']+'}diameter')
diameter = round(float(diameter), 2)
if diameter not in drills_dict:
drills_dict[diameter] = 1
else:
drills_dict[diameter] += 1
if diameter > largest_drill:
largest_drill = diameter
drill_count += 1
if len(str(diameter)) > longest_text:
longest_text = len(str(diameter))
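        # At this point drills_dict maps each rounded diameter (mm) to the
        # number of drills of that size, e.g. {0.8: 12, 1.0: 4} (counts
        # illustrative); it drives the index drawn below.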
# Get location, or generate one
try:
location = config.brd['drill-index']['location']
except:
            # If no location is specified, put the drill index at the
            # bottom left of the board. The 'gap' defines the extra
            # space between the top of the largest drill and the
# board's edge
gap = 2
location = [-self._width/2, -(self._height/2+gap)]
location = utils.toPoint(location)
# Create group for placing index
transform = "translate(%s,%s)" % (location.x, config.cfg['invert-y']*location.y)
group = et.SubElement(drill_layer, 'g',
transform=transform)
group.set('{'+config.cfg['ns']['pcbmode']+'}type', 'drill-index')
text_style_dict = config.stl['layout']['drill-index'].get('text')
text_style = utils.dictToStyleText(text_style_dict)
count_style_dict = config.stl['layout']['drill-index'].get('count-text')
count_style = utils.dictToStyleText(count_style_dict)
count_style_dict['font-size'] /= 2
drill_size_style = utils.dictToStyleText(count_style_dict)
if drill_count == 0:
text = 'No drills'
elif drill_count == 1:
text = '1 drill: '
else:
text = '%s drills: ' % drill_count
t = et.SubElement(group, 'text',
x=str(0),
y=str(0),
style=text_style)
t.text = text
# "new line"
location.y = -(largest_drill/2 + 1.5)
# TODO: this hack'ish thing for aligning the text isn't going
# to work when the font is changed in the stylesheet
if float(longest_text*0.5) > largest_drill:
location.x = longest_text*0.3
else:
location.x = largest_drill/2
gap = 2
for diameter in reversed(sorted(drills_dict)):
path = svg.drillPath(diameter)
transform = "translate(%s,%s)" % (location.x, config.cfg['invert-y']*location.y)
element = et.SubElement(group, 'path',
d=path,
transform=transform)
element.set("fill-rule", "evenodd")
t = et.SubElement(group, 'text',
x=str(location.x),
y=str(-location.y),
dy="%s" % (config.cfg['invert-y']*0.25),
style=count_style)
t.text = str(drills_dict[diameter])
t = et.SubElement(group, 'text',
x=str(location.x),
y=str(-location.y),
dy="%s" % (config.cfg['invert-y']*-0.5),
style=drill_size_style)
t.text = "%s mm" % diameter
location.x += max(diameter, 2.5)
def _getModuleElement(self):
"""
        Returns the skeleton of an Inkscape SVG element
"""
module = et.Element('svg',
width="%s%s" % (self._width, config.brd['config']['units']),
height="%s%s" % (self._height, config.brd['config']['units']),
viewBox='%s, %s, %s, %s' % (0, 0, self._width, self._height),
version='1.1',
nsmap=config.cfg['ns'],
fill='black')
# Set Inkscape options tag
inkscape_opt = et.SubElement(module,
'{'+config.cfg['ns']['sodipodi']+'}%s' % 'namedview',
id="namedview-pcbmode",
showgrid="true")
# Add units definition (only 'mm' is supported)
inkscape_opt.set('{'+config.cfg['ns']['inkscape']+'}%s' % 'document-units',
config.brd['config']['units'])
# Open window maximised
inkscape_opt.set('{'+config.cfg['ns']['inkscape']+'}%s' % 'window-maximized',
'1')
# Define a grid
et.SubElement(inkscape_opt, '{'+config.cfg['ns']['inkscape']+'}%s' % 'grid',
type="xygrid",
id="pcbmode-grid",
visible="true",
enabled="false",
units="mm",
emspacing="5",
spacingx="0.1mm",
spacingy="0.1mm")
# Add a welcome message as a comment in the SVG
welcome_message = """
Hello! This SVG file was generated using PCBmodE version %s on %s GMT.
PCBmodE is open source software
http://pcbmode.com
and is maintained by Boldport
http://boldport.com
""" % (config.cfg['version'], datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
module.append(et.Comment(welcome_message))
return module
def _getComponents(self, components_dict):
"""
Create the components for this module.
Return a list of items of class 'component'
"""
# Store components here
components = []
# Get shapes for each component definition
for refdef in components_dict:
component_dict = components_dict[refdef]
# Show or hide the component.
            # The component is still accounted for in the BoM
show = component_dict.get('show', True)
# Place or do not place the component
# Also ignored for BoM
place = component_dict.get('place', True)
if (show == True) and (place == True):
component = Component(refdef, component_dict)
components.append(component)
return components
def _getOutline(self):
"""
Process the module's outline shape. Modules don't have to have an outline
defined, so in that case return None.
"""
shape = None
outline_dict = self._module_dict.get('outline')
        if outline_dict is not None:
            shape_dict = outline_dict.get('shape')
            if shape_dict is not None:
shape = Shape(shape_dict)
style = Style(shape_dict, 'outline')
shape.setStyle(style)
return shape
|
|
from textwrap import dedent
import jinja2
import pytest
import salt.serializers.configparser as configparser
import salt.serializers.json as json
import salt.serializers.msgpack as msgpack
import salt.serializers.plist as plist
import salt.serializers.python as python
import salt.serializers.toml as toml
import salt.serializers.yaml as yaml
import salt.serializers.yamlex as yamlex
import yaml as _yaml
from salt.serializers import SerializationError
from salt.serializers.yaml import EncryptedString
from salt.utils.odict import OrderedDict
from tests.support.helpers import ON_PY35
SKIP_MESSAGE = "{} is unavailable, have prerequisites been met?"
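# Most serializer modules expose an `available` flag (toml exposes HAS_TOML
# instead); the skipif markers below use these so the suite degrades gracefully
# when an optional dependency is missing.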
@pytest.mark.skipif(json.available is False, reason=SKIP_MESSAGE.format("json"))
def test_serialize_json():
data = {"foo": "bar"}
serialized = json.serialize(data)
assert serialized == '{"foo": "bar"}', serialized
deserialized = json.deserialize(serialized)
assert deserialized == data, deserialized
@pytest.mark.skipif(yaml.available is False, reason=SKIP_MESSAGE.format("yaml"))
def test_serialize_yaml():
data = {"foo": "bar", "encrypted_data": EncryptedString("foo")}
# The C dumper produces unquoted strings when serializing an
# EncryptedString, while the non-C dumper produces quoted strings.
expected = (
"{encrypted_data: !encrypted foo, foo: bar}"
if hasattr(_yaml, "CSafeDumper")
else "{encrypted_data: !encrypted 'foo', foo: bar}"
)
serialized = yaml.serialize(data)
assert serialized == expected, serialized
deserialized = yaml.deserialize(serialized)
assert deserialized == data, deserialized
@pytest.mark.skipif(yaml.available is False, reason=SKIP_MESSAGE.format("sls"))
def test_serialize_sls():
data = {"foo": "bar"}
serialized = yamlex.serialize(data)
assert serialized == "{foo: bar}", serialized
serialized = yamlex.serialize(data, default_flow_style=False)
assert serialized == "foo: bar", serialized
deserialized = yamlex.deserialize(serialized)
assert deserialized == data, deserialized
serialized = yaml.serialize(data)
assert serialized == "{foo: bar}", serialized
deserialized = yaml.deserialize(serialized)
assert deserialized == data, deserialized
serialized = yaml.serialize(data, default_flow_style=False)
assert serialized == "foo: bar", serialized
deserialized = yaml.deserialize(serialized)
assert deserialized == data, deserialized
@pytest.mark.skipif(yamlex.available is False, reason=SKIP_MESSAGE.format("sls"))
def test_serialize_complex_sls():
data = OrderedDict([("foo", 1), ("bar", 2), ("baz", True)])
serialized = yamlex.serialize(data)
assert serialized == "{foo: 1, bar: 2, baz: true}", serialized
deserialized = yamlex.deserialize(serialized)
assert deserialized == data, deserialized
serialized = yaml.serialize(data)
assert serialized == "{bar: 2, baz: true, foo: 1}", serialized
deserialized = yaml.deserialize(serialized)
assert deserialized == data, deserialized
@pytest.mark.skipif(yaml.available is False, reason=SKIP_MESSAGE.format("yaml"))
@pytest.mark.skipif(yamlex.available is False, reason=SKIP_MESSAGE.format("sls"))
def test_compare_sls_vs_yaml():
src = "{foo: 1, bar: 2, baz: {qux: true}}"
sls_data = yamlex.deserialize(src)
yml_data = yaml.deserialize(src)
# ensure that sls & yaml have the same base
assert isinstance(sls_data, dict)
assert isinstance(yml_data, dict)
assert sls_data == yml_data
# ensure that sls is ordered, while yaml not
assert isinstance(sls_data, OrderedDict)
assert not isinstance(yml_data, OrderedDict)
@pytest.mark.skipif(yaml.available is False, reason=SKIP_MESSAGE.format("yaml"))
@pytest.mark.skipif(yamlex.available is False, reason=SKIP_MESSAGE.format("sls"))
@pytest.mark.skipif(ON_PY35 is True, reason="This test is unreliable under Py3.5")
def test_compare_sls_vs_yaml_with_jinja():
tpl = "{{ data }}"
env = jinja2.Environment()
src = "{foo: 1, bar: 2, baz: {qux: true}}"
sls_src = env.from_string(tpl).render(data=yamlex.deserialize(src))
yml_src = env.from_string(tpl).render(data=yaml.deserialize(src))
sls_data = yamlex.deserialize(sls_src)
yml_data = yaml.deserialize(yml_src)
# ensure that sls & yaml have the same base
assert isinstance(sls_data, dict)
assert isinstance(yml_data, dict)
    # The below has been commented out because something in the loader test
# is modifying the yaml renderer to render things to unicode. Without
# running the loader test, the below passes. Even reloading the module
# from disk does not reset its internal state (per the Python docs).
##
# assert sls_data == yml_data
# ensure that sls is ordered, while yaml not
assert isinstance(sls_data, OrderedDict)
assert not isinstance(yml_data, OrderedDict)
    # prove that yaml does not handle OrderedDict well,
    # while sls is jinja friendly.
obj = OrderedDict([("foo", 1), ("bar", 2), ("baz", {"qux": True})])
sls_obj = yamlex.deserialize(yamlex.serialize(obj))
try:
yml_obj = yaml.deserialize(yaml.serialize(obj))
except SerializationError:
# BLAAM! yaml was unable to serialize OrderedDict,
# but it's not the purpose of the current test.
yml_obj = obj.copy()
sls_src = env.from_string(tpl).render(data=sls_obj)
yml_src = env.from_string(tpl).render(data=yml_obj)
final_obj = yaml.deserialize(sls_src)
assert obj == final_obj
# BLAAM! yml_src is not valid !
final_obj = OrderedDict(yaml.deserialize(yml_src))
assert obj != final_obj, "Objects matched! {} == {}".format(obj, final_obj)
@pytest.mark.skipif(yamlex.available is False, reason=SKIP_MESSAGE.format("sls"))
def test_sls_aggregate():
src = dedent(
"""
a: lol
foo: !aggregate hello
bar: !aggregate [1, 2, 3]
baz: !aggregate
a: 42
b: 666
c: the beast
"""
).strip()
# test that !aggregate is correctly parsed
sls_obj = yamlex.deserialize(src)
assert sls_obj == {
"a": "lol",
"foo": ["hello"],
"bar": [1, 2, 3],
"baz": {"a": 42, "b": 666, "c": "the beast"},
}, sls_obj
assert (
dedent(
"""
a: lol
foo: [hello]
bar: [1, 2, 3]
baz: {a: 42, b: 666, c: the beast}
"""
).strip()
== yamlex.serialize(sls_obj)
), sls_obj
# test that !aggregate aggregates scalars
src = dedent(
"""
placeholder: !aggregate foo
placeholder: !aggregate bar
placeholder: !aggregate baz
"""
).strip()
sls_obj = yamlex.deserialize(src)
assert sls_obj == {"placeholder": ["foo", "bar", "baz"]}, sls_obj
# test that !aggregate aggregates lists
src = dedent(
"""
placeholder: !aggregate foo
placeholder: !aggregate [bar, baz]
placeholder: !aggregate []
placeholder: !aggregate ~
"""
).strip()
sls_obj = yamlex.deserialize(src)
assert sls_obj == {"placeholder": ["foo", "bar", "baz"]}, sls_obj
# test that !aggregate aggregates dicts
src = dedent(
"""
placeholder: !aggregate {foo: 42}
placeholder: !aggregate {bar: null}
placeholder: !aggregate {baz: inga}
"""
).strip()
sls_obj = yamlex.deserialize(src)
assert sls_obj == {"placeholder": {"foo": 42, "bar": None, "baz": "inga"}}, sls_obj
# test that !aggregate aggregates deep dicts
src = dedent(
"""
placeholder: {foo: !aggregate {foo: 42}}
placeholder: {foo: !aggregate {bar: null}}
placeholder: {foo: !aggregate {baz: inga}}
"""
).strip()
sls_obj = yamlex.deserialize(src)
assert sls_obj == {
"placeholder": {"foo": {"foo": 42, "bar": None, "baz": "inga"}}
}, sls_obj
# test that {foo: !aggregate bar} and {!aggregate foo: bar}
# are roughly equivalent.
src = dedent(
"""
placeholder: {!aggregate foo: {foo: 42}}
placeholder: {!aggregate foo: {bar: null}}
placeholder: {!aggregate foo: {baz: inga}}
"""
).strip()
sls_obj = yamlex.deserialize(src)
assert sls_obj == {
"placeholder": {"foo": {"foo": 42, "bar": None, "baz": "inga"}}
}, sls_obj
@pytest.mark.skipif(yamlex.available is False, reason=SKIP_MESSAGE.format("sls"))
def test_sls_reset():
src = dedent(
"""
placeholder: {!aggregate foo: {foo: 42}}
placeholder: {!aggregate foo: {bar: null}}
!reset placeholder: {!aggregate foo: {baz: inga}}
"""
).strip()
sls_obj = yamlex.deserialize(src)
assert sls_obj == {"placeholder": {"foo": {"baz": "inga"}}}, sls_obj
@pytest.mark.skipif(yamlex.available is False, reason=SKIP_MESSAGE.format("sls"))
def test_sls_repr():
"""
Ensure that obj __repr__ and __str__ methods are yaml friendly.
"""
def convert(obj):
return yamlex.deserialize(yamlex.serialize(obj))
sls_obj = convert(OrderedDict([("foo", "bar"), ("baz", "qux")]))
# ensure that repr and str are yaml friendly
assert sls_obj.__str__() == "{foo: bar, baz: qux}"
assert sls_obj.__repr__() == "{foo: bar, baz: qux}"
# ensure that repr and str are already quoted
assert sls_obj["foo"].__str__() == '"bar"'
assert sls_obj["foo"].__repr__() == '"bar"'
@pytest.mark.skipif(yamlex.available is False, reason=SKIP_MESSAGE.format("sls"))
def test_sls_micking_file_merging():
def convert(obj):
return yamlex.deserialize(yamlex.serialize(obj))
    # let's say that we have 2 pillar files
src1 = dedent(
"""
a: first
b: !aggregate first
c:
subkey1: first
subkey2: !aggregate first
"""
).strip()
src2 = dedent(
"""
a: second
b: !aggregate second
c:
subkey2: !aggregate second
subkey3: second
"""
).strip()
sls_obj1 = yamlex.deserialize(src1)
sls_obj2 = yamlex.deserialize(src2)
sls_obj3 = yamlex.merge_recursive(sls_obj1, sls_obj2)
assert sls_obj3 == {
"a": "second",
"b": ["first", "second"],
"c": {"subkey2": ["first", "second"], "subkey3": "second"},
}, sls_obj3
@pytest.mark.skipif(msgpack.available is False, reason=SKIP_MESSAGE.format("msgpack"))
def test_msgpack():
data = OrderedDict([("foo", 1), ("bar", 2), ("baz", True)])
serialized = msgpack.serialize(data)
deserialized = msgpack.deserialize(serialized)
assert deserialized == data, deserialized
@pytest.mark.skipif(python.available is False, reason=SKIP_MESSAGE.format("python"))
def test_serialize_python():
data = {"foo": "bar"}
serialized = python.serialize(data)
expected = repr({"foo": "bar"})
assert serialized == expected, serialized
@pytest.mark.skipif(
configparser.available is False, reason=SKIP_MESSAGE.format("configparser")
)
def test_configparser():
data = {"foo": {"bar": "baz"}}
# configparser appends empty lines
serialized = configparser.serialize(data).strip()
assert serialized == "[foo]\nbar = baz", serialized
deserialized = configparser.deserialize(serialized)
assert deserialized == data, deserialized
@pytest.mark.skipif(toml.HAS_TOML is False, reason=SKIP_MESSAGE.format("toml"))
def test_serialize_toml():
data = {"foo": "bar"}
serialized = toml.serialize(data)
assert serialized == 'foo = "bar"\n', serialized
deserialized = toml.deserialize(serialized)
assert deserialized == data, deserialized
@pytest.mark.skipif(plist.available is False, reason=SKIP_MESSAGE.format("plist"))
def test_serialize_plist():
data = {"foo": "bar"}
serialized = plist.serialize(data)
expected = (
b'<?xml version="1.0" encoding="UTF-8"?>\n'
b'<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"'
b' "http://www.apple.com/DTDs/PropertyList-1.0.dtd">\n'
b'<plist version="1.0">\n'
b"<dict>\n"
b"\t<key>foo</key>\n"
b"\t<string>bar</string>\n"
b"</dict>\n"
b"</plist>\n"
)
assert serialized == expected, serialized
deserialized = plist.deserialize(serialized)
assert deserialized == data, deserialized
@pytest.mark.skipif(plist.available is False, reason=SKIP_MESSAGE.format("plist"))
def test_serialize_binary_plist():
data = {"foo": "bar"}
serialized = plist.serialize(data, fmt="FMT_BINARY")
deserialized = plist.deserialize(serialized)
assert deserialized == data, deserialized
|
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Triggers define what causes a Jenkins job to start building.
**Component**: triggers
:Macro: trigger
:Entry Point: jenkins_jobs.triggers
Example::
job:
name: test_job
triggers:
- timed: '@daily'
"""
import six
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
from jenkins_jobs.errors import (InvalidAttributeError,
JenkinsJobsException)
import logging
import re
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
logger = logging.getLogger(str(__name__))
def gerrit_handle_legacy_configuration(data):
hyphenizer = re.compile("[A-Z]")
def hyphenize(attr):
"""Convert strings like triggerOn to trigger-on.
"""
return hyphenizer.sub(lambda x: "-%s" % x.group(0).lower(),
attr)
def convert_dict(d, old_keys):
for old_key in old_keys:
if old_key in d:
new_key = hyphenize(old_key)
logger.warn("'%s' is deprecated and will be removed after "
"1.0.0, please use '%s' instead", old_key, new_key)
d[new_key] = d[old_key]
del d[old_key]
convert_dict(data, [
'triggerOnPatchsetUploadedEvent',
'triggerOnChangeAbandonedEvent',
'triggerOnChangeMergedEvent',
'triggerOnChangeRestoredEvent',
'triggerOnCommentAddedEvent',
'triggerOnDraftPublishedEvent',
'triggerOnRefUpdatedEvent',
'triggerApprovalCategory',
'triggerApprovalValue',
'overrideVotes',
'gerritBuildSuccessfulVerifiedValue',
'gerritBuildFailedVerifiedValue',
'failureMessage',
'skipVote',
])
for project in data['projects']:
convert_dict(project, [
'projectCompareType',
'projectPattern',
'branchCompareType',
'branchPattern',
])
old_format_events = OrderedDict(
(key, should_register) for key, should_register in six.iteritems(data)
if key.startswith('trigger-on-'))
trigger_on = data.setdefault('trigger-on', [])
if old_format_events:
logger.warn("The events: %s; which you used is/are deprecated. "
"Please use 'trigger-on' instead.",
', '.join(old_format_events))
if old_format_events and trigger_on:
raise JenkinsJobsException(
            'Both the new format (trigger-on) and the old format '
            '(trigger-on-*) of gerrit events were found. Please use either '
            'the new or the old format of trigger events definition.')
trigger_on.extend(event_name[len('trigger-on-'):]
for event_name, should_register
in six.iteritems(old_format_events) if should_register)
for idx, event in enumerate(trigger_on):
if event == 'comment-added-event':
trigger_on[idx] = events = OrderedDict()
events['comment-added-event'] = OrderedDict((
('approval-category', data['trigger-approval-category']),
('approval-value', data['trigger-approval-value'])
))
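# Rough sketch of the conversion above (hypothetical data, not taken from the
# test fixtures): a legacy camelCase key such as
#     {'triggerOnPatchsetUploadedEvent': True, ...}
# is first hyphenized to 'trigger-on-patchset-uploaded-event' and then folded
# into the new-style list, so build_gerrit_triggers() sees
#     {'trigger-on': ['patchset-uploaded-event'], ...}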
def build_gerrit_triggers(xml_parent, data):
available_simple_triggers = {
'change-abandoned-event': 'PluginChangeAbandonedEvent',
'change-merged-event': 'PluginChangeMergedEvent',
'change-restored-event': 'PluginChangeRestoredEvent',
'draft-published-event': 'PluginDraftPublishedEvent',
'patchset-uploaded-event': 'PluginPatchsetCreatedEvent',
'patchset-created-event': 'PluginPatchsetCreatedEvent',
'ref-updated-event': 'PluginRefUpdatedEvent',
}
tag_namespace = 'com.sonyericsson.hudson.plugins.gerrit.trigger.' \
'hudsontrigger.events'
trigger_on_events = XML.SubElement(xml_parent, 'triggerOnEvents')
for event in data.get('trigger-on', []):
if isinstance(event, six.string_types):
tag_name = available_simple_triggers.get(event)
if event == 'patchset-uploaded-event':
logger.warn("'%s' is deprecated. Use 'patchset-created-event' "
"format instead.", event)
if not tag_name:
                known = ', '.join(
                    list(available_simple_triggers.keys())
                    + ['comment-added-event',
                       'comment-added-contains-event'])
msg = ("The event '%s' under 'trigger-on' is not one of the "
"known: %s.") % (event, known)
raise JenkinsJobsException(msg)
XML.SubElement(trigger_on_events,
'%s.%s' % (tag_namespace, tag_name))
else:
if 'patchset-created-event' in event.keys():
pce = event['patchset-created-event']
pc = XML.SubElement(
trigger_on_events,
'%s.%s' % (tag_namespace, 'PluginPatchsetCreatedEvent'))
XML.SubElement(pc, 'excludeDrafts').text = str(
pce.get('exclude-drafts', False)).lower()
XML.SubElement(pc, 'excludeTrivialRebase').text = str(
pce.get('exclude-trivial-rebase', False)).lower()
XML.SubElement(pc, 'excludeNoCodeChange').text = str(
pce.get('exclude-no-code-change', False)).lower()
if 'comment-added-event' in event.keys():
comment_added_event = event['comment-added-event']
cadded = XML.SubElement(
trigger_on_events,
'%s.%s' % (tag_namespace, 'PluginCommentAddedEvent'))
XML.SubElement(cadded, 'verdictCategory').text = \
comment_added_event['approval-category']
XML.SubElement(
cadded,
'commentAddedTriggerApprovalValue').text = \
str(comment_added_event['approval-value'])
if 'comment-added-contains-event' in event.keys():
comment_added_event = event['comment-added-contains-event']
caddedc = XML.SubElement(
trigger_on_events,
'%s.%s' % (tag_namespace,
'PluginCommentAddedContainsEvent'))
XML.SubElement(caddedc, 'commentAddedCommentContains').text = \
comment_added_event['comment-contains-value']
def build_gerrit_skip_votes(xml_parent, data):
outcomes = [('successful', 'onSuccessful'),
('failed', 'onFailed'),
('unstable', 'onUnstable'),
('notbuilt', 'onNotBuilt')]
skip_vote_node = XML.SubElement(xml_parent, 'skipVote')
skip_vote = data.get('skip-vote', {})
for result_kind, tag_name in outcomes:
if skip_vote.get(result_kind, False):
XML.SubElement(skip_vote_node, tag_name).text = 'true'
else:
XML.SubElement(skip_vote_node, tag_name).text = 'false'
def gerrit(parser, xml_parent, data):
"""yaml: gerrit
Trigger on a Gerrit event.
Requires the Jenkins :jenkins-wiki:`Gerrit Trigger Plugin <Gerrit+Trigger>`
version >= 2.6.0.
:arg list trigger-on: Events to react on. Please use either the new
**trigger-on**, or the old **trigger-on-*** events definitions. You
cannot use both at once.
.. _trigger_on:
:Trigger on:
* **patchset-created-event** (`dict`) -- Trigger upon patchset
creation.
:Patchset created:
* **exclude-drafts** (`bool`) -- exclude drafts (Default: False)
* **exclude-trivial-rebase** (`bool`) -- exclude trivial rebase
(Default: False)
* **exclude-no-code-change** (`bool`) -- exclude no code change
(Default: False)
Exclude drafts|trivial-rebase|no-code-change needs
Gerrit Trigger v2.12.0
* **patchset-uploaded-event** -- Trigger upon patchset creation
          (this is an alias for `patchset-created-event`).
.. deprecated:: 1.1.0 Please use :ref:`trigger-on <trigger_on>`.
* **change-abandoned-event** -- Trigger on patchset abandoned.
Requires Gerrit Trigger Plugin version >= 2.8.0.
* **change-merged-event** -- Trigger on change merged
* **change-restored-event** -- Trigger on change restored. Requires
Gerrit Trigger Plugin version >= 2.8.0
* **draft-published-event** -- Trigger on draft published event.
* **ref-updated-event** -- Trigger on ref-updated.
* **comment-added-event** (`dict`) -- Trigger on comment added.
:Comment added:
* **approval-category** (`str`) -- Approval (verdict) category
(for example 'APRV', 'CRVW', 'VRIF' -- see `Gerrit access
control
<http://gerrit.googlecode.com/svn/documentation/2.1/
          access-control.html#categories>`_)
* **approval-value** -- Approval value for the comment added.
* **comment-added-contains-event** (`dict`) -- Trigger on comment
added contains Regular Expression.
:Comment added contains:
* **comment-contains-value** (`str`) -- Comment contains
Regular Expression value.
:arg bool trigger-on-patchset-uploaded-event: Trigger on patchset upload.
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-change-abandoned-event: Trigger on change abandoned.
Requires Gerrit Trigger Plugin version >= 2.8.0
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-change-merged-event: Trigger on change merged
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-change-restored-event: Trigger on change restored.
Requires Gerrit Trigger Plugin version >= 2.8.0
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-comment-added-event: Trigger on comment added
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-draft-published-event: Trigger on draft published
event
.. deprecated:: 1.1.0 Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-ref-updated-event: Trigger on ref-updated
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg str trigger-approval-category: Approval category for comment added
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg int trigger-approval-value: Approval value for comment added
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool override-votes: Override default vote values
:arg int gerrit-build-started-verified-value: Started ''Verified'' value
:arg int gerrit-build-successful-verified-value: Successful ''Verified''
value
:arg int gerrit-build-failed-verified-value: Failed ''Verified'' value
:arg int gerrit-build-unstable-verified-value: Unstable ''Verified'' value
:arg int gerrit-build-notbuilt-verified-value: Not built ''Verified''
value
:arg int gerrit-build-started-codereview-value: Started ''CodeReview''
value
:arg int gerrit-build-successful-codereview-value: Successful
''CodeReview'' value
:arg int gerrit-build-failed-codereview-value: Failed ''CodeReview'' value
:arg int gerrit-build-unstable-codereview-value: Unstable ''CodeReview''
value
:arg int gerrit-build-notbuilt-codereview-value: Not built ''CodeReview''
value
:arg str failure-message: Message to leave on failure (default '')
:arg str successful-message: Message to leave on success (default '')
:arg str unstable-message: Message to leave when unstable (default '')
:arg str notbuilt-message: Message to leave when not built (default '')
:arg str failure-message-file: Sets the filename within the workspace from
which to retrieve the unsuccessful review message. (optional)
:arg list projects: list of projects to match
:Project: * **project-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
''REG_EXP''
* **project-pattern** (`str`) -- Project name pattern to match
* **branch-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
''REG_EXP'' (not used if `branches` list is specified)
.. deprecated:: 1.1.0 Please use :ref:`branches <branches>`.
* **branch-pattern** (`str`) -- Branch name pattern to match
(not used if `branches` list is specified)
.. deprecated:: 1.1.0 Please use :ref:`branches <branches>`.
.. _branches:
* **branches** (`list`) -- List of branches to match
(optional)
:Branch: * **branch-compare-type** (`str`) -- ''PLAIN'',
''ANT'' or ''REG_EXP'' (optional) (default
''PLAIN'')
* **branch-pattern** (`str`) -- Branch name pattern
to match
* **file-paths** (`list`) -- List of file paths to match
(optional)
:File Path: * **compare-type** (`str`) -- ''PLAIN'', ''ANT''
or ''REG_EXP'' (optional) (default ''PLAIN'')
* **pattern** (`str`) -- File path pattern to
match
* **topics** (`list`) -- List of topics to match
(optional)
            :Topic: * **compare-type** (`str`) -- ''PLAIN'', ''ANT''
or ''REG_EXP'' (optional) (default ''PLAIN'')
* **pattern** (`str`) -- Topic name pattern to
match
:arg dict skip-vote: map of build outcomes for which Jenkins must skip
vote. Requires Gerrit Trigger Plugin version >= 2.7.0
:Outcome: * **successful** (`bool`)
* **failed** (`bool`)
* **unstable** (`bool`)
* **notbuilt** (`bool`)
:arg bool silent: When silent mode is on there will be no communication
back to Gerrit, i.e. no build started/failed/successful approve
messages etc. If other non-silent jobs are triggered by the same
Gerrit event as this job, the result of this job's build will not be
counted in the end result of the other jobs. (default false)
:arg bool silent-start: Sets silent start mode to on or off. When silent
start mode is on there will be no 'build started' messages sent back
to Gerrit. (default false)
:arg bool escape-quotes: escape quotes in the values of Gerrit change
parameters (default true)
:arg bool no-name-and-email: Do not pass compound 'name and email'
parameters (default false)
:arg bool readable-message: If parameters regarding multiline text,
e.g. commit message, should be as human readable or not. If false,
those parameters are Base64 encoded to keep environment variables
clean. (default false)
:arg str dependency-jobs: All jobs on which this job depends. If a commit
should trigger both a dependency and this job, the dependency will be
built first. Use commas to separate job names. Beware of cyclic
dependencies. (optional)
:arg str notification-level: Defines to whom email notifications should be
sent. This can either be nobody ('NONE'), the change owner ('OWNER'),
reviewers and change owner ('OWNER_REVIEWERS'), all interested users
i.e. owning, reviewing, watching, and starring ('ALL') or server
default ('SERVER_DEFAULT'). (default 'SERVER_DEFAULT')
:arg bool dynamic-trigger-enabled: Enable/disable the dynamic trigger
(default false)
:arg str dynamic-trigger-url: if you specify this option, the Gerrit
trigger configuration will be fetched from there on a regular interval
:arg bool trigger-for-unreviewed-patches: trigger patchset-created events
for changes that were uploaded while connection to Gerrit was down
(default false). Requires Gerrit Trigger Plugin version >= 2.11.0
:arg str custom-url: Custom URL for a message sent to Gerrit. Build
details URL will be used if empty. (default '')
:arg str server-name: Name of the server to trigger on, or ''__ANY__'' to
trigger on any configured Gerrit server (default '__ANY__'). Requires
Gerrit Trigger Plugin version >= 2.11.0
You may select one or more Gerrit events upon which to trigger.
You must also supply at least one project and branch, optionally
more. If you select the comment-added trigger, you should also
indicate which approval category and value you want to trigger the
job.
Until version 0.4.0 of Jenkins Job Builder, camelCase keys were used to
configure Gerrit Trigger Plugin, instead of hyphenated-keys. While still
    supported, camelCase keys are deprecated and should not be used. Support
for this will be removed after 1.0.0 is released.
Example:
.. literalinclude:: /../../tests/triggers/fixtures/gerrit004.yaml
:language: yaml
"""
gerrit_handle_legacy_configuration(data)
projects = data['projects']
gtrig = XML.SubElement(xml_parent,
'com.sonyericsson.hudson.plugins.gerrit.trigger.'
'hudsontrigger.GerritTrigger')
XML.SubElement(gtrig, 'spec')
gprojects = XML.SubElement(gtrig, 'gerritProjects')
for project in projects:
gproj = XML.SubElement(gprojects,
'com.sonyericsson.hudson.plugins.gerrit.'
'trigger.hudsontrigger.data.GerritProject')
XML.SubElement(gproj, 'compareType').text = \
project['project-compare-type']
XML.SubElement(gproj, 'pattern').text = project['project-pattern']
branches = XML.SubElement(gproj, 'branches')
project_branches = project.get('branches', [])
if 'branch-compare-type' in project and 'branch-pattern' in project:
warning = 'branch-compare-type and branch-pattern at project ' \
'level are deprecated and support will be removed ' \
'in a later version of Jenkins Job Builder; '
if project_branches:
warning += 'discarding values and using values from ' \
'branches section'
else:
warning += 'please use branches section instead'
logger.warn(warning)
if not project_branches:
project_branches = [
{'branch-compare-type': project['branch-compare-type'],
'branch-pattern': project['branch-pattern']}]
for branch in project_branches:
gbranch = XML.SubElement(
branches, 'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.Branch')
XML.SubElement(gbranch, 'compareType').text = \
branch['branch-compare-type']
XML.SubElement(gbranch, 'pattern').text = branch['branch-pattern']
project_file_paths = project.get('file-paths', [])
if project_file_paths:
fps_tag = XML.SubElement(gproj, 'filePaths')
for file_path in project_file_paths:
fp_tag = XML.SubElement(fps_tag,
'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.'
'FilePath')
XML.SubElement(fp_tag, 'compareType').text = \
file_path.get('compare-type', 'PLAIN')
XML.SubElement(fp_tag, 'pattern').text = file_path['pattern']
topics = project.get('topics', [])
if topics:
topics_tag = XML.SubElement(gproj, 'topics')
for topic in topics:
topic_tag = XML.SubElement(topics_tag,
'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.'
'Topic')
XML.SubElement(topic_tag, 'compareType').text = \
topic.get('compare-type', 'PLAIN')
XML.SubElement(topic_tag, 'pattern').text = topic['pattern']
build_gerrit_skip_votes(gtrig, data)
XML.SubElement(gtrig, 'silentMode').text = str(
data.get('silent', False)).lower()
XML.SubElement(gtrig, 'silentStartMode').text = str(
data.get('silent-start', False)).lower()
XML.SubElement(gtrig, 'escapeQuotes').text = str(
data.get('escape-quotes', True)).lower()
XML.SubElement(gtrig, 'noNameAndEmailParameters').text = str(
data.get('no-name-and-email', False)).lower()
XML.SubElement(gtrig, 'readableMessage').text = str(
data.get('readable-message', False)).lower()
XML.SubElement(gtrig, 'dependencyJobsNames').text = str(
data.get('dependency-jobs', ''))
notification_levels = ['NONE', 'OWNER', 'OWNER_REVIEWERS', 'ALL',
'SERVER_DEFAULT']
notification_level = data.get('notification-level', 'SERVER_DEFAULT')
if notification_level not in notification_levels:
raise InvalidAttributeError('notification-level', notification_level,
notification_levels)
if notification_level == 'SERVER_DEFAULT':
XML.SubElement(gtrig, 'notificationLevel').text = ''
else:
XML.SubElement(gtrig, 'notificationLevel').text = notification_level
XML.SubElement(gtrig, 'dynamicTriggerConfiguration').text = str(
data.get('dynamic-trigger-enabled', False))
XML.SubElement(gtrig, 'triggerConfigURL').text = str(
data.get('dynamic-trigger-url', ''))
XML.SubElement(gtrig, 'allowTriggeringUnreviewedPatches').text = str(
data.get('trigger-for-unreviewed-patches', False)).lower()
build_gerrit_triggers(gtrig, data)
override = str(data.get('override-votes', False)).lower()
if override == 'true':
for yamlkey, xmlkey in [('gerrit-build-started-verified-value',
'gerritBuildStartedVerifiedValue'),
('gerrit-build-successful-verified-value',
'gerritBuildSuccessfulVerifiedValue'),
('gerrit-build-failed-verified-value',
'gerritBuildFailedVerifiedValue'),
('gerrit-build-unstable-verified-value',
'gerritBuildUnstableVerifiedValue'),
('gerrit-build-notbuilt-verified-value',
'gerritBuildNotBuiltVerifiedValue'),
('gerrit-build-started-codereview-value',
'gerritBuildStartedCodeReviewValue'),
('gerrit-build-successful-codereview-value',
'gerritBuildSuccessfulCodeReviewValue'),
('gerrit-build-failed-codereview-value',
'gerritBuildFailedCodeReviewValue'),
('gerrit-build-unstable-codereview-value',
'gerritBuildUnstableCodeReviewValue'),
('gerrit-build-notbuilt-codereview-value',
'gerritBuildNotBuiltCodeReviewValue')]:
if data.get(yamlkey) is not None:
# str(int(x)) makes input values like '+1' work
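                # (int('+1') == 1, so a value of '+1' in the YAML is emitted
                # as plain '1')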
XML.SubElement(gtrig, xmlkey).text = str(
int(data.get(yamlkey)))
XML.SubElement(gtrig, 'buildStartMessage').text = str(
data.get('start-message', ''))
XML.SubElement(gtrig, 'buildFailureMessage').text = \
data.get('failure-message', '')
XML.SubElement(gtrig, 'buildSuccessfulMessage').text = str(
data.get('successful-message', ''))
XML.SubElement(gtrig, 'buildUnstableMessage').text = str(
data.get('unstable-message', ''))
XML.SubElement(gtrig, 'buildNotBuiltMessage').text = str(
data.get('notbuilt-message', ''))
XML.SubElement(gtrig, 'buildUnsuccessfulFilepath').text = str(
data.get('failure-message-file', ''))
XML.SubElement(gtrig, 'customUrl').text = str(data.get('custom-url', ''))
XML.SubElement(gtrig, 'serverName').text = str(
data.get('server-name', '__ANY__'))
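# A minimal illustrative job definition for the gerrit trigger above
# (hypothetical, not one of the shipped fixture files):
#
#   triggers:
#     - gerrit:
#         trigger-on:
#           - patchset-created-event
#         projects:
#           - project-compare-type: 'PLAIN'
#             project-pattern: 'example/project'
#             branches:
#               - branch-compare-type: 'ANT'
#                 branch-pattern: '**'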
def pollscm(parser, xml_parent, data):
"""yaml: pollscm
Poll the SCM to determine if there has been a change.
:arg string pollscm: the polling interval (cron syntax)
Example:
.. literalinclude:: /../../tests/triggers/fixtures/pollscm001.yaml
:language: yaml
"""
scmtrig = XML.SubElement(xml_parent, 'hudson.triggers.SCMTrigger')
XML.SubElement(scmtrig, 'spec').text = data
def build_pollurl_content_type(xml_parent, entries, prefix,
collection_name, element_name):
namespace = 'org.jenkinsci.plugins.urltrigger.content'
content_type = XML.SubElement(
xml_parent, '{0}.{1}ContentType'.format(namespace, prefix))
if entries:
collection = XML.SubElement(content_type, collection_name)
for entry in entries:
content_entry = XML.SubElement(
collection, '{0}.{1}ContentEntry'.format(namespace, prefix))
XML.SubElement(content_entry, element_name).text = entry
def pollurl(parser, xml_parent, data):
"""yaml: pollurl
Trigger when the HTTP response from a URL changes.
Requires the Jenkins :jenkins-wiki:`URLTrigger Plugin <URLTrigger+Plugin>`.
:arg string cron: cron syntax of when to run (default '')
:arg string polling-node: Restrict where the polling should run.
(optional)
:arg list urls: List of URLs to monitor
:URL: * **url** (`str`) -- URL to monitor for changes (required)
* **proxy** (`bool`) -- Activate the Jenkins proxy (default false)
* **timeout** (`int`) -- Connect/read timeout in seconds
(default 300)
* **username** (`string`) -- User name for basic authentication
(optional)
* **password** (`string`) -- Password for basic authentication
(optional)
* **check-status** (`int`) -- Check for a specific HTTP status
code (optional)
* **check-etag** (`bool`) -- Check the HTTP ETag for changes
(default false)
* **check-date** (`bool`) -- Check the last modification date of
the URL (default false)
* **check-content** (`list`) -- List of content type changes to
monitor
:Content Type: * **simple** (`bool`) -- Trigger on any change to
the content of the URL (default false)
* **json** (`list`) -- Trigger on any change to
the listed JSON paths
* **text** (`list`) -- Trigger on any change to
the listed regular expressions
* **xml** (`list`) -- Trigger on any change to
the listed XPath expressions
Example:
.. literalinclude:: /../../tests/triggers/fixtures/pollurl001.yaml
"""
valid_content_types = {
'simple': ['Simple', '', '', []],
'json': ['JSON', 'jsonPaths', 'jsonPath', None],
'text': ['TEXT', 'regExElements', 'regEx', None],
'xml': ['XML', 'xPaths', 'xPath', None]
}
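    # Each value above is [XML class prefix, collection tag, entry tag,
    # fixed sub-entries]; 'simple' always passes an empty list of sub-entries,
    # the other types pass the user-supplied list of paths/expressions.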
urltrig = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.urltrigger.URLTrigger')
node = data.get('polling-node')
XML.SubElement(urltrig, 'spec').text = data.get('cron', '')
XML.SubElement(urltrig, 'labelRestriction').text = str(bool(node)).lower()
if node:
XML.SubElement(urltrig, 'triggerLabel').text = node
entries = XML.SubElement(urltrig, 'entries')
urls = data.get('urls', [])
if not urls:
raise JenkinsJobsException('At least one url must be provided')
for url in urls:
entry = XML.SubElement(entries,
'org.jenkinsci.plugins.urltrigger.'
'URLTriggerEntry')
XML.SubElement(entry, 'url').text = url['url']
XML.SubElement(entry, 'proxyActivated').text = \
str(url.get('proxy', False)).lower()
if 'username' in url:
XML.SubElement(entry, 'username').text = url['username']
if 'password' in url:
XML.SubElement(entry, 'password').text = url['password']
if 'check-status' in url:
XML.SubElement(entry, 'checkStatus').text = 'true'
XML.SubElement(entry, 'statusCode').text = \
str(url.get('check-status'))
else:
XML.SubElement(entry, 'checkStatus').text = 'false'
XML.SubElement(entry, 'statusCode').text = '200'
XML.SubElement(entry, 'timeout').text = \
str(url.get('timeout', 300))
XML.SubElement(entry, 'checkETag').text = \
str(url.get('check-etag', False)).lower()
XML.SubElement(entry, 'checkLastModificationDate').text = \
str(url.get('check-date', False)).lower()
check_content = url.get('check-content', [])
XML.SubElement(entry, 'inspectingContent').text = \
str(bool(check_content)).lower()
content_types = XML.SubElement(entry, 'contentTypes')
        for check in check_content:
            type_name = next(iter(check.keys()))
            if type_name not in valid_content_types:
                raise JenkinsJobsException('check-content must be one of : %s'
                                           % ', '.join(valid_content_types.
                                                       keys()))
            content_type = valid_content_types.get(type_name)
            if check[type_name]:
                sub_entries = content_type[3]
                if sub_entries is None:
                    sub_entries = check[type_name]
                build_pollurl_content_type(content_types,
                                           sub_entries,
                                           *content_type[0:3])
def timed(parser, xml_parent, data):
"""yaml: timed
Trigger builds at certain times.
:Parameter: when to run the job (cron syntax)
Example::
triggers:
- timed: "@midnight"
"""
scmtrig = XML.SubElement(xml_parent, 'hudson.triggers.TimerTrigger')
XML.SubElement(scmtrig, 'spec').text = data
def github(parser, xml_parent, data):
"""yaml: github
Trigger a job when github repository is pushed to.
Requires the Jenkins :jenkins-wiki:`GitHub Plugin <GitHub+Plugin>`.
Example::
triggers:
- github
"""
ghtrig = XML.SubElement(xml_parent, 'com.cloudbees.jenkins.'
'GitHubPushTrigger')
XML.SubElement(ghtrig, 'spec').text = ''
def github_pull_request(parser, xml_parent, data):
"""yaml: github-pull-request
Build pull requests in github and report results.
Requires the Jenkins :jenkins-wiki:`GitHub Pull Request Builder Plugin
<GitHub+pull+request+builder+plugin>`.
:arg list admin-list: the users with admin rights (optional)
:arg list white-list: users whose pull requests build (optional)
:arg list org-list: orgs whose users should be white listed (optional)
:arg bool allow-whitelist-orgs-as-admins: members of white listed orgs
will have admin rights. (default false)
:arg string cron: cron syntax of when to run (optional)
:arg string trigger-phrase: when filled, commenting this phrase
in the pull request will trigger a build (optional)
:arg bool only-trigger-phrase: only commenting the trigger phrase
in the pull request will trigger a build (default false)
:arg bool github-hooks: use github hook (default false)
:arg bool permit-all: build every pull request automatically
without asking (default false)
:arg bool auto-close-on-fail: close failed pull request automatically
(default false)
:arg list white-list-target-branches: Adding branches to this whitelist
allows you to selectively test pull requests destined for these
branches only. Supports regular expressions (e.g. 'master',
'feature-.*'). (optional)
:arg string commit-status-context: Context to tag a build on GitHub
(default default)
Example:
.. literalinclude:: /../../tests/triggers/fixtures/github-pull-request.yaml
"""
ghprb = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.ghprb.'
'GhprbTrigger')
XML.SubElement(ghprb, 'spec').text = data.get('cron', '')
admin_string = "\n".join(data.get('admin-list', []))
XML.SubElement(ghprb, 'adminlist').text = admin_string
XML.SubElement(ghprb, 'allowMembersOfWhitelistedOrgsAsAdmin').text = str(
data.get('allow-whitelist-orgs-as-admins', False)).lower()
white_string = "\n".join(data.get('white-list', []))
XML.SubElement(ghprb, 'whitelist').text = white_string
org_string = "\n".join(data.get('org-list', []))
XML.SubElement(ghprb, 'orgslist').text = org_string
XML.SubElement(ghprb, 'cron').text = data.get('cron', '')
XML.SubElement(ghprb, 'triggerPhrase').text = \
data.get('trigger-phrase', '')
XML.SubElement(ghprb, 'onlyTriggerPhrase').text = str(
data.get('only-trigger-phrase', False)).lower()
XML.SubElement(ghprb, 'useGitHubHooks').text = str(
data.get('github-hooks', False)).lower()
XML.SubElement(ghprb, 'permitAll').text = str(
data.get('permit-all', False)).lower()
XML.SubElement(ghprb, 'autoCloseFailedPullRequests').text = str(
data.get('auto-close-on-fail', False)).lower()
white_list_target_branches = data.get('white-list-target-branches', [])
if white_list_target_branches:
ghprb_wltb = XML.SubElement(ghprb, 'whiteListTargetBranches')
for branch in white_list_target_branches:
be = XML.SubElement(ghprb_wltb, 'org.jenkinsci.plugins.'
'ghprb.GhprbBranch')
XML.SubElement(be, 'branch').text = str(branch)
if data.get('commit-status-context'):
extensions = XML.SubElement(ghprb, 'extensions')
extension_tag = ('org.jenkinsci.plugins.ghprb.extensions'
'.status.GhprbSimpleStatus')
extension_status = XML.SubElement(extensions, extension_tag)
status = XML.SubElement(extension_status, 'commitStatusContext')
status.text = str(data.get('commit-status-context'))
def gitlab_merge_request(parser, xml_parent, data):
"""yaml: gitlab-merge-request
Build merge requests in gitlab and report results.
Requires the Jenkins :jenkins-wiki:`Gitlab MergeRequest Builder Plugin.
<Gitlab+Merge+Request+Builder+Plugin>`.
:arg string cron: cron syntax of when to run (required)
:arg string project-path: gitlab-relative path to project (required)
Example:
.. literalinclude:: \
/../../tests/triggers/fixtures/gitlab-merge-request.yaml
"""
ghprb = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.gitlab.'
'GitlabBuildTrigger')
if not data.get('cron', None):
raise jenkins_jobs.errors.JenkinsJobsException(
'gitlab-merge-request is missing "cron"')
if not data.get('project-path', None):
raise jenkins_jobs.errors.JenkinsJobsException(
'gitlab-merge-request is missing "project-path"')
# Because of a design limitation in the GitlabBuildTrigger Jenkins plugin
# both 'spec' and '__cron' have to be set to the same value to have them
# take effect. Also, cron and projectPath are prefixed with underscores
# in the plugin, but spec is not.
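    # With e.g. cron 'H/5 * * * *' and project-path 'group/project' (values
    # illustrative) the emitted trigger therefore contains:
    #   <spec>H/5 * * * *</spec>
    #   <__cron>H/5 * * * *</__cron>
    #   <__projectPath>group/project</__projectPath>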
XML.SubElement(ghprb, 'spec').text = data.get('cron')
XML.SubElement(ghprb, '__cron').text = data.get('cron')
XML.SubElement(ghprb, '__projectPath').text = data.get('project-path')
def build_result(parser, xml_parent, data):
"""yaml: build-result
Configure jobB to monitor jobA build result. A build is scheduled if there
is a new build result that matches your criteria (unstable, failure, ...).
Requires the Jenkins :jenkins-wiki:`BuildResultTrigger Plugin
<BuildResultTrigger+Plugin>`.
:arg list groups: List groups of jobs and results to monitor for
:arg list jobs: The jobs to monitor (required)
:arg list results: Build results to monitor for (default success)
:arg bool combine: Combine all job information. A build will be
scheduled only if all conditions are met (default false)
:arg str cron: The cron syntax with which to poll the jobs for the
supplied result (default '')
Example::
triggers:
- build-result:
combine: true
cron: '* * * * *'
groups:
- jobs:
- foo
- example
results:
- unstable
- jobs:
- foo2
results:
- not-built
- aborted
"""
brt = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.'
'buildresulttrigger.BuildResultTrigger')
XML.SubElement(brt, 'spec').text = data.get('cron', '')
XML.SubElement(brt, 'combinedJobs').text = str(
data.get('combine', False)).lower()
jobs_info = XML.SubElement(brt, 'jobsInfo')
result_dict = {'success': 'SUCCESS',
'unstable': 'UNSTABLE',
'failure': 'FAILURE',
'not-built': 'NOT_BUILT',
'aborted': 'ABORTED'}
for group in data['groups']:
brti = XML.SubElement(jobs_info, 'org.jenkinsci.plugins.'
'buildresulttrigger.model.'
'BuildResultTriggerInfo')
if not group.get('jobs', []):
            raise jenkins_jobs.errors.\
                JenkinsJobsException("'jobs' is missing and is a required"
                                     " element")
jobs_string = ",".join(group['jobs'])
XML.SubElement(brti, 'jobNames').text = jobs_string
checked_results = XML.SubElement(brti, 'checkedResults')
for result in group.get('results', ['success']):
if result not in result_dict:
raise jenkins_jobs.errors.\
JenkinsJobsException('Result entered is not valid,'
' must be one of: '
+ ', '.join(result_dict.keys()))
model_checked = XML.SubElement(checked_results, 'org.jenkinsci.'
'plugins.buildresulttrigger.model.'
'CheckedResult')
XML.SubElement(model_checked, 'checked').text = result_dict[result]
def reverse(parser, xml_parent, data):
"""yaml: reverse
This trigger can be configured in the UI using the checkbox with the
following text: 'Build after other projects are built'.
Set up a trigger so that when some other projects finish building, a new
build is scheduled for this project. This is convenient for running an
extensive test after a build is complete, for example.
This configuration complements the "Build other projects" section in the
"Post-build Actions" of an upstream project, but is preferable when you
want to configure the downstream project.
:arg str jobs: List of jobs to watch. Can be either a comma separated
list or a list.
:arg str result: Build results to monitor for between the following
options: success, unstable and failure. (default 'success').
Example:
.. literalinclude:: /../../tests/triggers/fixtures/reverse.yaml
Example List:
.. literalinclude:: /../../tests/triggers/fixtures/reverse-list.yaml
"""
reserveBuildTrigger = XML.SubElement(
xml_parent, 'jenkins.triggers.ReverseBuildTrigger')
supported_thresholds = ['SUCCESS', 'UNSTABLE', 'FAILURE']
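    # hudson_model.THRESHOLDS supplies the <name>/<ordinal>/<color>/
    # <completeBuild> values Jenkins stores for each result threshold; it is
    # indexed below by the upper-cased 'result' option.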
XML.SubElement(reserveBuildTrigger, 'spec').text = ''
jobs = data.get('jobs')
if isinstance(jobs, list):
jobs = ",".join(jobs)
XML.SubElement(reserveBuildTrigger, 'upstreamProjects').text = \
jobs
threshold = XML.SubElement(reserveBuildTrigger, 'threshold')
    result = data.get('result', 'success').upper()
if result not in supported_thresholds:
raise jenkins_jobs.errors.JenkinsJobsException(
"Choice should be one of the following options: %s." %
", ".join(supported_thresholds))
XML.SubElement(threshold, 'name').text = \
hudson_model.THRESHOLDS[result]['name']
XML.SubElement(threshold, 'ordinal').text = \
hudson_model.THRESHOLDS[result]['ordinal']
XML.SubElement(threshold, 'color').text = \
hudson_model.THRESHOLDS[result]['color']
XML.SubElement(threshold, 'completeBuild').text = \
str(hudson_model.THRESHOLDS[result]['complete']).lower()
def script(parser, xml_parent, data):
"""yaml: script
Triggers the job using shell or batch script.
Requires the Jenkins :jenkins-wiki:`ScriptTrigger Plugin
<ScriptTrigger+Plugin>`.
:arg str label: Restrict where the polling should run. (default '')
:arg str script: A shell or batch script. (default '')
:arg str script-file-path: A shell or batch script path. (default '')
:arg str cron: cron syntax of when to run (default '')
:arg bool enable-concurrent: Enables triggering concurrent builds.
(default false)
:arg int exit-code: If the exit code of the script execution returns this
expected exit code, a build is scheduled. (default 0)
Example:
.. literalinclude:: /../../tests/triggers/fixtures/script.yaml
"""
data = data if data else {}
st = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.scripttrigger.ScriptTrigger'
)
label = data.get('label')
XML.SubElement(st, 'script').text = str(data.get('script', ''))
XML.SubElement(st, 'scriptFilePath').text = str(
data.get('script-file-path', ''))
XML.SubElement(st, 'spec').text = str(data.get('cron', ''))
XML.SubElement(st, 'labelRestriction').text = str(bool(label)).lower()
if label:
XML.SubElement(st, 'triggerLabel').text = label
XML.SubElement(st, 'enableConcurrentBuild').text = str(
data.get('enable-concurrent', False)).lower()
XML.SubElement(st, 'exitCode').text = str(data.get('exit-code', 0))
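# Non-authoritative sketch of the YAML this handler consumes (see the fixture
# referenced in the docstring for the real example):
#
#   triggers:
#     - script:
#         label: 'polling-node'
#         script: 'exit 0'
#         cron: 'H/15 * * * *'
#
# Because a label is supplied, <labelRestriction> is written as 'true' and
# <triggerLabel> is set; <exitCode> falls back to its default of 0.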
def groovy_script(parser, xml_parent, data):
"""yaml: groovy-script
Triggers the job using a groovy script.
Requires the Jenkins :jenkins-wiki:`ScriptTrigger Plugin
<ScriptTrigger+Plugin>`.
    :arg bool system-script: If true, run the groovy script as a system
        script; the script will have access to the same variables as the
        Groovy Console. If false, run the groovy script on the executor
        node; the script will not have access to the hudson or job model.
        (default false)
    :arg str script: Content of the groovy script. If the script result
        evaluates to true, a build is scheduled. (default '')
:arg str script-file-path: Groovy script path. (default '')
:arg str property-file-path: Property file path. All properties will be set
as parameters for the triggered build. (optional)
:arg bool enable-concurrent: Enable concurrent build. (default false)
:arg str label: Restrict where the polling should run. (default '')
:arg str cron: cron syntax of when to run (default '')
Example:
.. literalinclude:: /../../tests/triggers/fixtures/groovy-script.yaml
"""
gst = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.scripttrigger.groovy.GroovyScriptTrigger'
)
XML.SubElement(gst, 'groovySystemScript').text = str(
data.get('system-script', False)).lower()
XML.SubElement(gst, 'groovyExpression').text = str(data.get('script', ''))
XML.SubElement(gst, 'groovyFilePath').text = str(data.get(
'script-file-path', ''))
if 'property-file-path' in data:
XML.SubElement(gst, 'propertiesFilePath').text = str(
data.get('property-file-path'))
XML.SubElement(gst, 'enableConcurrentBuild').text = str(
data.get('enable-concurrent', False)).lower()
label = data.get('label')
XML.SubElement(gst, 'labelRestriction').text = str(bool(label)).lower()
if label:
XML.SubElement(gst, 'triggerLabel').text = label
XML.SubElement(gst, 'spec').text = str(data.get('cron', ''))
class Triggers(jenkins_jobs.modules.base.Base):
sequence = 50
component_type = 'trigger'
component_list_type = 'triggers'
def gen_xml(self, parser, xml_parent, data):
triggers = data.get('triggers', [])
if not triggers:
return
trig_e = XML.SubElement(xml_parent, 'triggers', {'class': 'vector'})
for trigger in triggers:
self.registry.dispatch('trigger', parser, trig_e, trigger)
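# Sketch of the dispatch flow (the 'pollscm' entry is only an assumed example
# of another trigger defined elsewhere in this module):
#
#   triggers:
#     - pollscm: 'H/5 * * * *'
#     - reverse:
#         jobs: 'upstream-job'
#
# gen_xml adds one <triggers class="vector"> element to the project XML, and
# the registry resolves each mapping key ('pollscm', 'reverse', ...) to the
# matching trigger function above, passing the mapped value through as `data`.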
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class ManagementLocksOperations(object):
"""ManagementLocksOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def create_or_update_at_resource_group_level(
self, resource_group_name, lock_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create or update a management lock at the resource group level.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param lock_name: The lock name.
:type lock_name: str
:param parameters: The management lock parameters.
:type parameters: :class:`ManagementLockObject
<azure.mgmt.resource.locks.models.ManagementLockObject>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ManagementLockObject
<azure.mgmt.resource.locks.models.ManagementLockObject>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ManagementLockObject')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
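    # Illustrative call only (the client attribute name and lock level are
    # assumptions, not defined in this module): callers normally reach this
    # operation through the generated service client, e.g.
    #
    #   lock = client.management_locks.create_or_update_at_resource_group_level(
    #       'my-resource-group', 'my-lock',
    #       models.ManagementLockObject(level='CanNotDelete'))
    #
    # A 200 or 201 response is deserialized into a ManagementLockObject; any
    # other status code raises CloudError.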
def delete_at_resource_group_level(
self, resource_group_name, lock_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the management lock of a resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
        :param lock_name: The name of the lock.
:type lock_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_at_resource_group_level(
self, resource_group_name, lock_name, custom_headers=None, raw=False, **operation_config):
"""Gets a management lock at the resource group level.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param lock_name: The lock name.
:type lock_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ManagementLockObject
<azure.mgmt.resource.locks.models.ManagementLockObject>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update_at_resource_level(
self, resource_group_name, resource_provider_namespace, parent_resource_path, resource_type, resource_name, lock_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create or update a management lock at the resource level or any level
below resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_provider_namespace: Resource identity.
:type resource_provider_namespace: str
:param parent_resource_path: Resource identity.
:type parent_resource_path: str
:param resource_type: Resource identity.
:type resource_type: str
:param resource_name: Resource identity.
:type resource_name: str
        :param lock_name: The name of the lock.
:type lock_name: str
:param parameters: Create or update management lock parameters.
:type parameters: :class:`ManagementLockObject
<azure.mgmt.resource.locks.models.ManagementLockObject>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ManagementLockObject
<azure.mgmt.resource.locks.models.ManagementLockObject>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ManagementLockObject')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_at_resource_level(
self, resource_group_name, resource_provider_namespace, parent_resource_path, resource_type, resource_name, lock_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the management lock of a resource or any level below resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_provider_namespace: Resource identity.
:type resource_provider_namespace: str
:param parent_resource_path: Resource identity.
:type parent_resource_path: str
:param resource_type: Resource identity.
:type resource_type: str
:param resource_name: Resource identity.
:type resource_name: str
        :param lock_name: The name of the lock.
:type lock_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def create_or_update_at_subscription_level(
self, lock_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create or update a management lock at the subscription level.
        :param lock_name: The name of the lock.
:type lock_name: str
:param parameters: The management lock parameters.
:type parameters: :class:`ManagementLockObject
<azure.mgmt.resource.locks.models.ManagementLockObject>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ManagementLockObject
<azure.mgmt.resource.locks.models.ManagementLockObject>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}'
path_format_arguments = {
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ManagementLockObject')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', response)
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_at_subscription_level(
self, lock_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the management lock of a subscription.
        :param lock_name: The name of the lock.
:type lock_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}'
path_format_arguments = {
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, lock_name, custom_headers=None, raw=False, **operation_config):
"""Gets the management lock of a scope.
:param lock_name: Name of the management lock.
:type lock_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ManagementLockObject
<azure.mgmt.resource.locks.models.ManagementLockObject>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}'
path_format_arguments = {
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_at_resource_group_level(
self, resource_group_name, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets all the management locks of a resource group.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ManagementLockObjectPaged
<azure.mgmt.resource.locks.models.ManagementLockObjectPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ManagementLockObjectPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ManagementLockObjectPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_at_resource_level(
self, resource_group_name, resource_provider_namespace, parent_resource_path, resource_type, resource_name, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets all the management locks of a resource or any level below
resource.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: Resource identity.
:type resource_provider_namespace: str
:param parent_resource_path: Resource identity.
:type parent_resource_path: str
:param resource_type: Resource identity.
:type resource_type: str
:param resource_name: Resource identity.
:type resource_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ManagementLockObjectPaged
<azure.mgmt.resource.locks.models.ManagementLockObjectPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ManagementLockObjectPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ManagementLockObjectPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_at_subscription_level(
self, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets all the management locks of a subscription.
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ManagementLockObjectPaged
<azure.mgmt.resource.locks.models.ManagementLockObjectPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ManagementLockObjectPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ManagementLockObjectPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
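# Usage note (illustrative; the $filter expression is an assumption about the
# service's OData syntax, not defined in this module): the list_* operations
# return a lazily paged ManagementLockObjectPaged, so callers simply iterate:
#
#   for lock in operations.list_at_subscription_level(
#           filter="properties/level eq 'CanNotDelete'"):
#       print(lock.name)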
|
|
import copy
from hearthbreaker.tags.base import Status, Action, Aura, Condition, AuraUntil, CardQuery, \
CARD_SOURCE, Effect, Buff, BuffUntil, Amount, Picker, Selector
from hearthbreaker.tags.condition import IsSecret
from hearthbreaker.tags.selector import AllPicker, ConstantSelector
class Give(Action):
def __init__(self, buffs, picker=AllPicker()):
if isinstance(buffs, Status):
self.buffs = [Buff(buffs)]
elif isinstance(buffs, list):
self.buffs = buffs
if isinstance(buffs[0], Aura):
raise TypeError("Aura passed where buff was expected")
elif isinstance(buffs, Aura):
raise TypeError("Aura passed where buff was expected")
else:
self.buffs = [buffs]
self.picker = picker
def act(self, actor, target, other=None):
buffs = self.picker.pick(actor, self.buffs)
for buff in buffs:
target.add_buff(buff.to_instance(target))
def __to_json__(self):
if isinstance(self.picker, AllPicker):
return {
'name': 'give',
'buffs': self.buffs
}
return {
'name': 'give',
'buffs': self.buffs,
'picker': self.picker,
}
def __from_json__(self, buffs=None, effects=None, auras=None, picker=None):
if effects: # To allow for give to work with effects as well, we check at load time
return GiveEffect.__new__(GiveEffect).__from_json__(effects)
if auras: # To allow for give to work with auras as well, we check at load time
return GiveAura.__new__(GiveAura).__from_json__(auras)
self.buffs = []
for buff in buffs:
if "until" in buff:
self.buffs.append(BuffUntil.from_json(**buff))
else:
self.buffs.append(Buff.from_json(**buff))
if not picker:
self.picker = AllPicker()
else:
self.picker = Picker.from_json(**picker)
return self
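# Minimal usage sketch (ChangeAttack is assumed to exist in
# hearthbreaker.tags.status; it is not imported by this module):
#
#   from hearthbreaker.tags.status import ChangeAttack
#   give_two_attack = Give(ChangeAttack(2))
#
# A bare Status is wrapped in a Buff, so act() ultimately calls
# target.add_buff(Buff(ChangeAttack(2)).to_instance(target)) for every buff
# the picker selects.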
class GiveAura(Action):
def __init__(self, auras):
if isinstance(auras, list):
self.auras = auras
else:
self.auras = [auras]
def act(self, actor, target, other=None):
for aura in self.auras:
target.add_aura(aura)
def __to_json__(self):
return {
'name': 'give',
'auras': self.auras
}
def __from_json__(self, auras):
self.auras = []
for aura in auras:
if "until" in aura:
self.auras.append(AuraUntil.from_json(**aura))
else:
self.auras.append(Aura.from_json(**aura))
return self
class GiveEffect(Action):
def __init__(self, effects):
if isinstance(effects, Effect):
self.effects = [effects]
else:
self.effects = effects
def act(self, actor, target, other=None):
for effect in self.effects:
for tag in effect.tags:
for action in tag.actions:
if hasattr(action, "selector"):
action.selector = ConstantSelector([obj.born for obj in
action.selector.choose_targets(actor, target)])
target.add_effect(effect)
def __to_json__(self):
return {
'name': 'give',
'effects': self.effects
}
def __from_json__(self, effects):
self.effects = [Effect.from_json(**effect) for effect in effects]
return self
class Summon(Action):
def __init__(self, card, count=1):
if isinstance(card, CardQuery):
self.card = card
else:
self.card = CardQuery(card.ref_name)
self.count = count
def act(self, actor, target, other=None):
card = self.card.get_card(target, target, actor)
if card is None:
return
if actor.is_minion() and actor.player is target:
# When a minion is summoned around another minion, they alternate between left and right,
# starting on the right
if actor.removed:
c = 0
else:
c = 1
for summon in range(self.count):
index = actor.index + (c % 2)
card.summon(target, target.game, index)
if not actor.removed:
c += 1
else:
for summon in range(self.count):
card.summon(target, target.game, len(target.minions))
def __to_json__(self):
if self.count > 1:
return {
'name': 'summon',
'card': self.card,
'count': self.count
}
return {
'name': 'summon',
'card': self.card
}
def __from_json__(self, card, count=1):
self.card = CardQuery.from_json(**card)
self.count = count
return self
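# Placement note (restating the behaviour coded in act() above): when the
# summoning minion is still on the board, c starts at 1 and is incremented
# after every summon, so actor.index + (c % 2) alternates between the slot to
# the minion's right and the slot to its left, starting on the right. If the
# summoner has already been removed, c stays 0 and each copy is placed at the
# minion's former index.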
class Transform(Action):
def __init__(self, card):
if isinstance(card, CardQuery):
self.card = card
else:
self.card = CardQuery(card.ref_name)
def act(self, actor, target, other=None):
card = self.card.get_card(target, target.player, actor)
if target.is_card():
target.replace(card)
elif target.is_minion():
minion = card.create_minion(target.player)
minion.card = card
target.replace(minion)
elif target.is_hero():
hero = card.create_hero(target.player)
hero.card = card
target.player.trigger("minion_played", actor)
hero.buffs = copy.deepcopy(actor.buffs)
hero.health = actor.health
target.replace(hero)
if hero.health <= 0:
hero.die(None)
def __to_json__(self):
return {
'name': 'transform',
'card': self.card
}
def __from_json__(self, card):
self.card = CardQuery.from_json(**card)
return self
class Kill(Action):
def act(self, actor, target, other=None):
target.die(None)
def __to_json__(self):
return {
'name': 'kill'
}
class Heal(Action, metaclass=Amount):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
target.heal(actor.player.effective_heal_power(self.get_amount(actor, target, other)), actor)
def __to_json__(self):
return {
'name': 'heal',
}
class SetHealth(Action, metaclass=Amount):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
target.health = self.get_amount(actor, target, other)
def __to_json__(self):
return {
'name': 'set_health'
}
class Damage(Action, metaclass=Amount):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
target.damage(self.get_amount(actor, target, other), actor)
def __to_json__(self):
return {
'name': 'damage',
}
class Draw(Action, metaclass=Amount):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
for draw in range(0, self.get_amount(actor, target, other)):
target.draw()
def __to_json__(self):
return {
'name': 'draw',
}
class Discard(Action, metaclass=Amount):
def __init__(self, query=CardQuery(source=CARD_SOURCE.MY_HAND)):
super().__init__()
self.query = query
def act(self, actor, target, other=None):
for index in range(0, self.get_amount(actor, target, other)):
card = self.query.get_card(target, actor.player, actor)
if card:
actor.player.trigger("discard", card)
def __to_json__(self):
return {
'name': 'discard',
'query': self.query,
}
def __from_json__(self, query):
self.query = CardQuery.from_json(**query)
return self
class IncreaseArmor(Action, metaclass=Amount):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
target.armor += self.get_amount(actor, target, other)
def __to_json__(self):
return {
'name': 'increase_armor'
}
class ChangeTarget(Action):
def __init__(self, selector):
self.selector = selector
def act(self, actor, target, other=None):
possible_targets = [t for t in self.selector.choose_targets(target, target.current_target)]
if len(possible_targets) > 0:
target.current_target = possible_targets[0]
def __to_json__(self):
return {
'name': 'change_target',
'selector': self.selector,
}
def __from_json__(self, selector):
from hearthbreaker.tags.base import Selector
self.selector = Selector.from_json(**selector)
return self
class AddCard(Action):
def __init__(self, card, count=1, add_to_deck=False):
if isinstance(card, CardQuery):
self.card = card
else:
self.card = CardQuery(card.ref_name)
self.add_to_deck = add_to_deck
self.count = count
def act(self, actor, target, other=None):
if self.add_to_deck:
for i in range(self.count):
target.deck.put_back(self.card.get_card(target, target, actor))
else:
for i in range(self.count):
if len(target.hand) < 10:
card = self.card.get_card(target, target, actor)
if card:
target.hand.append(card)
def __to_json__(self):
if self.add_to_deck:
return {
'name': 'add_card',
'card': self.card,
'count': self.count,
'add_to_deck': self.add_to_deck,
}
return {
'name': 'add_card',
'card': self.card,
'count': self.count
}
def __from_json__(self, card, count=1, add_to_deck=False):
self.card = CardQuery.from_json(**card)
self.count = count
self.add_to_deck = add_to_deck
return self
class ResurrectFriendly(Action):
def __to_json__(self):
return {
'name': 'resurrect_friendly'
}
def act(self, actor, target, other=None):
# Will be called once per Kel'Thuzad on the board
# http://www.hearthhead.com/card=1794/kelthuzad#comments
for minion in sorted(target.dead_this_turn, key=lambda m: m.born):
minion.card.summon(target, target.game, len(target.minions))
class Bounce(Action):
def __init__(self, bounce_to_deck=False):
self.bounce_to_deck = bounce_to_deck
def act(self, actor, target, other=None):
target.bounce(self.bounce_to_deck)
def __to_json__(self):
if self.bounce_to_deck:
return {
'name': 'bounce',
'bounce_to_deck': True,
}
return {
'name': 'bounce'
}
class SwapWithHand(Action):
def __init__(self, condition=None):
self.condition = condition
def act(self, actor, target, other=None):
if actor.is_valid():
if self.condition:
chosen_card = target.game.random_draw(target.hand,
lambda c: self.condition.evaluate(c) and c.is_minion())
else:
chosen_card = target.game.random_draw(target.hand, lambda c: c.is_minion())
if chosen_card:
chosen_card.summon(target, target.game, len(target.minions))
chosen_card.unattach()
target.hand.remove(chosen_card)
actor.bounce()
def __to_json__(self):
if self.condition:
return {
'name': 'swap_with_hand',
'condition': self.condition
}
return {
'name': 'swap_with_hand'
}
def __from_json__(self, condition=None):
if condition:
self.condition = Condition.from_json(**condition)
else:
self.condition = None
return self
class ApplySecret(Action):
def __init__(self, source):
self.source = source
self._query = CardQuery(conditions=[IsSecret()], source=source)
def act(self, actor, target, other=None):
secret = self._query.get_card(target, target, actor)
if secret:
target.secrets.append(secret)
secret.player = target
if target is target.game.other_player:
secret.player = target
# To allow for Mad Scientist not to be redeemed or duplicated as a result of its death,
# but still allow other minions that die during the same cycle to be duplicated.
# Based on testing for patch 2.1.0.7785
if actor.dead:
target.bind_once("after_death", secret.activate)
else:
secret.activate(target)
def __to_json__(self):
return {
'name': 'apply_secret',
'source': CARD_SOURCE.to_str(self.source)
}
def __from_json__(self, source):
self.source = CARD_SOURCE.from_str(source)
self._query = CardQuery(conditions=[IsSecret()], source=self.source)
return self
class Equip(Action):
def __init__(self, weapon):
if isinstance(weapon, CardQuery):
self.weapon = weapon
else:
self.weapon = CardQuery(weapon.ref_name)
def act(self, actor, target, other=None):
card = self.weapon.get_card(target, target, actor)
weapon = card.create_weapon(target)
weapon.card = card
weapon.equip(target)
def __to_json__(self):
return {
'name': 'equip',
'weapon': self.weapon
}
def __from_json__(self, weapon):
self.weapon = CardQuery.from_json(**weapon)
return self
class Destroy(Action):
def act(self, actor, target, other=None):
target.destroy()
def __to_json__(self):
return {
'name': 'destroy'
}
class Steal(Action):
def act(self, actor, target, other=None):
new_minion = target.copy(actor.player)
target.unattach()
target.remove_from_board()
new_minion.add_to_board(len(actor.player.minions))
def __to_json__(self):
return {
'name': 'steal'
}
class Duplicate(Action):
def __init__(self, selector):
super().__init__()
self.selector = selector
def act(self, actor, target, other=None):
for minion in self.selector.choose_targets(actor, target):
if len(minion.player.minions) < 7:
dup = minion.copy(minion.player)
dup.add_to_board(minion.index + 1)
def __to_json__(self):
return {
"name": "duplicate",
"selector": self.selector,
}
def __from_json__(self, selector):
self.selector = Selector.from_json(**selector)
return self
class Replace(Action):
def act(self, actor, target, other=None):
new_minion = target.copy(actor.player)
actor.replace(new_minion)
def __to_json__(self):
return {
'name': 'replace'
}
class Silence(Action):
def act(self, actor, target, other=None):
target.silence()
def __to_json__(self):
return {
'name': 'silence'
}
class DestroyManaCrystal(Action):
def act(self, actor, target, other=None):
target.max_mana -= 1
if target.mana > 0:
target.mana -= 1
def __to_json__(self):
return {
'name': 'destroy_mana_crystal'
}
class GiveManaCrystal(Action):
def __init__(self, count=1, empty=False):
self.count = count
self.empty = empty
def act(self, actor, target, other=None):
target.max_mana = min(self.count + target.max_mana, 10)
if not self.empty:
target.mana += self.count
def __to_json__(self):
return {
'name': 'give_mana_crystal',
'count': self.count,
'empty': self.empty,
}
class IncreaseDurability(Action):
def act(self, actor, target, other=None):
if target.weapon:
target.weapon.durability += 1
def __to_json__(self):
return {
'name': 'increase_durability',
}
class DecreaseDurability(Action):
def act(self, actor, target, other=None):
if target.weapon:
target.weapon.durability -= 1
if target.weapon.durability <= 0:
target.weapon.destroy()
def __to_json__(self):
return {
'name': 'decrease_durability',
}
class IncreaseWeaponAttack(Action, metaclass=Amount):
def __init__(self):
pass
def act(self, actor, target, other=None):
if target.weapon:
target.weapon.base_attack += self.get_amount(actor, target, other)
def __to_json__(self):
return {
'name': 'increase_weapon_attack'
}
class RemoveDivineShields(Action):
def act(self, actor, target, other=None):
from hearthbreaker.tags.status import DivineShield
if target.divine_shield:
target.buffs = [buff for buff in target.buffs if not isinstance(buff.status, DivineShield)]
target.divine_shield = 0
def __to_json__(self):
return {
"name": "remove_divine_shields"
}
# class SwapStats(Action):
# def act(self, actor, target, other=None):
# temp_attack = target.calculate_attack()
# temp_health = target.health
# if temp_attack == 0:
# target.die(None)
# else:
# target.set_attack_to(temp_health)
# target.set_health_to(temp_attack)
#
# def __to_json__(self):
# return {
# 'name': 'swap_stats',
# }
class Remove(Action):
def act(self, actor, target, other=None):
target.unattach()
target.remove_from_board()
def __to_json__(self):
return {
'name': 'remove'
}
class SwapStats(Action):
def __init__(self, source_stat, dest_stat, swap_with_owner):
self.source_stat = source_stat
self.dest_stat = dest_stat
self.swap_with_owner = swap_with_owner
def act(self, actor, target, other=None):
if self.swap_with_owner:
source = actor
else:
source = target
temp = self.get_attribute(source, self.source_stat)
self.set_attribute(source, self.source_stat, self.get_attribute(target, self.dest_stat))
self.set_attribute(target, self.dest_stat, temp)
if source.health == 0:
source.die(None)
if target is not source and target.health == 0:
target.die(None)
actor.player.game.check_delayed()
@staticmethod
def get_attribute(obj, attribute):
if attribute == "damage":
return obj.calculate_max_health() - obj.health
elif attribute == 'mana':
return obj.card.mana
elif attribute == "attack":
return obj.calculate_attack()
elif attribute == "health":
return obj.health
@staticmethod
def set_attribute(obj, attribute, value):
from hearthbreaker.tags.status import ManaChange, SetAttack
if attribute == "damage":
was_enraged = obj.enraged
            obj.health = max(0, obj.calculate_max_health() - value)
if value > 0:
obj.enraged = True
if not was_enraged:
obj._do_enrage()
elif attribute == 'mana':
obj.add_buff(Buff(ManaChange(value - obj.mana_cost())))
elif attribute == "attack":
obj.add_buff(Buff(SetAttack(value)))
elif attribute == "health":
obj.set_health_to(value)
def __to_json__(self):
return {
'name': 'swap_stats',
'source_stat': self.source_stat,
'dest_stat': self.dest_stat,
'swap_with_owner': self.swap_with_owner,
}
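# Rough example (the attack/health swap is an assumed use of this action, not
# something defined in this file):
#
#   swap = SwapStats('attack', 'health', False)
#
# act() then reads the target's current attack, applies a SetAttack buff equal
# to the target's health, and calls set_health_to with the old attack value;
# if the resulting health is 0 the minion dies before check_delayed() runs.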
|
|
from unittest import mock
import pytest
from aiohttp import errors, parsers
@pytest.fixture
def stream():
return mock.Mock()
@pytest.fixture
def buf():
return parsers.ParserBuffer()
def test_feed_data(buf):
buf.feed_data(b'')
assert len(buf) == 0
buf.feed_data(b'data')
assert len(buf) == 4
    assert bytes(buf) == b'data'
def test_feed_data_after_exception(buf):
buf.feed_data(b'data')
exc = ValueError()
buf.set_exception(exc)
buf.feed_data(b'more')
assert len(buf) == 4
assert bytes(buf) == b'data'
def test_read_exc(buf):
p = buf.read(3)
next(p)
p.send(b'1')
exc = ValueError()
buf.set_exception(exc)
assert buf.exception() is exc
with pytest.raises(ValueError):
p.send(b'1')
def test_read_exc_multiple(buf):
p = buf.read(3)
next(p)
p.send(b'1')
exc = ValueError()
buf.set_exception(exc)
assert buf.exception() is exc
p = buf.read(3)
with pytest.raises(ValueError):
next(p)
def test_read(buf):
p = buf.read(3)
next(p)
p.send(b'1')
try:
p.send(b'234')
except StopIteration as exc:
res = exc.value
assert res == b'123'
assert b'4' == bytes(buf)
def test_readsome(buf):
p = buf.readsome(3)
next(p)
try:
p.send(b'1')
except StopIteration as exc:
res = exc.value
assert res == b'1'
p = buf.readsome(2)
next(p)
try:
p.send(b'234')
except StopIteration as exc:
res = exc.value
assert res == b'23'
assert b'4' == bytes(buf)
def test_readsome_exc(buf):
buf.set_exception(ValueError())
p = buf.readsome(3)
with pytest.raises(ValueError):
next(p)
def test_wait(buf):
p = buf.wait(3)
next(p)
p.send(b'1')
try:
p.send(b'234')
except StopIteration as exc:
res = exc.value
assert res == b'123'
assert b'1234' == bytes(buf)
def test_wait_exc(buf):
buf.set_exception(ValueError())
p = buf.wait(3)
with pytest.raises(ValueError):
next(p)
def test_skip(buf):
p = buf.skip(3)
next(p)
p.send(b'1')
try:
p.send(b'234')
except StopIteration as exc:
res = exc.value
assert res is None
assert b'4' == bytes(buf)
def test_skip_exc(buf):
buf.set_exception(ValueError())
p = buf.skip(3)
with pytest.raises(ValueError):
next(p)
def test_readuntil_limit(buf):
p = buf.readuntil(b'\n', 4)
next(p)
p.send(b'1')
p.send(b'234')
with pytest.raises(errors.LineLimitExceededParserError):
p.send(b'5')
def test_readuntil_limit2(buf):
p = buf.readuntil(b'\n', 4)
next(p)
with pytest.raises(errors.LineLimitExceededParserError):
p.send(b'12345\n6')
def test_readuntil_limit3(buf):
p = buf.readuntil(b'\n', 4)
next(p)
with pytest.raises(errors.LineLimitExceededParserError):
p.send(b'12345\n6')
def test_readuntil(buf):
p = buf.readuntil(b'\n', 4)
next(p)
p.send(b'123')
try:
p.send(b'\n456')
except StopIteration as exc:
res = exc.value
assert res == b'123\n'
assert b'456' == bytes(buf)
def test_readuntil_exc(buf):
buf.set_exception(ValueError())
p = buf.readuntil(b'\n', 4)
with pytest.raises(ValueError):
next(p)
def test_waituntil_limit(buf):
p = buf.waituntil(b'\n', 4)
next(p)
p.send(b'1')
p.send(b'234')
with pytest.raises(errors.LineLimitExceededParserError):
p.send(b'5')
def test_waituntil_limit2(buf):
p = buf.waituntil(b'\n', 4)
next(p)
with pytest.raises(errors.LineLimitExceededParserError):
p.send(b'12345\n6')
def test_waituntil_limit3(buf):
p = buf.waituntil(b'\n', 4)
next(p)
with pytest.raises(errors.LineLimitExceededParserError):
p.send(b'12345\n6')
def test_waituntil(buf):
p = buf.waituntil(b'\n', 4)
next(p)
p.send(b'123')
try:
p.send(b'\n456')
except StopIteration as exc:
res = exc.value
assert res == b'123\n'
assert b'123\n456' == bytes(buf)
def test_waituntil_exc(buf):
buf.set_exception(ValueError())
p = buf.waituntil(b'\n', 4)
with pytest.raises(ValueError):
next(p)
def test_skipuntil(buf):
p = buf.skipuntil(b'\n')
next(p)
p.send(b'123')
try:
p.send(b'\n456\n')
except StopIteration:
pass
assert b'456\n' == bytes(buf)
p = buf.skipuntil(b'\n')
try:
next(p)
except StopIteration:
pass
assert b'' == bytes(buf)
def test_skipuntil_exc(buf):
buf.set_exception(ValueError())
p = buf.skipuntil(b'\n')
with pytest.raises(ValueError):
next(p)
|
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic TFX model evaluator executor."""
import os
from typing import Any, Dict, List
from absl import logging
import apache_beam as beam
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis import constants as tfma_constants
# Need to import the following module so that the fairness indicator post-export
# metric is registered.
import tensorflow_model_analysis.addons.fairness.post_export_metrics.fairness_indicators # pylint: disable=unused-import
from tfx import types
from tfx.components.evaluator import constants
from tfx.components.util import udf_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.components.base import base_beam_executor
from tfx.proto import evaluator_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import json_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx_bsl.tfxio import tensor_adapter
_TELEMETRY_DESCRIPTORS = ['Evaluator']
class Executor(base_beam_executor.BaseBeamExecutor):
"""Executor for [Evaluator](https://www.tensorflow.org/tfx/guide/evaluator)."""
def _get_slice_spec_from_feature_slicing_spec(
self, spec: evaluator_pb2.FeatureSlicingSpec
) -> List[tfma.slicer.SingleSliceSpec]:
"""Given a feature slicing spec, returns a List of SingleSliceSpecs.
Args:
spec: slice specification.
Returns:
List of corresponding SingleSliceSpecs. Always includes the overall slice,
even if it was not specified in the given spec.
"""
result = []
for single_spec in spec.specs:
columns = single_spec.column_for_slicing
result.append(tfma.slicer.SingleSliceSpec(columns=columns))
# Always include the overall slice.
if tfma.slicer.SingleSliceSpec() not in result:
result.append(tfma.slicer.SingleSliceSpec())
return result
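  # Illustrative mapping (the column names are made up): a FeatureSlicingSpec
  # whose specs list column_for_slicing ['hour'] and ['day', 'miles'] becomes
  #   [SingleSliceSpec(columns=['hour']),
  #    SingleSliceSpec(columns=['day', 'miles']),
  #    SingleSliceSpec()]   # overall slice appended automatically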
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
"""Runs a batch job to evaluate the eval_model against the given input.
Args:
input_dict: Input dict from input key to a list of Artifacts.
- model: exported model.
- examples: examples for eval the model.
output_dict: Output dict from output key to a list of Artifacts.
- evaluation: model evaluation results.
exec_properties: A dict of execution properties.
- eval_config: JSON string of tfma.EvalConfig.
- feature_slicing_spec: JSON string of evaluator_pb2.FeatureSlicingSpec
instance, providing the way to slice the data. Deprecated, use
eval_config.slicing_specs instead.
- example_splits: JSON-serialized list of names of splits on which the
metrics are computed. Default behavior (when example_splits is set to
None) is using the 'eval' split.
Returns:
None
"""
if standard_component_specs.EXAMPLES_KEY not in input_dict:
raise ValueError('EXAMPLES_KEY is missing from input dict.')
if standard_component_specs.EVALUATION_KEY not in output_dict:
raise ValueError('EVALUATION_KEY is missing from output dict.')
if standard_component_specs.MODEL_KEY in input_dict and len(
input_dict[standard_component_specs.MODEL_KEY]) > 1:
      raise ValueError('There can be only one candidate model; there are %d.' %
(len(input_dict[standard_component_specs.MODEL_KEY])))
if standard_component_specs.BASELINE_MODEL_KEY in input_dict and len(
input_dict[standard_component_specs.BASELINE_MODEL_KEY]) > 1:
raise ValueError(
'There can be only one baseline model, there are %d.' %
(len(input_dict[standard_component_specs.BASELINE_MODEL_KEY])))
self._log_startup(input_dict, output_dict, exec_properties)
# Add fairness indicator metric callback if necessary.
fairness_indicator_thresholds = json_utils.loads(
exec_properties.get(
standard_component_specs.FAIRNESS_INDICATOR_THRESHOLDS_KEY, 'null'))
add_metrics_callbacks = None
if fairness_indicator_thresholds:
add_metrics_callbacks = [
tfma.post_export_metrics.fairness_indicators( # pytype: disable=module-attr
thresholds=fairness_indicator_thresholds),
]
output_uri = artifact_utils.get_single_uri(
output_dict[constants.EVALUATION_KEY])
# Make sure user packages get propagated to the remote Beam worker.
unused_module_path, extra_pip_packages = udf_utils.decode_user_module_key(
exec_properties.get(standard_component_specs.MODULE_PATH_KEY, None))
for pip_package_path in extra_pip_packages:
local_pip_package_path = io_utils.ensure_local(pip_package_path)
self._beam_pipeline_args.append('--extra_package=%s' %
local_pip_package_path)
eval_shared_model_fn = udf_utils.try_get_fn(
exec_properties=exec_properties,
fn_name='custom_eval_shared_model') or tfma.default_eval_shared_model
run_validation = False
models = []
if (standard_component_specs.EVAL_CONFIG_KEY in exec_properties
and exec_properties[standard_component_specs.EVAL_CONFIG_KEY]):
slice_spec = None
has_baseline = bool(
input_dict.get(standard_component_specs.BASELINE_MODEL_KEY))
eval_config = tfma.EvalConfig()
proto_utils.json_to_proto(
exec_properties[standard_component_specs.EVAL_CONFIG_KEY],
eval_config)
# rubber_stamp is always assumed true, i.e., change threshold will always
# be ignored when a baseline model is missing.
if hasattr(tfma, 'utils'):
eval_config = tfma.utils.update_eval_config_with_defaults(
eval_config, has_baseline=has_baseline, rubber_stamp=True)
tfma.utils.verify_eval_config(eval_config)
else:
# TODO(b/171992041): Replaced by tfma.utils.
eval_config = tfma.update_eval_config_with_defaults(
eval_config, has_baseline=has_baseline, rubber_stamp=True)
tfma.verify_eval_config(eval_config)
      # Do not validate the model when no thresholds are configured. This is to
      # avoid accidentally blessing models when users forget to set thresholds.
run_validation = bool(
tfma.metrics.metric_thresholds_from_metrics_specs(
eval_config.metrics_specs, eval_config=eval_config))
if len(eval_config.model_specs) > 2:
raise ValueError(
"""Cannot support more than two models. There are %d models in this
eval_config.""" % (len(eval_config.model_specs)))
# Extract model artifacts.
for model_spec in eval_config.model_specs:
if standard_component_specs.MODEL_KEY not in input_dict:
if not model_spec.prediction_key:
raise ValueError(
'model_spec.prediction_key required if model not provided')
continue
if model_spec.is_baseline:
model_artifact = artifact_utils.get_single_instance(
input_dict[standard_component_specs.BASELINE_MODEL_KEY])
else:
model_artifact = artifact_utils.get_single_instance(
input_dict[standard_component_specs.MODEL_KEY])
# TODO(b/171992041): tfma.get_model_type replaced by tfma.utils.
if ((hasattr(tfma, 'utils') and
tfma.utils.get_model_type(model_spec) == tfma.TF_ESTIMATOR) or
hasattr(tfma, 'get_model_type') and
tfma.get_model_type(model_spec) == tfma.TF_ESTIMATOR):
model_path = path_utils.eval_model_path(
model_artifact.uri,
path_utils.is_old_model_artifact(model_artifact))
else:
model_path = path_utils.serving_model_path(
model_artifact.uri,
path_utils.is_old_model_artifact(model_artifact))
logging.info('Using %s as %s model.', model_path, model_spec.name)
models.append(
eval_shared_model_fn(
eval_saved_model_path=model_path,
model_name=model_spec.name,
eval_config=eval_config,
add_metrics_callbacks=add_metrics_callbacks))
else:
eval_config = None
assert (standard_component_specs.FEATURE_SLICING_SPEC_KEY
in exec_properties and
exec_properties[standard_component_specs.FEATURE_SLICING_SPEC_KEY]
), 'both eval_config and feature_slicing_spec are unset.'
feature_slicing_spec = evaluator_pb2.FeatureSlicingSpec()
proto_utils.json_to_proto(
exec_properties[standard_component_specs.FEATURE_SLICING_SPEC_KEY],
feature_slicing_spec)
slice_spec = self._get_slice_spec_from_feature_slicing_spec(
feature_slicing_spec)
model_artifact = artifact_utils.get_single_instance(
input_dict[standard_component_specs.MODEL_KEY])
model_path = path_utils.eval_model_path(
model_artifact.uri, path_utils.is_old_model_artifact(model_artifact))
logging.info('Using %s for model eval.', model_path)
models.append(
eval_shared_model_fn(
eval_saved_model_path=model_path,
model_name='',
eval_config=None,
add_metrics_callbacks=add_metrics_callbacks))
eval_shared_model = models[0] if len(models) == 1 else models
schema = None
if standard_component_specs.SCHEMA_KEY in input_dict:
schema = io_utils.SchemaReader().read(
io_utils.get_only_uri_in_dir(
artifact_utils.get_single_uri(
input_dict[standard_component_specs.SCHEMA_KEY])))
# Load and deserialize example splits from execution properties.
example_splits = json_utils.loads(
exec_properties.get(standard_component_specs.EXAMPLE_SPLITS_KEY,
'null'))
if not example_splits:
example_splits = ['eval']
logging.info("The 'example_splits' parameter is not set, using 'eval' "
'split.')
logging.info('Evaluating model.')
# TempPipInstallContext is needed here so that subprocesses (which
# may be created by the Beam multi-process DirectRunner) can find the
# needed dependencies.
# TODO(b/187122662): Move this to the ExecutorOperator or Launcher.
with udf_utils.TempPipInstallContext(extra_pip_packages):
with self._make_beam_pipeline() as pipeline:
examples_list = []
tensor_adapter_config = None
# pylint: disable=expression-not-assigned
if tfma.is_batched_input(eval_shared_model, eval_config):
tfxio_factory = tfxio_utils.get_tfxio_factory_from_artifact(
examples=input_dict[standard_component_specs.EXAMPLES_KEY],
telemetry_descriptors=_TELEMETRY_DESCRIPTORS,
schema=schema,
raw_record_column_name=tfma_constants.ARROW_INPUT_COLUMN)
# TODO(b/161935932): refactor after TFXIO supports multiple patterns.
for split in example_splits:
split_uris = artifact_utils.get_split_uris(
input_dict[standard_component_specs.EXAMPLES_KEY], split)
for index in range(len(split_uris)):
split_uri = split_uris[index]
file_pattern = io_utils.all_files_pattern(split_uri)
tfxio = tfxio_factory(file_pattern)
data = (
pipeline
| f'ReadFromTFRecordToArrow[{split}][{index}]' >>
tfxio.BeamSource())
examples_list.append(data)
if schema is not None:
# Use last tfxio as TensorRepresentations and ArrowSchema are fixed.
tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
arrow_schema=tfxio.ArrowSchema(),
tensor_representations=tfxio.TensorRepresentations())
else:
for split in example_splits:
split_uris = artifact_utils.get_split_uris(
input_dict[standard_component_specs.EXAMPLES_KEY], split)
for index in range(len(split_uris)):
split_uri = split_uris[index]
file_pattern = io_utils.all_files_pattern(split_uri)
data = (
pipeline
| f'ReadFromTFRecord[{split}][{index}]' >>
beam.io.ReadFromTFRecord(file_pattern=file_pattern))
examples_list.append(data)
custom_extractors = udf_utils.try_get_fn(
exec_properties=exec_properties, fn_name='custom_extractors')
extractors = None
if custom_extractors:
extractors = custom_extractors(
eval_shared_model=eval_shared_model,
eval_config=eval_config,
tensor_adapter_config=tensor_adapter_config)
(examples_list | 'FlattenExamples' >> beam.Flatten()
| 'ExtractEvaluateAndWriteResults' >>
(tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=models[0] if len(models) == 1 else models,
eval_config=eval_config,
extractors=extractors,
output_path=output_uri,
slice_spec=slice_spec,
tensor_adapter_config=tensor_adapter_config)))
logging.info('Evaluation complete. Results written to %s.', output_uri)
if not run_validation:
# TODO(jinhuang): delete the BLESSING_KEY from output_dict when supported.
logging.info('No threshold configured, will not validate model.')
return
# Set up blessing artifact
blessing = artifact_utils.get_single_instance(
output_dict[standard_component_specs.BLESSING_KEY])
blessing.set_string_custom_property(
constants.ARTIFACT_PROPERTY_CURRENT_MODEL_URI_KEY,
artifact_utils.get_single_uri(
input_dict[standard_component_specs.MODEL_KEY]))
blessing.set_int_custom_property(
constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY,
input_dict[standard_component_specs.MODEL_KEY][0].id)
if input_dict.get(standard_component_specs.BASELINE_MODEL_KEY):
baseline_model = input_dict[
standard_component_specs.BASELINE_MODEL_KEY][0]
blessing.set_string_custom_property(
constants.ARTIFACT_PROPERTY_BASELINE_MODEL_URI_KEY,
baseline_model.uri)
blessing.set_int_custom_property(
constants.ARTIFACT_PROPERTY_BASELINE_MODEL_ID_KEY, baseline_model.id)
if 'current_component_id' in exec_properties:
blessing.set_string_custom_property(
'component_id', exec_properties['current_component_id'])
# Check validation result and write BLESSED file accordingly.
logging.info('Checking validation results.')
validation_result = tfma.load_validation_result(output_uri)
if validation_result.validation_ok:
io_utils.write_string_file(
os.path.join(blessing.uri, constants.BLESSED_FILE_NAME), '')
blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
constants.BLESSED_VALUE)
else:
io_utils.write_string_file(
os.path.join(blessing.uri, constants.NOT_BLESSED_FILE_NAME), '')
blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
constants.NOT_BLESSED_VALUE)
logging.info('Blessing result %s written to %s.',
validation_result.validation_ok, blessing.uri)
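# Illustrative sketch (not part of the original executor): how the deprecated
# feature_slicing_spec exec property is built on the pipeline side. The column
# name below is hypothetical.
def _example_feature_slicing_spec_exec_property():
  from google.protobuf import json_format  # local import, illustration only
  slicing_spec = evaluator_pb2.FeatureSlicingSpec()
  slicing_spec.specs.add(column_for_slicing=['trip_start_hour'])
  # Do() parses this JSON back into a FeatureSlicingSpec and converts it via
  # _get_slice_spec_from_feature_slicing_spec() into
  # [SingleSliceSpec(columns=['trip_start_hour']), SingleSliceSpec()].
  return {
      standard_component_specs.FEATURE_SLICING_SPEC_KEY:
          json_format.MessageToJson(slicing_spec)
  }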
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import math
import time
from xml.dom import minidom
from xml.parsers import expat
from lxml import etree
import webob
from nova import exception
from nova import log as logging
from nova.openstack.common import jsonutils
from nova import wsgi
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
LOG = logging.getLogger(__name__)
# The vendor content types should serialize identically to the non-vendor
# content types. So to avoid littering the code with both options, we
# map the vendor to the other when looking up the type
_CONTENT_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'application/json',
'application/vnd.openstack.compute+xml': 'application/xml',
}
SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
'application/xml',
'application/vnd.openstack.compute+xml',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'json',
'application/json': 'json',
'application/vnd.openstack.compute+xml': 'xml',
'application/xml': 'xml',
'application/atom+xml': 'atom',
}
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in SUPPORTED_CONTENT_TYPES:
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
self.environ['nova.best_content_type'] = (content_type or
'application/json')
return self.environ['nova.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if not "Content-Type" in self.headers:
return None
allowed_types = SUPPORTED_CONTENT_TYPES
content_type = self.content_type
if content_type not in allowed_types:
raise exception.InvalidContentType(content_type=content_type)
return content_type
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization"""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
def __init__(self, metadata=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
"""
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
try:
node = minidom.parseString(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
:param listnames: list of XML node names whose subnodes should
be considered list items.
"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
listnames)
return result
def find_first_child_named(self, parent, name):
"""Search a nodes children for the first child with a given name"""
for node in parent.childNodes:
if node.nodeName == name:
return node
return None
def find_children_named(self, parent, name):
"""Return all of a nodes children who have the given name"""
for node in parent.childNodes:
if node.nodeName == name:
yield node
def extract_text(self, node):
"""Get the text field contained by the given node"""
if len(node.childNodes) == 1:
child = node.childNodes[0]
if child.nodeType == child.TEXT_NODE:
return child.nodeValue
return ""
def find_attribute_or_element(self, parent, name):
"""Get an attribute value; fallback to an element if not found"""
if parent.hasAttribute(name):
return parent.getAttribute(name)
node = self.find_first_child_named(parent, name)
if node:
return self.extract_text(node)
return None
def default(self, datastring):
return {'body': self._from_xml(datastring)}
class MetadataXMLDeserializer(XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request"""
metadata = {}
if metadata_node is not None:
for meta_node in self.find_children_named(metadata_node, "meta"):
key = meta_node.getAttribute("key")
metadata[key] = self.extract_text(meta_node)
return metadata
class DictSerializer(ActionDispatcher):
"""Default request body serialization"""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization"""
def default(self, data):
return jsonutils.dumps(data)
class XMLDictSerializer(DictSerializer):
def __init__(self, metadata=None, xmlns=None):
"""
        :param metadata: information needed to serialize a dictionary
                         into xml.
:param xmlns: XML namespace to include with serialized xml
"""
super(XMLDictSerializer, self).__init__()
self.metadata = metadata or {}
self.xmlns = xmlns
def default(self, data):
# We expect data to contain a single key which is the XML root.
root_key = data.keys()[0]
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
return self.to_xml_string(node)
def to_xml_string(self, node, has_atom=False):
self._add_xmlns(node, has_atom)
return node.toxml('UTF-8')
    # NOTE(ameade): has_atom should be removed once all of the xml
    # serializers and view builders have been updated to the current spec,
    # which requires all responses to include xmlns:atom; the has_atom flag
    # only prevents current tests from breaking.
def _add_xmlns(self, node, has_atom=False):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
if has_atom:
node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)
#TODO(bcwaldon): accomplish this without a type-check
if isinstance(data, list):
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
singular = nodename[:-1]
else:
singular = 'item'
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
#TODO(bcwaldon): accomplish this without a type-check
elif isinstance(data, dict):
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:
result.setAttribute(k, str(v))
else:
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
else:
# Type is atom
node = doc.createTextNode(str(data))
result.appendChild(node)
return result
def _create_link_nodes(self, xml_doc, links):
link_nodes = []
for link in links:
link_node = xml_doc.createElement('atom:link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
if 'type' in link:
link_node.setAttribute('type', link['type'])
link_nodes.append(link_node)
return link_nodes
def _to_xml(self, root):
"""Convert the xml object to an xml string."""
return etree.tostring(root, encoding='UTF-8', xml_declaration=True)
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
def deserializers(**deserializers):
"""Attaches deserializers to a method.
This decorator associates a dictionary of deserializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_deserializers'):
func.wsgi_deserializers = {}
func.wsgi_deserializers.update(deserializers)
return func
return decorator
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
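# Illustrative sketch (not part of the original module): the serializers(),
# deserializers() and response() decorators above only attach attributes to
# the decorated function; the function itself is not wrapped. The handler
# below is hypothetical.
@response(202)
@serializers(xml=XMLDictSerializer)
def _example_create(req, body):
    return {'server': {}}
# After decoration: _example_create.wsgi_code == 202 and
# _example_create.wsgi_serializers == {'xml': XMLDictSerializer}.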
class ResponseObject(object):
"""Bundles a response object with appropriate serializers.
Object that app methods may return in order to bind alternate
serializers with a response object to be serialized. Its use is
optional.
"""
def __init__(self, obj, code=None, **serializers):
"""Binds serializers with an object.
Takes keyword arguments akin to the @serializer() decorator
for specifying serializers. Serializers specified will be
given preference over default serializers or method-specific
serializers on return.
"""
self.obj = obj
self.serializers = serializers
self._default_code = 200
self._code = code
self._headers = {}
self.serializer = None
self.media_type = None
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def _bind_method_serializers(self, meth_serializers):
"""Binds method serializers with the response object.
Binds the method serializers with the response object.
Serializers specified to the constructor will take precedence
over serializers specified to this method.
:param meth_serializers: A dictionary with keys mapping to
response types and values containing
serializer objects.
"""
# We can't use update because that would be the wrong
# precedence
for mtype, serializer in meth_serializers.items():
self.serializers.setdefault(mtype, serializer)
def get_serializer(self, content_type, default_serializers=None):
"""Returns the serializer for the wrapped object.
Returns the serializer for the wrapped object subject to the
indicated content type. If no serializer matching the content
type is attached, an appropriate serializer drawn from the
default serializers will be used. If no appropriate
serializer is available, raises InvalidContentType.
"""
default_serializers = default_serializers or {}
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in self.serializers:
return mtype, self.serializers[mtype]
else:
return mtype, default_serializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def preserialize(self, content_type, default_serializers=None):
"""Prepares the serializer that will be used to serialize.
Determines the serializer that will be used and prepares an
instance of it for later call. This allows the serializer to
be accessed by extensions for, e.g., template extension.
"""
mtype, serializer = self.get_serializer(content_type,
default_serializers)
self.media_type = mtype
self.serializer = serializer()
def attach(self, **kwargs):
"""Attach subordinate templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
def serialize(self, request, content_type, default_serializers=None):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
if self.serializer:
serializer = self.serializer
else:
_mtype, _serializer = self.get_serializer(content_type,
default_serializers)
serializer = _serializer()
response = webob.Response()
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = value
response.headers['Content-Type'] = content_type
if self.obj is not None:
response.body = serializer.serialize(self.obj)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek_json(body):
"""Determine action to invoke."""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action and the decoded body...
return decoded.keys()[0]
def action_peek_xml(body):
"""Determine action to invoke."""
dom = minidom.parseString(body)
action_node = dom.childNodes[0]
return action_node.tagName
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.NotAuthorized):
msg = unicode(ex_value)
raise Fault(webob.exc.HTTPForbidden(explanation=msg))
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code, explanation=unicode(ex_value)))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_('Exception handling resource: %s') % ex_value,
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value))
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
def __init__(self, controller, action_peek=None, **deserializers):
"""
        :param controller: object that implements methods created by the
                           routes lib
:param action_peek: dictionary of routines for peeking into an action
request body to determine the desired action
"""
self.controller = controller
default_deserializers = dict(xml=XMLDeserializer,
json=JSONDeserializer)
default_deserializers.update(deserializers)
self.default_deserializers = default_deserializers
self.default_serializers = dict(xml=XMLDictSerializer,
json=JSONDictSerializer)
self.action_peek = dict(xml=action_peek_xml,
json=action_peek_json)
self.action_peek.update(action_peek or {})
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
return None, ''
if not content_type:
LOG.debug(_("No Content-Type provided in request"))
return None, ''
if len(request.body) <= 0:
LOG.debug(_("Empty body provided in request"))
return None, ''
return content_type, request.body
def deserialize(self, meth, content_type, body):
meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in meth_deserializers:
deserializer = meth_deserializers[mtype]
else:
deserializer = self.default_deserializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
return deserializer().deserialize(body)
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = gen.next()
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
# Run post-processing in the reverse order
return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Now, deserialize the request body...
try:
if content_type:
contents = self.deserialize(meth, content_type, body)
else:
contents = {}
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request url")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
_set_request_id_header(request, resp_obj)
# Do a preserialize to set up the response object
serializers = getattr(meth, 'wsgi_serializers', {})
resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
resp_obj.preserialize(accept, self.default_serializers)
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept,
self.default_serializers)
try:
msg_dict = dict(url=request.url, status=response.status_int)
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
        except AttributeError as e:
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s") % msg_dict
LOG.info(msg)
return response
def get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in ['action', 'create', 'delete']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
# OK, it's an action; figure out which action...
mtype = _MEDIA_TYPE_MAP.get(content_type)
action_name = self.action_peek[mtype](body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
return method(req=request, **action_args)
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
class Controller(object):
"""Default controller."""
__metaclass__ = ControllerMetaclass
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
fault_data = {
fault_name: {
'code': code,
'message': self.wrapped_exc.explanation}}
if code == 413:
retry = self.wrapped_exc.headers['Retry-After']
fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {fault_name: 'code'}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
content_type = req.best_match_content_type()
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
_set_request_id_header(req, self.wrapped_exc.headers)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
class OverLimitFault(webob.exc.HTTPException):
"""
Rate-limited request response.
"""
def __init__(self, message, details, retry_time):
"""
Initialize new `OverLimitFault` with relevant information.
"""
hdrs = OverLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
self.content = {
"overLimitFault": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""
Return the wrapped exception with a serialized body conforming to our
error format.
"""
content_type = request.best_match_content_type()
metadata = {"attributes": {"overLimitFault": "code"}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
return self.wrapped_exc
def _set_request_id_header(req, headers):
context = req.environ.get('nova.context')
if context:
headers['x-compute-request-id'] = context.request_id
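# Illustrative sketch (not part of the original module): how Controller, the
# action() decorator and Resource fit together. The controller below and its
# routes are hypothetical; in practice controllers are wired up through the
# routes middleware.
class _ExampleController(Controller):
    @action('os-reset')
    def _reset(self, req, id, body):
        # Invoked when a POST to .../action carries a body such as
        # {"os-reset": {...}}; routing args (e.g. 'id') are passed through.
        return {'os-reset': {'server': id}}
    def show(self, req, id):
        return {'server': {'id': id}}
# A Resource wraps the controller, deserializes the request body, dispatches
# to show()/_reset() and serializes the returned dict according to the
# Accept header, e.g.:
#     example_resource = Resource(_ExampleController())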
|
|
#!/usr/bin/env python
''':command:`streamcorpus_pipeline` is the command-line entry point to the
pipeline.
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2014 Diffeo, Inc.
.. program:: streamcorpus_pipeline
The supplied configuration must have a :mod:`streamcorpus_pipeline`
section. Other required configuration depends on the specific stages
in use and the rest of the operating environment; for instance, if the
:class:`~streamcorpus_pipeline._kvlayer.to_kvlayer` writer stage is in
use, then the configuration must also contain a top-level
:mod:`kvlayer` section.
This supports the standard :option:`--config <yakonfig --config>`,
:option:`--dump-config <yakonfig --dump-config>`, :option:`--verbose
<dblogger --verbose>`, :option:`--quiet <dblogger --quiet>`, and
:option:`--debug <dblogger --debug>` options. It supports the
following additional options:
.. option:: --input <file.sc>, -i <file.sc>
Names the input file. If the file name is ``-`` then standard
input is used, if this makes sense. Some reader stages may use the
file name specially.
.. option:: --in-glob <pattern>
Runs the pipeline once for each file matching the shell :mod:`glob`
`pattern`.
'''
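## Example invocations (illustrative only; the config and file names below
## are hypothetical):
##   streamcorpus_pipeline --config config.yaml -i corpus.sc
##   streamcorpus_pipeline --config config.yaml --in-glob 'chunks/*.sc'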
from __future__ import absolute_import
import copy
import glob
import gzip
import importlib
import itertools
import logging
import json
import os
import re
import sys
import time
import yaml
import dblogger
import kvlayer
import yakonfig
from yakonfig.toplevel import assemble_default_config
import streamcorpus_pipeline
from streamcorpus_pipeline._exceptions import ConfigurationError
from streamcorpus_pipeline._pipeline import PipelineFactory, Pipeline
from streamcorpus_pipeline.stages import PipelineStages
logger = logging.getLogger(__name__)
def make_absolute_paths(config):
'''given a config dict with streamcorpus_pipeline as a key, find all
keys under streamcorpus_pipeline that end with "_path" and if the
value of that key is a relative path, convert it to an absolute
path using the value provided by root_path
'''
    if 'streamcorpus_pipeline' not in config:
logger.critical('bad config: %r', config)
raise ConfigurationError('missing "streamcorpus_pipeline" from config')
## remove the root_path, so it does not get extended itself
root_path = config['streamcorpus_pipeline'].pop('root_path', None)
if not root_path:
root_path = os.getcwd()
if not root_path.startswith('/'):
root_path = os.path.join( os.getcwd(), root_path )
def recursive_abs_path( sub_config, root_path ):
for key, val in sub_config.items():
if isinstance(val, basestring):
if key.endswith('path'):
## ignore URLs in *_path parameters
if re.match('^http.?://', val): continue
## we have a path... is it already absolute?
if not val.startswith('/'):
## make the path absolute
sub_config[key] = os.path.join(root_path, val)
elif isinstance(val, dict):
recursive_abs_path( val, root_path )
recursive_abs_path( config, root_path )
## put the root_path back
config['root_path'] = root_path
def make_hash(obj):
'''
Makes a hash from a dictionary, list, tuple or set to any level,
that contains only other hashable types (including any lists,
tuples, sets, and dictionaries). See second answer (not the
accepted answer):
http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
'''
if isinstance(obj, (set, tuple, list)):
return tuple([make_hash(e) for e in obj])
elif not isinstance(obj, dict):
return hash(obj)
new_obj = copy.deepcopy(obj)
for k, v in new_obj.items():
## call self recursively
new_obj[k] = make_hash(v)
return hash(tuple(frozenset(new_obj.items())))
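# A minimal usage sketch (not part of the original module): make_hash()
# reduces an arbitrarily nested config to a single integer, so two configs
# with equal contents hash identically within one process. The config below
# is made up for illustration.
def _example_make_hash_usage():
    config_a = {'streamcorpus_pipeline': {'reader': 'from_local_chunks',
                                          'writers': ['to_local_chunks']}}
    config_b = copy.deepcopy(config_a)
    assert make_hash(config_a) == make_hash(config_b)
    return make_hash(config_a)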
def instantiate_config(config):
'''setup the config and load external modules
This updates 'config' as follows:
* All paths are replaced with absolute paths
* A hash and JSON dump of the config are stored in the config
* If 'pythonpath' is in the config, it is added to sys.path
* If 'setup_modules' is in the config, all modules named in it are loaded
'''
make_absolute_paths(config)
pipeline_config = config['streamcorpus_pipeline']
pipeline_config['config_hash'] = make_hash(config)
pipeline_config['config_json'] = json.dumps(config)
logger.debug('running config: {0} = {1!r}'
.format(pipeline_config['config_hash'], config))
## Load modules
# This is a method of using settings in yaml configs to load plugins.
die = False
for pathstr in pipeline_config.get('pythonpath', {}).itervalues():
if pathstr not in sys.path:
sys.path.append(pathstr)
for modname in pipeline_config.get('setup_modules', {}).itervalues():
try:
m = importlib.import_module(modname)
if not m:
logger.critical('could not load module %r', modname)
die = True
continue
if hasattr(m, 'setup'):
m.setup()
logger.debug('loaded and setup %r', modname)
else:
logger.debug('loaded %r', modname)
except Exception:
logger.critical('error loading and initting module %r', modname, exc_info=True)
die = True
if die:
sys.exit(1)
def pathfile_iter(path):
if path.endswith('.gz'):
fin = gzip.open(path, 'rb')
else:
fin = open(path, 'rb')
for line in fin:
line = line.strip()
yield line
def main():
import argparse
parser = argparse.ArgumentParser(
description='process a sequence of stream items',
usage='streamcorpus_pipeline --config config.yaml --input file.in')
parser.add_argument('-i', '--input', action='append',
help='file paths to input instead of reading from stdin')
parser.add_argument('--in-glob', action='append', default=[], help='path glob specifying input files')
parser.add_argument('--third-dir-path', help='path to third-party tools directory')
parser.add_argument('--tmp-dir-path', help='path to temporary directory for scratch files, can be large')
parser.add_argument('-f', '--file-of-paths', dest='file_of_paths', default=None, help='path to file with list of paths for input, one per line')
parser.add_argument('--skip', type=int, default=0,
help='Skip the first N stream items.')
modules = [yakonfig, kvlayer, dblogger, streamcorpus_pipeline]
args = yakonfig.parse_args(parser, modules)
config = yakonfig.get_global_config()
## this modifies the global config, passed by reference
instantiate_config(config)
input_paths = []
if args.in_glob:
for pattern in args.in_glob:
input_paths.extend(glob.glob(pattern))
if args.input:
if '-' in args.input:
if args.in_glob:
sys.exit('cannot use "-i -" and --in-glob together')
if len(args.input) > 1:
sys.exit('cannot use "-i -" with multiple inputs')
input_paths = sys.stdin
else:
input_paths.extend(args.input)
if args.file_of_paths:
input_paths = itertools.chain(input_paths, pathfile_iter(args.file_of_paths))
scp_config = config['streamcorpus_pipeline']
stages = PipelineStages()
if 'external_stages_path' in scp_config:
stages.load_external_stages(scp_config['external_stages_path'])
if 'external_stages_modules' in scp_config:
for mod in scp_config['external_stages_modules']:
stages.load_module_stages(mod)
factory = PipelineFactory(stages)
pipeline = factory(scp_config)
for i_str in input_paths:
logger.info('input path %r', i_str)
work_unit = SimpleWorkUnit(i_str.strip())
work_unit.data['start_chunk_time'] = time.time()
work_unit.data['start_count'] = args.skip
pipeline._process_task(work_unit)
class SimpleWorkUnit(object):
'''partially duck-typed coordinate.WorkUnit that wraps strings from
stdin and provides only the methods used by the Pipeline
'''
def __init__(self, i_str):
self.key = i_str
self.data = dict()
def update(self):
## a real WorkUnit would send self.data to the registry and
## renew the lease time
pass
def terminate(self):
pass
def fail(self, exc=None):
logger.critical('failing SimpleWorkUnit(%r) = %r: %r', self.key, self.data, exc, exc_info=True)
sys.exit(-1)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
'''
Written by Kong Xiaolu and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import os
import scipy.io as sio
import numpy as np
import time
import torch
import CBIG_pMFM_basic_functions as fc
def get_init(gradient_data, highest_order, init_para):
'''
    Calculate the initial parametrized coefficients by least-squares fitting
    a polynomial of order highest_order in the gradient to init_para.
'''
n_node = gradient_data.shape[0]
amatrix = np.zeros((n_node, highest_order + 1))
for i in range(highest_order + 1):
amatrix[:, i] = gradient_data**(i)
para = np.linalg.inv(amatrix.T @ amatrix) @ amatrix.T @ init_para
return para, amatrix
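# A minimal usage sketch (not part of the original script), assuming a 68-ROI
# gradient; the numbers below are made up for illustration.
def _example_get_init_usage():
    '''get_init() solves the ordinary least-squares problem
    amatrix @ para ~= init_para, where amatrix[:, i] = gradient**i.'''
    gradient = np.linspace(-1.0, 1.0, 68)  # one value per cortical ROI
    init_w = 0.5 + 0.1 * gradient  # roughly linear in the gradient
    para, amatrix = get_init(gradient, 1, init_w)
    # para is approximately [0.5, 0.1]; amatrix @ para reconstructs init_w.
    return para, amatrix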
def CBIG_mfm_optimization_desikan_main(random_seed=1, gpu_index=0):
'''
    Implement the optimization process of the parameterized mean field model.
    The objective function is the sum of the FC correlation cost and the FCD
    KS-statistic cost. The optimization process is largely automatic and
    generates 500 candidate parameter sets (Lambda = 500) in each CMA-ES
    generation.
    Args:
        random_seed: random seed for the optimization
        gpu_index: index of the GPU used for the optimization
    Note:
        Results are saved to a hard-coded output_path set inside this
        function.
Returns:
None
'''
output_path = '../output/rsfcpc2/training'
if not os.path.isdir(output_path):
os.makedirs(output_path)
# Setting random seed and GPU
torch.cuda.set_device(gpu_index)
random_seed_cuda = random_seed
random_seed_np = random_seed
torch.manual_seed(random_seed_cuda)
rng = np.random.Generator(np.random.PCG64(random_seed_np))
# Initializing input parameters
highest_order = 1
N = 3 * (highest_order + 1) + 1
gradient_data = fc.csv_matrix_read(
'../../../input/Desikan_input/rsfc_gradient_pc2.csv')
gradient_data = gradient_data[:, 0]
n_node = gradient_data.shape[0]
dim = n_node * 3 + 1
search_range = np.zeros((dim, 2))
search_range[0:n_node, :] = [0, 1]
search_range[n_node:n_node * 2, :] = [0, 0.5]
search_range[n_node * 2, :] = [1, 10]
search_range[n_node * 2 + 1:dim, :] = [0.0005, 0.01]
init_para = rng.uniform(0, 1, dim) * (
search_range[:, 1] - search_range[:, 0]) + search_range[:, 0]
start_point_w, template_mat = get_init(gradient_data, highest_order,
init_para[0:n_node])
start_point_i, template_mat = get_init(gradient_data, highest_order,
init_para[n_node:n_node * 2])
start_point_sigma, template_mat = get_init(gradient_data, highest_order,
init_para[n_node * 2 + 1:dim])
    # Initializing the starting mean (xmean) from which children are generated
xmean = np.zeros(N)
xmean[0:highest_order + 1] = start_point_w
xmean[highest_order + 1:2 * (highest_order + 1)] = start_point_i
xmean[2 * (highest_order + 1)] = init_para[2 * n_node]
xmean[2 * (highest_order + 1) + 1:N] = start_point_sigma
# Initializing optimization hyper-parameters
sigma = 0.15
sigmaS = 0.15
stoppoint = 0.3
maxloop = 400
n_dup = 3
# CMA-ES parameters setting
Lambda = 500
mu = 40
weights = np.log(mu + 1 / 2) - np.log(np.arange(1, mu + 1))
weights = weights / np.sum(weights)
mueff = 1 / np.sum(weights**2)
# Strategy parameter setting: adaptation
cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N)
cs = (mueff + 2) / (N + mueff + 5)
c1 = 2 / ((N + 1.3)**2 + mueff)
cmu = np.minimum(1 - c1,
2 * (mueff - 2 + 1 / mueff) / ((N + 2)**2 + mueff))
damps = 1 + 2 * np.maximum(0, np.sqrt((mueff - 1) / (N + 1)) - 1) + cs
    # Initializing dynamic strategy parameters and constants
pc = np.zeros(N)
ps = np.zeros(N)
B = np.eye(N)
D = np.zeros(N)
D[0:highest_order + 1] = start_point_w[0] / 2
D[highest_order + 1:2 * (highest_order + 1)] = start_point_i[0] / 2
D[2 * (highest_order + 1)] = 0.4
D[2 * (highest_order + 1) + 1:N] = 0.001 / 2
C = np.dot(np.dot(B, np.diag(np.power(D, 2))), B.T)
invsqrtC = np.dot(np.dot(B, np.diag(np.power(D, -1))), B.T)
    # Expectation of ||N(0, I)||; note N**2 (power), not bitwise N ^ 2.
    chiN = N**0.5 * (1 - 1 / (4 * N) + 1 / (21 * N**2))
# Evolution loop
countloop = 0
arx = np.zeros([N, Lambda])
input_para = np.zeros((dim, Lambda))
xmin = np.zeros([N + 3, maxloop])
stop_count = 0
while countloop < maxloop:
start_time = time.time()
# Generating lambda offspring
arx[:, 0] = xmean
j = 0
while j < Lambda:
arx[:, j] = xmean + sigma * np.dot(B, (D * rng.standard_normal(N)))
input_para[0:n_node, j] = template_mat @ arx[0:highest_order +
1, j]
input_para[n_node:2 *
n_node, j] = template_mat @ arx[highest_order + 1:2 *
(highest_order + 1), j]
input_para[2 * n_node:2 * n_node +
1, j] = arx[2 * (highest_order + 1), j]
input_para[2 * n_node +
1:dim, j] = template_mat @ arx[2 * (highest_order + 1) +
1:N, j]
if (input_para[:, j] < search_range[:, 0]).any() or (
input_para[:, j] > search_range[:, 1]).any():
j = j - 1
j = j + 1
# Calculating costs of offspring
total_cost, fc_cost, fcd_cost = fc.CBIG_combined_cost_train(
input_para, n_dup)
countloop = countloop + 1
# Sort by total cost and compute weighted mean
arfitsort = np.sort(total_cost)
arindex = np.argsort(total_cost)
xold = xmean
xmean = np.dot(arx[:, arindex[0:mu]], weights)
xshow = xmean - xold
# Cumulation
ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * np.dot(
invsqrtC, xshow) / sigma
hsig = (np.linalg.norm(ps) / np.sqrt(1 - (1 - cs)**
(2 * countloop)) / chiN <
(1.4 + 2 / (N + 1))) * 1
pc = (1 - cc) * pc + hsig * np.sqrt(cc *
(2 - cc) * mueff) * xshow / sigma
# Adapting covariance matrix C
artmp = (1 / sigma) * (
arx[:, arindex[0:mu]] - np.tile(xold, [mu, 1]).T)
C = (1 - c1 - cmu) * C + c1 * (
np.outer(pc, pc) + (1 - hsig) * cc * (2 - cc) * C) + cmu * np.dot(
artmp, np.dot(np.diag(weights), artmp.T))
# Adapting step size
sigma = sigma * np.exp((cs / damps) * (np.linalg.norm(ps) / chiN - 1))
sigma = min(sigma, sigmaS)
# Decomposition
if 1 > 1 / (c1 + cmu) / N / 10:
C = np.triu(C, k=1) + np.triu(C).T
D, B = np.linalg.eigh(C)
D = D.real
B = B.real
D = np.sqrt(D)
invsqrtC = np.dot(B, np.dot(np.diag(D**(-1)), B.T))
# Monitoring the evolution status
ps_norm = np.linalg.norm(ps)
print('******** Generation: ' + str(countloop) + ' ********')
print('Norm of P-sigma: ', ps_norm)
print('The mean of total cost: ', np.mean(arfitsort[0:mu]))
print('Sigma: ', sigma)
xmin[0:N, countloop - 1] = arx[:, arindex[0]]
xmin[N, countloop - 1] = fc_cost[arindex[0]]
xmin[N + 1, countloop - 1] = fcd_cost[arindex[0]]
xmin[N + 2, countloop - 1] = np.min(total_cost)
print('Best total cost: ', np.min(total_cost))
print('FC correlation cost: ', fc_cost[arindex[0]])
print('FCD KS statistics cost: ', fcd_cost[arindex[0]])
elapsed_time = time.time() - start_time
print('Elapsed time for this evolution is : ', elapsed_time)
print('******************************************')
# break
if arfitsort[0] < stoppoint and ps_norm < 11:
stop_count = stop_count + 1
if stop_count >= 5 or sigma < 0.001:
break
save_name = [output_path] + ['/random_seed_', str(random_seed), '.csv']
np.savetxt(''.join(save_name), xmin, delimiter=',')
if __name__ == "__main__":
CBIG_mfm_optimization_desikan_main(random_seed=1, gpu_index=0)
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import tensorflow as tf
def cycle(obj, cycles, signatures=None):
to_save = obj
# TODO(vbardiovsky): It would be nice if exported protos reached a fixed
# point w.r.t. saving/restoring, ideally after 2nd saving.
for _ in range(cycles):
path = tempfile.mkdtemp(prefix=tf.compat.v1.test.get_temp_dir())
# If available, we'll run the save and restore preferring the GPU. This
# just makes sure we aren't throwing errors and have enough
# device("CPU") blocks to satisfy the placer.
device = "/device:GPU:0" if tf.test.is_gpu_available() else "/device:CPU:0"
with tf.device(device):
tf.saved_model.save(to_save, path, signatures)
loaded = tf.saved_model.load(path)
to_save = loaded
return loaded
class _ModelWithOptimizer(tf.train.Checkpoint):
def __init__(self):
self.dense = tf.keras.layers.Dense(1)
self.optimizer = tf.keras.optimizers.Adam(0.01)
@tf.function(
input_signature=(tf.TensorSpec([None, 2], tf.float32),
tf.TensorSpec([None], tf.float32)))
def call(self, x, y):
with tf.GradientTape() as tape:
loss = tf.math.reduce_mean((self.dense(x) - y) ** 2.)
trainable_variables = self.dense.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
return {"loss": loss}
def _import_and_infer(save_dir, inputs, signature_key="serving_default"):
"""Import a SavedModel into a TF 1.x-style graph and run `signature_key`."""
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session() as session:
model = tf.compat.v1.saved_model.load(session, ["serve"], save_dir)
return _run_signature(session, model, inputs, signature_key)
def _run_signature(session, meta_graph_def, inputs, signature_key):
signature = meta_graph_def.signature_def[signature_key]
assert set(inputs.keys()) == set(signature.inputs.keys())
feed_dict = {}
for arg_name in inputs.keys():
input_tensor = session.graph.get_tensor_by_name(
signature.inputs[arg_name].name)
feed_dict[input_tensor] = inputs[arg_name]
output_dict = {}
for output_name, output_tensor_info in signature.outputs.items():
output_dict[output_name] = session.graph.get_tensor_by_name(
output_tensor_info.name)
return session.run(output_dict, feed_dict=feed_dict)
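# A minimal sketch (assumption, not an original test) tying the helpers above together:
# export a model with an explicit serving signature, then run it back through the
# TF 1.x-style loader in `_import_and_infer`.
def _example_import_and_infer():
  model = _ModelWithOptimizer()
  model.call(tf.constant([[3., 4.]]), tf.constant([2.]))
  save_dir = os.path.join(tempfile.mkdtemp(), "saved_model")
  tf.saved_model.save(model, save_dir, signatures=model.call)
  # Returns a dict like {"loss": ...} computed by the "serving_default" signature.
  return _import_and_infer(save_dir, {"x": [[3., 4.]], "y": [2.]})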
class SaveTest(tf.test.TestCase):
def test_unbuilt_model_does_not_prevent_saving(self):
root = tf.train.Checkpoint(
model=tf.keras.Sequential([tf.keras.layers.Dense(2)]))
tf.saved_model.save(root, os.path.join(self.get_temp_dir(), "saved_model"))
def test_optimizer(self):
x = tf.constant([[3., 4.]])
y = tf.constant([2.])
model = _ModelWithOptimizer()
first_loss = model.call(x, y)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
tf.saved_model.save(model, save_dir, model.call)
second_loss = model.call(x, y)
self.assertNotEqual(first_loss, second_loss)
self.assertAllClose(
second_loss,
_import_and_infer(save_dir, {"x": [[3., 4.]], "y": [2.]}))
def test_single_method_default_signature(self):
model = _ModelWithOptimizer()
x = tf.constant([[3., 4.]])
y = tf.constant([2.])
model.call(x, y)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
tf.saved_model.save(model, save_dir)
self.assertIn("loss",
_import_and_infer(save_dir,
{"x": [[3., 4.]], "y": [2.]}))
@parameterized.named_parameters(
dict(testcase_name="ReloadOnce", cycles=1),
dict(testcase_name="ReloadTwice", cycles=2),
dict(testcase_name="ReloadThrice", cycles=3))
class LoadTest(tf.test.TestCase, parameterized.TestCase):
def test_optimizer(self, cycles):
class _HasOptimizer(tf.Module):
def __init__(self):
super(_HasOptimizer, self).__init__()
self.layer = tf.keras.layers.Dense(1)
self.optimizer = tf.keras.optimizers.Adam(0.01)
@tf.function
def __call__(self, x):
return self.layer(x)
@tf.function
def train(self, x, y):
with tf.GradientTape() as tape:
predicted = self(x)
loss = tf.math.reduce_sum(tf.math.abs(y - predicted))
train_vars = self.layer.trainable_variables
grads = tape.gradient(loss, train_vars)
self.optimizer.apply_gradients(zip(grads, train_vars))
root = _HasOptimizer()
train_input = dict(x=tf.constant([[1.]]),
y=tf.constant([[2.]]))
root.train(**train_input)
imported = cycle(root, cycles)
self.assertAllClose(root.optimizer.learning_rate.numpy(),
imported.optimizer.learning_rate.numpy())
self.assertAllClose(root(tf.constant([[-0.5]])),
imported(tf.constant([[-0.5]])))
root.train(**train_input)
imported.train(**train_input)
self.assertAllClose(root(tf.constant([[-0.5]])),
imported(tf.constant([[-0.5]])))
def test_model_with_custom_function_attached(self, cycles):
root = tf.train.Checkpoint(
model=tf.keras.Sequential([tf.keras.layers.Dense(2)]))
@tf.function
def _use_sequential(x):
return root.model.call(x)
root.model.traced_call = _use_sequential
original = root.model.traced_call(tf.zeros([1, 1])).numpy()
root = cycle(root, cycles)
self.assertAllEqual(
original,
root.model.traced_call(tf.zeros([1, 1])).numpy())
@parameterized.named_parameters(
dict(testcase_name="ReloadOnce", cycles=1),
dict(testcase_name="ReloadTwice", cycles=2),
dict(testcase_name="ReloadThrice", cycles=3))
class KerasLoadTest(tf.test.TestCase, parameterized.TestCase):
def test_dense_features_layer(self, cycles):
columns = [
tf.feature_column.numeric_column("x"),
tf.feature_column.numeric_column("y")
]
layer = tf.keras.layers.DenseFeatures(columns)
model = tf.keras.Sequential([layer])
model_input = {"x": tf.constant([[1.]]),
"y": tf.constant([[2.]])}
self.assertAllClose([[1., 2.]], model.predict(model_input, steps=1))
loaded = cycle(model, cycles)
output, = loaded._default_save_signature(model_input).values()
self.assertAllClose([[1., 2.]], output)
signature_output, = loaded.signatures["serving_default"](
**model_input).values()
self.assertAllClose([[1., 2.]], signature_output)
def test_dense_features_layer_fit(self, cycles):
columns = [tf.feature_column.numeric_column("x")]
model = tf.keras.Sequential(
[tf.keras.layers.DenseFeatures(columns),
tf.keras.layers.Dense(1)])
model_input = {"x": tf.constant([[1.]])}
model.compile(optimizer="adam", loss="mse", run_eagerly=True)
model.fit(model_input, tf.constant([[3.]]))
loaded = cycle(model, cycles)
loaded._default_save_signature(model_input)
loaded.signatures["serving_default"](**model_input)
def test_multi_output_layer(self, cycles):
inp = tf.keras.Input(name="inp", shape=(None,), dtype=tf.float32)
class _MultiOutput(tf.keras.layers.Layer):
def call(self, x):
return x + 1., x + 2.
out = _MultiOutput(name="out")(inp) # pylint: disable=not-callable
model = tf.keras.Model(inp, out)
loaded = cycle(model, cycles)
self.assertAllClose(
dict(out=2., out_1=3.),
loaded.signatures["serving_default"](tf.constant(1.)))
def test_functional_model_with_conv(self, cycles):
x = tf.keras.Input(name="x", shape=(None, None, 3), dtype=tf.float32)
conved = tf.keras.layers.Conv2D(
filters=3, kernel_size=3, dilation_rate=2)(x)
model = tf.keras.Model([x], conved)
model_input = tf.ones((1, 10, 10, 3))
initial_output = model.predict([model_input])
model = cycle(model, cycles)
self.assertAllClose(
[initial_output],
list(model.signatures["serving_default"](model_input).values()))
if __name__ == "__main__":
tf.test.main()
|
|
"""
An entity tracker
"""
from spockbot.mcdata.utils import Info
from spockbot.plugins.base import PluginBase, pl_announce
class MCEntity(Info):
eid = 0
status = 0
nbt = None
metadata = None
class MovementEntity(MCEntity):
x = 0
y = 0
z = 0
yaw = 0
pitch = 0
on_ground = True
class PlayerEntity(MovementEntity):
uuid = 0
current_item = 0
metadata = None
class ObjectEntity(MovementEntity):
obj_type = 0
obj_data = 0
speed_x = 0
speed_y = 0
speed_z = 0
class MobEntity(MovementEntity):
mob_type = 0
head_pitch = 0
head_yaw = 0
velocity_x = 0
velocity_y = 0
velocity_z = 0
metadata = None
class PaintingEntity(MCEntity):
title = ""
location = {
'x': 0,
'y': 0,
'z': 0,
}
direction = 0
class ExpEntity(MCEntity):
x = 0
y = 0
z = 0
count = 0
class GlobalEntity(MCEntity):
global_type = 0
x = 0
y = 0
z = 0
class EntitiesCore(object):
def __init__(self):
self.client_player = MCEntity()
self.entities = {}
self.players = {}
self.mobs = {}
self.objects = {}
self.paintings = {}
self.exp_orbs = {}
self.global_entities = {}
@pl_announce('Entities')
class EntitiesPlugin(PluginBase):
requires = 'Event'
events = {
'PLAY<Join Game': 'handle_join_game',
'PLAY<Spawn Player': 'handle_spawn_player',
'PLAY<Spawn Object': 'handle_spawn_object',
'PLAY<Spawn Mob': 'handle_spawn_mob',
'PLAY<Spawn Painting': 'handle_spawn_painting',
'PLAY<Spawn Experience Orb': 'handle_spawn_experience_orb',
'PLAY<Destroy Entities': 'handle_destroy_entities',
'PLAY<Entity Equipment': 'handle_unhandled',
'PLAY<Entity Velocity': 'handle_velocity',
'PLAY<Entity Relative Move': 'handle_relative_move',
'PLAY<Entity Look': 'handle_set_dict',
'PLAY<Entity Look and Relative Move': 'handle_relative_move',
'PLAY<Entity Teleport': 'handle_set_dict',
'PLAY<Entity Head Look': 'handle_set_dict',
'PLAY<Entity Status': 'handle_set_dict',
'PLAY<Entity Metadata': 'handle_set_dict',
'PLAY<Entity Effect': 'handle_unhandled',
'PLAY<Remove Entity Effect': 'handle_unhandled',
'PLAY<Entity Properties': 'handle_unhandled',
'PLAY<Spawn Global Entity': 'handle_spawn_global_entity',
'PLAY<Update Entity NBT': 'handle_set_dict',
}
def __init__(self, ploader, settings):
super(EntitiesPlugin, self).__init__(ploader, settings)
self.ec = EntitiesCore()
ploader.provides('Entities', self.ec)
# TODO: Implement all these things
def handle_unhandled(self, event, packet):
pass
def handle_join_game(self, event, packet):
self.ec.client_player.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = self.ec.client_player
def handle_spawn_player(self, event, packet):
entity = PlayerEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.players[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
self.event.emit('entity_player_spawn', entity)
def handle_spawn_object(self, event, packet):
entity = ObjectEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.objects[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
def handle_spawn_mob(self, event, packet):
entity = MobEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.mobs[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
self.event.emit('entity_mob_spawn', entity)
def handle_spawn_painting(self, event, packet):
entity = PaintingEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.paintings[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
def handle_spawn_experience_orb(self, event, packet):
entity = ExpEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.exp_orbs[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
def handle_spawn_global_entity(self, event, packet):
entity = GlobalEntity()
entity.set_dict(packet.data)
self.ec.entities[packet.data['eid']] = entity
self.ec.global_entities[packet.data['eid']] = entity
self.event.emit('entity_spawn', {'entity': entity})
def handle_destroy_entities(self, event, packet):
for eid in packet.data['eids']:
if eid in self.ec.entities:
entity = self.ec.entities[eid]
del self.ec.entities[eid]
if eid in self.ec.players:
del self.ec.players[eid]
elif eid in self.ec.objects:
del self.ec.objects[eid]
elif eid in self.ec.mobs:
del self.ec.mobs[eid]
elif eid in self.ec.paintings:
del self.ec.paintings[eid]
elif eid in self.ec.exp_orbs:
del self.ec.exp_orbs[eid]
elif eid in self.ec.global_entities:
del self.ec.global_entities[eid]
self.event.emit('entity_destroy', {'entity': entity})
def handle_relative_move(self, event, packet):
if packet.data['eid'] in self.ec.entities:
entity = self.ec.entities[packet.data['eid']]
old_pos = [entity.x, entity.y, entity.z]
entity.set_dict(packet.data)
entity.x = entity.x + packet.data['dx']
entity.y = entity.y + packet.data['dy']
entity.z = entity.z + packet.data['dz']
self.event.emit('entity_move',
{'entity': entity, 'old_pos': old_pos})
def handle_velocity(self, event, packet):
if packet.data['eid'] in self.ec.entities:
self.ec.entities[packet.data['eid']].set_dict(packet.data)
if packet.data['eid'] == self.ec.client_player.eid:
self.event.emit('entity_player_velocity', packet.data)
def handle_set_dict(self, event, packet):
if packet.data['eid'] in self.ec.entities:
self.ec.entities[packet.data['eid']].set_dict(packet.data)
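# Illustrative sketch only (not part of spockbot): the handlers above all follow the
# same pattern of copying packet fields onto a tracked entity and indexing it by eid.
# The tiny stand-in below mimics the assumed Info.set_dict() behaviour so the pattern
# can be shown without a running client.
class _FakeEntity(object):
    eid = 0
    x = y = z = 0
    def set_dict(self, data):
        for key, value in data.items():
            if hasattr(self, key):
                setattr(self, key, value)
def _example_entity_tracking():
    entities = {}
    packet_data = {'eid': 7, 'x': 10, 'y': 64, 'z': -3}
    entity = _FakeEntity()
    entity.set_dict(packet_data)            # same call the spawn handlers make
    entities[packet_data['eid']] = entity   # keyed by eid, as in EntitiesCore
    return entities[7].x, entities[7].y, entities[7].z  # (10, 64, -3)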
|
|
#!/usr/bin/python
'''
/******************************************************************
*
* Copyright 2018 Samsung Electronics All Rights Reserved.
*
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************/
'''
import sys
import os
from os import listdir
from os.path import isfile, join
import re
import optparse
from configuration import *
import datetime as dt
from time import time
import subprocess
import glob
oparser = optparse.OptionParser()
oparser.add_option("-m", action="store", dest="module_name")
oparser.add_option("--module", action="store", dest="module_name")
oparser.add_option("-k", action="store", dest="type_name")
oparser.add_option("--kind", action="store", dest="type_name")
oparser.add_option("-p", action="store", dest="platform")
oparser.add_option("--platform", action="store", dest="platform")
oparser.add_option("-t", action="store", dest="target")
oparser.add_option("--target", action="store", dest="target")
oparser.add_option("-s", action="store", dest="suite")
oparser.add_option("--suite", action="store", dest="suite")
oparser.add_option("-c", action="store", dest="testcase")
oparser.add_option("--case", action="store", dest="testcase")
oparser.add_option("-d", action="store", dest="device_name")
oparser.add_option("--device", action="store", dest="device_name")
oparser.add_option("-r", action="store", dest="is_marge")
oparser.add_option("--merge", action="store", dest="is_marge")
oparser.add_option("-b", action="store", dest="build_target")
oparser.add_option("--buildtarget", action="store", dest="build_target")
oparser.set_defaults(platform='android', target='21', build_target='target')
opts, args = oparser.parse_args()
platform = opts.platform
target = opts.target
module_name = opts.module_name
type_name = opts.type_name
suite_name = opts.suite
testcase_name = opts.testcase
device_name = opts.device_name
build_target_dir = opts.build_target
is_marge = opts.is_marge
EXECUTABLE_DIRECTORY = ""
suites = []
tc_count = 0
if module_name == 'ic' :
EXECUTABLE_DIRECTORY = 'bin/android/%s/intermediates/classes/debug/org/iotivity/cloud/%s/test/%s' % (module_name, module_name,
type_name)
base_package = "org.iotivity.cloud."
else :
EXECUTABLE_DIRECTORY = 'bin/android/%s/intermediates/classes/debug/org/iotivity/service/%s/test/%s' % (module_name, module_name,
type_name)
base_package = "org.iotivity.service."
if build_target_dir == 'sec':
EXECUTABLE_ROOT_DIR = '../IotivitySECTest_New/'
else :
EXECUTABLE_ROOT_DIR = "../"
build_dir = EXECUTABLE_ROOT_DIR+EXECUTABLE_DIRECTORY
TEST_RESULT_DIR_ANDROID = TEST_RESULT_DIR + "/android/"+module_name+"/"+type_name
LOG_DIRECTORY = TEST_RESULT_DIR + "/android/"+module_name+"/"+"log"+"/"+type_name
date = dt.datetime.today().strftime("%Y%m%d")
time_stamp = str(time())
result_file_name = platform+"_"+target+"_"+date+"_"+time_stamp+"_iotivity_"+module_name+"_"+type_name+".xml"
package_name = base_package+module_name+".test"
print "Build Directory "+build_dir
full_suite_name= package_name+".%s.%s"%(type_name, suite_name)
savedPath = os.getcwd()
if device_name == None:
device_name = ""
else:
device_name = "-s " + device_name
#adb shell pm list instrumentation
def add_suites_to_list() :
global suites
suites = []
if suite_name is not None and testcase_name is None:
suites = opts.suite.split(',')
elif suite_name is None and testcase_name is None:
temp_suites = [f for f in listdir(build_dir) if isfile(join(build_dir, f))]
for suite in temp_suites :
if "$" not in suite :
suite = suite.split('.',1)[0]
suites.append(suite)
print "\nSuites List going to execute : \n"
print suites
elif suite_name is not None and testcase_name is not None :
suites.append(suite_name)
def clean_test_result_directory(directory) :
if not os.path.exists(directory):
os.makedirs(directory)
else:
if is_marge is None:
files = glob.glob('%s/*'%(directory))
for xml_file in files:
os.remove(xml_file)
def write_log_into_file(tc_name) :
rc = os.system("adb %s logcat -d > %s_log.txt" %(device_name, LOG_DIRECTORY+tc_name))
os.chdir(savedPath)
def pull_result_file_from_device(result_file_name) :
os.system("adb %s pull /data/data/%s%s.test/files/junit-report.xml %s" %(device_name, base_package, module_name, TEST_RESULT_DIR_ANDROID))
os.chdir(TEST_RESULT_DIR_ANDROID)
os.rename("junit-report.xml", result_file_name)
os.chdir(savedPath)
def run_module() :
global tc_count
#add suites as requested
add_suites_to_list()
    # getting suites from the suites list
for suite in suites:
#change directory to build directory
os.chdir(build_dir)
testcases = []
full_suite_name= package_name+".%s.%s"%(type_name, suite)
print("Full suite name : "+full_suite_name)
#checking if suite exist in build directory
if os.path.isfile(suite+".class") :
rc = os.system("find -name '%s.class' | xargs javap -p > %s.txt " % (suite,suite))
classFile = open("%s.txt" % suite, "r")
if rc != 0 :
continue
else :
print "Suite : " +suite +" does not exist -----> Moving to next Suite"
continue
print "\nStart Running Suite : "+suite
#Getting testcase list from suite
for line in classFile:
words = line.split()
for word in words:
if word.startswith('Test') or word.startswith('test'):
testname = word.split('(')
testcases.append(testname[0])
#close test suite class file
classFile.close()
#delete temp txt file
if os.path.isfile(suite+".txt") :
os.remove(suite+".txt")
#this if block is for single test execution
if testcase_name is not None :
if testcase_name in testcases :
testcases = []
testcases.append(testcase_name)
else :
print "TestCase : "+testcase_name +" does not exist------->ending test"
                sys.exit(0)
#getting testcase from testcase list and execute
for method_name in testcases:
print "\nStarted Test : " +method_name
p = subprocess.Popen(["adb %s shell am instrument -w -e class %s#%s %s/com.zutubi.android.junitreport.JUnitReportTestRunner"%(device_name,full_suite_name,method_name,package_name)], stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
out, err = p.communicate()
print out
print err
tc_count = tc_count+1;
print "Completed Test : " +method_name+"\n"
time_stamp = str(time())
result_file_name = method_name+"-"+suite +"-"+platform+"-"+target+"-"+date+"-"+time_stamp+"-iotivity-"+module_name+"-"+type_name+".xml"
os.chdir(savedPath)
write_log_into_file(method_name)
os.chdir(savedPath)
#Pulling testcase xml result file from device
pull_result_file_from_device(result_file_name)
print "Completed Running Suite : "+suite
def make_excel_report() :
subprocess.call("python junit_reporter.py -m %s"%(module_name), shell=True)
if __name__ == '__main__':
clean_test_result_directory(TEST_RESULT_DIR_ANDROID)
clean_test_result_directory(LOG_DIRECTORY)
run_module()
print "\nTotal TC Count : ",tc_count
print "\n=============== Test Execution Completed ===============\n"
make_excel_report()
'''
#adb shell am instrument -w org.iotivity.service.phy.test/com.zutubi.android.junitreport.JUnitReportTestRunner
#Script running procedure
#python ./junit_runner.py -m phy -k stc -p android -t 21 -s PHYWiFiOnBoardingConfigTest -c testSetSharedKeyAndGetSharedKeyByGetConfig_GSRV_P
#./junit_runner.py -m pm -k itc -p android -t 21 -s PMOcProvisioningTest
#./junit_runner.py -m re -k utc -p android -t 21 -s REResourceBrokerTest
#./junit_runner.py -m rh -k utc -p android -t 21 -s ResourceHostingTest
#./junit_runner.py -m tm -k itc -p android -t 21 -s TMManagerTest
#/home/sk/gittest/oictest_repo/IotivityOrgSource/new/iotivity/IotivitySECTest/bin/android/phy -s PHYWiFiOnBoardingConfigTest -c TestSetSharedKeyAndGetSharedKeyByGetConfig_GSRV_P
#./junit_runner.py -m es -k btc -p android -t 21 -b target -s ESDevicePropTest -c testESSetWiFiPropWithInvalidArguments_ESV_NV_ETC_N
'''
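# A small illustrative sketch (not part of the original runner): run_module() extracts
# test method names from `javap -p` output by keeping identifiers that start with
# "test"/"Test" and cutting them at the opening parenthesis, as below.
def _extract_testcases(javap_lines):
    testcases = []
    for line in javap_lines:
        for word in line.split():
            if word.startswith('Test') or word.startswith('test'):
                testcases.append(word.split('(')[0])
    return testcases
# e.g. _extract_testcases(["public void testFoo();"]) returns ['testFoo']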
|
|
# Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Provides functions: get_native_path_case(), isabs() and safe_join().
This module assumes that the filesystem is not changing while the current
process is running and thus it caches results of functions that depend on FS
state.
"""
import ctypes
import getpass
import logging
import os
import posixpath
import re
import shlex
import shutil
import stat
import sys
import unicodedata
import time
from utils import tools
# Types of action accepted by link_file().
HARDLINK, HARDLINK_WITH_FALLBACK, SYMLINK, COPY = range(1, 5)
## OS-specific imports
if sys.platform == 'win32':
from ctypes.wintypes import create_unicode_buffer
from ctypes.wintypes import windll, FormatError # pylint: disable=E0611
from ctypes.wintypes import GetLastError # pylint: disable=E0611
elif sys.platform == 'darwin':
import Carbon.File # pylint: disable=F0401
import MacOS # pylint: disable=F0401
if sys.platform == 'win32':
def QueryDosDevice(drive_letter):
"""Returns the Windows 'native' path for a DOS drive letter."""
assert re.match(r'^[a-zA-Z]:$', drive_letter), drive_letter
assert isinstance(drive_letter, unicode)
# Guesswork. QueryDosDeviceW never returns the required number of bytes.
chars = 1024
p = create_unicode_buffer(chars)
if 0 == windll.kernel32.QueryDosDeviceW(drive_letter, p, chars):
err = GetLastError()
if err:
# pylint: disable=E0602
msg = u'QueryDosDevice(%s): %s (%d)' % (
drive_letter, FormatError(err), err)
raise WindowsError(err, msg.encode('utf-8'))
return p.value
def GetShortPathName(long_path):
"""Returns the Windows short path equivalent for a 'long' path."""
assert isinstance(long_path, unicode), repr(long_path)
# Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
# not enforced.
if os.path.isabs(long_path) and not long_path.startswith('\\\\?\\'):
long_path = '\\\\?\\' + long_path
chars = windll.kernel32.GetShortPathNameW(long_path, None, 0)
if chars:
p = create_unicode_buffer(chars)
if windll.kernel32.GetShortPathNameW(long_path, p, chars):
return p.value
err = GetLastError()
if err:
# pylint: disable=E0602
msg = u'GetShortPathName(%s): %s (%d)' % (
long_path, FormatError(err), err)
raise WindowsError(err, msg.encode('utf-8'))
def GetLongPathName(short_path):
"""Returns the Windows long path equivalent for a 'short' path."""
assert isinstance(short_path, unicode)
# Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
# not enforced.
if os.path.isabs(short_path) and not short_path.startswith('\\\\?\\'):
short_path = '\\\\?\\' + short_path
chars = windll.kernel32.GetLongPathNameW(short_path, None, 0)
if chars:
p = create_unicode_buffer(chars)
if windll.kernel32.GetLongPathNameW(short_path, p, chars):
return p.value
err = GetLastError()
if err:
# pylint: disable=E0602
msg = u'GetLongPathName(%s): %s (%d)' % (
short_path, FormatError(err), err)
raise WindowsError(err, msg.encode('utf-8'))
class DosDriveMap(object):
"""Maps \Device\HarddiskVolumeN to N: on Windows."""
# Keep one global cache.
_MAPPING = {}
def __init__(self):
"""Lazy loads the cache."""
if not self._MAPPING:
# This is related to UNC resolver on windows. Ignore that.
self._MAPPING[u'\\Device\\Mup'] = None
self._MAPPING[u'\\SystemRoot'] = os.environ[u'SystemRoot']
for letter in (chr(l) for l in xrange(ord('C'), ord('Z')+1)):
try:
letter = u'%s:' % letter
mapped = QueryDosDevice(letter)
if mapped in self._MAPPING:
logging.warn(
('Two drives: \'%s\' and \'%s\', are mapped to the same disk'
'. Drive letters are a user-mode concept and the kernel '
'traces only have NT path, so all accesses will be '
'associated with the first drive letter, independent of the '
'actual letter used by the code') % (
self._MAPPING[mapped], letter))
else:
self._MAPPING[mapped] = letter
except WindowsError: # pylint: disable=E0602
pass
def to_win32(self, path):
"""Converts a native NT path to Win32/DOS compatible path."""
match = re.match(r'(^\\Device\\[a-zA-Z0-9]+)(\\.*)?$', path)
if not match:
raise ValueError(
'Can\'t convert %s into a Win32 compatible path' % path,
path)
if not match.group(1) in self._MAPPING:
# Unmapped partitions may be accessed by windows for the
# fun of it while the test is running. Discard these.
return None
drive = self._MAPPING[match.group(1)]
if not drive or not match.group(2):
return drive
return drive + match.group(2)
def isabs(path):
"""Accepts X: as an absolute path, unlike python's os.path.isabs()."""
return os.path.isabs(path) or len(path) == 2 and path[1] == ':'
def find_item_native_case(root, item):
"""Gets the native path case of a single item based at root_path."""
if item == '..':
return item
root = get_native_path_case(root)
return os.path.basename(get_native_path_case(os.path.join(root, item)))
@tools.profile
@tools.cached
def get_native_path_case(p):
"""Returns the native path case for an existing file.
On Windows, removes any leading '\\?\'.
"""
assert isinstance(p, unicode), repr(p)
if not isabs(p):
raise ValueError(
'get_native_path_case(%r): Require an absolute path' % p, p)
# Make sure it is normalized to os.path.sep. Do not do it here to keep the
# function fast
assert '/' not in p, p
suffix = ''
count = p.count(':')
if count > 1:
# This means it has an alternate-data stream. There could be 3 ':', since
# it could be the $DATA datastream of an ADS. Split the whole ADS suffix
# off and add it back afterward. There is no way to know the native path
# case of an alternate data stream.
items = p.split(':')
p = ':'.join(items[0:2])
suffix = ''.join(':' + i for i in items[2:])
# TODO(maruel): Use os.path.normpath?
if p.endswith('.\\'):
p = p[:-2]
# Windows used to have an option to turn on case sensitivity on non Win32
# subsystem but that's out of scope here and isn't supported anymore.
# Go figure why GetShortPathName() is needed.
try:
out = GetLongPathName(GetShortPathName(p))
except OSError, e:
if e.args[0] in (2, 3, 5):
# The path does not exist. Try to recurse and reconstruct the path.
base = os.path.dirname(p)
rest = os.path.basename(p)
return os.path.join(get_native_path_case(base), rest)
raise
if out.startswith('\\\\?\\'):
out = out[4:]
# Always upper case the first letter since GetLongPathName() will return the
# drive letter in the case it was given.
return out[0].upper() + out[1:] + suffix
def enum_processes_win():
"""Returns all processes on the system that are accessible to this process.
Returns:
Win32_Process COM objects. See
http://msdn.microsoft.com/library/aa394372.aspx for more details.
"""
import win32com.client # pylint: disable=F0401
wmi_service = win32com.client.Dispatch('WbemScripting.SWbemLocator')
wbem = wmi_service.ConnectServer('.', 'root\\cimv2')
return [proc for proc in wbem.ExecQuery('SELECT * FROM Win32_Process')]
def filter_processes_dir_win(processes, root_dir):
"""Returns all processes which has their main executable located inside
root_dir.
"""
def normalize_path(filename):
try:
return GetLongPathName(unicode(filename)).lower()
except: # pylint: disable=W0702
return unicode(filename).lower()
root_dir = normalize_path(root_dir)
def process_name(proc):
if proc.ExecutablePath:
return normalize_path(proc.ExecutablePath)
# proc.ExecutablePath may be empty if the process hasn't finished
# initializing, but the command line may be valid.
if proc.CommandLine is None:
return None
parsed_line = shlex.split(proc.CommandLine)
if len(parsed_line) >= 1 and os.path.isabs(parsed_line[0]):
return normalize_path(parsed_line[0])
return None
long_names = ((process_name(proc), proc) for proc in processes)
return [
proc for name, proc in long_names
if name is not None and name.startswith(root_dir)
]
def filter_processes_tree_win(processes):
"""Returns all the processes under the current process."""
# Convert to dict.
processes = dict((p.ProcessId, p) for p in processes)
root_pid = os.getpid()
out = {root_pid: processes[root_pid]}
while True:
found = set()
for pid in out:
found.update(
p.ProcessId for p in processes.itervalues()
if p.ParentProcessId == pid)
found -= set(out)
if not found:
break
out.update((p, processes[p]) for p in found)
return out.values()
elif sys.platform == 'darwin':
# On non-windows, keep the stdlib behavior.
isabs = os.path.isabs
def _native_case(p):
"""Gets the native path case. Warning: this function resolves symlinks."""
try:
rel_ref, _ = Carbon.File.FSPathMakeRef(p.encode('utf-8'))
# The OSX underlying code uses NFD but python strings are in NFC. This
# will cause issues with os.listdir() for example. Since the dtrace log
# *is* in NFC, normalize it here.
out = unicodedata.normalize(
'NFC', rel_ref.FSRefMakePath().decode('utf-8'))
if p.endswith(os.path.sep) and not out.endswith(os.path.sep):
return out + os.path.sep
return out
except MacOS.Error, e:
if e.args[0] in (-43, -120):
# The path does not exist. Try to recurse and reconstruct the path.
# -43 means file not found.
# -120 means directory not found.
base = os.path.dirname(p)
rest = os.path.basename(p)
return os.path.join(_native_case(base), rest)
raise OSError(
e.args[0], 'Failed to get native path for %s' % p, p, e.args[1])
def _split_at_symlink_native(base_path, rest):
"""Returns the native path for a symlink."""
base, symlink, rest = split_at_symlink(base_path, rest)
if symlink:
if not base_path:
base_path = base
else:
base_path = safe_join(base_path, base)
symlink = find_item_native_case(base_path, symlink)
return base, symlink, rest
def find_item_native_case(root_path, item):
"""Gets the native path case of a single item based at root_path.
There is no API to get the native path case of symlinks on OSX. So it
needs to be done the slow way.
"""
if item == '..':
return item
item = item.lower()
for element in listdir(root_path):
if element.lower() == item:
return element
@tools.profile
@tools.cached
def get_native_path_case(path):
"""Returns the native path case for an existing file.
Technically, it's only HFS+ on OSX that is case preserving and
insensitive. It's the default setting on HFS+ but can be changed.
"""
assert isinstance(path, unicode), repr(path)
if not isabs(path):
raise ValueError(
'get_native_path_case(%r): Require an absolute path' % path, path)
if path.startswith('/dev'):
# /dev is not visible from Carbon, causing an exception.
return path
# Starts assuming there is no symlink along the path.
resolved = _native_case(path)
if path.lower() in (resolved.lower(), resolved.lower() + './'):
# This code path is incredibly faster.
logging.debug('get_native_path_case(%s) = %s' % (path, resolved))
return resolved
# There was a symlink, process it.
base, symlink, rest = _split_at_symlink_native(None, path)
if not symlink:
# TODO(maruel): This can happen on OSX because we use stale APIs on OSX.
# Fixing the APIs usage will likely fix this bug. The bug occurs due to
# hardlinked files, where the API may return one file path or the other
# depending on how it feels.
return base
prev = base
base = safe_join(_native_case(base), symlink)
assert len(base) > len(prev)
while rest:
prev = base
relbase, symlink, rest = _split_at_symlink_native(base, rest)
base = safe_join(base, relbase)
assert len(base) > len(prev), (prev, base, symlink)
if symlink:
base = safe_join(base, symlink)
assert len(base) > len(prev), (prev, base, symlink)
# Make sure no symlink was resolved.
assert base.lower() == path.lower(), (base, path)
logging.debug('get_native_path_case(%s) = %s' % (path, base))
return base
else: # OSes other than Windows and OSX.
# On non-windows, keep the stdlib behavior.
isabs = os.path.isabs
def find_item_native_case(root, item):
"""Gets the native path case of a single item based at root_path."""
if item == '..':
return item
root = get_native_path_case(root)
return os.path.basename(get_native_path_case(os.path.join(root, item)))
@tools.profile
@tools.cached
def get_native_path_case(path):
"""Returns the native path case for an existing file.
On OSes other than OSX and Windows, assume the file system is
case-sensitive.
TODO(maruel): This is not strictly true. Implement if necessary.
"""
assert isinstance(path, unicode), repr(path)
if not isabs(path):
raise ValueError(
'get_native_path_case(%r): Require an absolute path' % path, path)
# Give up on cygwin, as GetLongPathName() can't be called.
    # Linux traces tend to not be normalized so use this occasion to normalize
# it. This function implementation already normalizes the path on the other
# OS so this needs to be done here to be coherent between OSes.
out = os.path.normpath(path)
if path.endswith(os.path.sep) and not out.endswith(os.path.sep):
out = out + os.path.sep
# In 99.99% of cases on Linux out == path. Since a return value is cached
    # forever, reuse the (also cached) |path| object. It saves approx 7MB of RAM
    # when isolating Chromium tests. It's important on memory-constrained
# systems running ARM.
return path if out == path else out
if sys.platform != 'win32': # All non-Windows OSes.
def safe_join(*args):
"""Joins path elements like os.path.join() but doesn't abort on absolute
path.
os.path.join('foo', '/bar') == '/bar'
but safe_join('foo', '/bar') == 'foo/bar'.
"""
out = ''
for element in args:
if element.startswith(os.path.sep):
if out.endswith(os.path.sep):
out += element[1:]
else:
out += element
else:
if out.endswith(os.path.sep):
out += element
else:
out += os.path.sep + element
return out
@tools.profile
def split_at_symlink(base_dir, relfile):
"""Scans each component of relfile and cut the string at the symlink if
there is any.
Returns a tuple (base_path, symlink, rest), with symlink == rest == None if
not symlink was found.
"""
if base_dir:
assert relfile
assert os.path.isabs(base_dir)
index = 0
else:
assert os.path.isabs(relfile)
index = 1
def at_root(rest):
if base_dir:
return safe_join(base_dir, rest)
return rest
while True:
try:
index = relfile.index(os.path.sep, index)
except ValueError:
index = len(relfile)
full = at_root(relfile[:index])
if os.path.islink(full):
# A symlink!
base = os.path.dirname(relfile[:index])
symlink = os.path.basename(relfile[:index])
rest = relfile[index:]
logging.debug(
'split_at_symlink(%s, %s) -> (%s, %s, %s)' %
(base_dir, relfile, base, symlink, rest))
return base, symlink, rest
if index == len(relfile):
break
index += 1
return relfile, None, None
@tools.profile
def listdir(abspath):
"""Lists a directory given an absolute path to it."""
if not isabs(abspath):
raise ValueError(
'list_dir(%r): Require an absolute path' % abspath, abspath)
return os.listdir(abspath)
def relpath(path, root):
"""os.path.relpath() that keeps trailing os.path.sep."""
out = os.path.relpath(path, root)
if path.endswith(os.path.sep):
out += os.path.sep
return out
def safe_relpath(filepath, basepath):
"""Do not throw on Windows when filepath and basepath are on different drives.
  Different from relpath() above since this one doesn't keep the trailing
  os.path.sep, and it swallows exceptions on Windows and returns the original
  absolute path in the case of different drives.
"""
try:
return os.path.relpath(filepath, basepath)
except ValueError:
assert sys.platform == 'win32'
return filepath
def normpath(path):
"""os.path.normpath() that keeps trailing os.path.sep."""
out = os.path.normpath(path)
if path.endswith(os.path.sep):
out += os.path.sep
return out
def posix_relpath(path, root):
"""posix.relpath() that keeps trailing slash.
It is different from relpath() since it can be used on Windows.
"""
out = posixpath.relpath(path, root)
if path.endswith('/'):
out += '/'
return out
def cleanup_path(x):
"""Cleans up a relative path. Converts any os.path.sep to '/' on Windows."""
if x:
x = x.rstrip(os.path.sep).replace(os.path.sep, '/')
if x == '.':
x = ''
if x:
x += '/'
return x
def is_url(path):
"""Returns True if it looks like an HTTP url instead of a file path."""
return bool(re.match(r'^https?://.+$', path))
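# Usage sketch (illustrative, not from the original module): cleanup_path() yields a
# '/'-separated relative prefix ending in '/', or '' for the current directory, and
# is_url() only accepts http/https URLs.
def _example_cleanup_path_and_is_url():
  assert cleanup_path('foo/bar') == 'foo/bar/'
  assert cleanup_path('.') == ''
  assert cleanup_path('') == ''
  assert is_url('https://example.com/data')
  assert not is_url('/tmp/data')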
def path_starts_with(prefix, path):
"""Returns true if the components of the path |prefix| are the same as the
initial components of |path| (or all of the components of |path|). The paths
must be absolute.
"""
assert os.path.isabs(prefix) and os.path.isabs(path)
prefix = os.path.normpath(prefix)
path = os.path.normpath(path)
assert prefix == get_native_path_case(prefix), prefix
assert path == get_native_path_case(path), path
prefix = prefix.rstrip(os.path.sep) + os.path.sep
path = path.rstrip(os.path.sep) + os.path.sep
return path.startswith(prefix)
@tools.profile
def fix_native_path_case(root, path):
"""Ensures that each component of |path| has the proper native case.
It does so by iterating slowly over the directory elements of |path|. The file
must exist.
"""
native_case_path = root
for raw_part in path.split(os.sep):
if not raw_part or raw_part == '.':
break
part = find_item_native_case(native_case_path, raw_part)
if not part:
raise OSError(
'File %s doesn\'t exist' %
os.path.join(native_case_path, raw_part))
native_case_path = os.path.join(native_case_path, part)
return os.path.normpath(native_case_path)
def ensure_command_has_abs_path(command, cwd):
"""Ensures that an isolate command uses absolute path.
This is needed since isolate can specify a command relative to 'cwd' and
subprocess.call doesn't consider 'cwd' when searching for executable.
"""
if not os.path.isabs(command[0]):
command[0] = os.path.abspath(os.path.join(cwd, command[0]))
def is_same_filesystem(path1, path2):
"""Returns True if both paths are on the same filesystem.
This is required to enable the use of hardlinks.
"""
assert os.path.isabs(path1), path1
assert os.path.isabs(path2), path2
if sys.platform == 'win32':
# If the drive letter mismatches, assume it's a separate partition.
# TODO(maruel): It should look at the underlying drive, a drive letter could
# be a mount point to a directory on another drive.
assert re.match(r'^[a-zA-Z]\:\\.*', path1), path1
assert re.match(r'^[a-zA-Z]\:\\.*', path2), path2
if path1[0].lower() != path2[0].lower():
return False
return os.stat(path1).st_dev == os.stat(path2).st_dev
def get_free_space(path):
"""Returns the number of free bytes."""
if sys.platform == 'win32':
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(path), None, None, ctypes.pointer(free_bytes))
return free_bytes.value
# For OSes other than Windows.
f = os.statvfs(path) # pylint: disable=E1101
return f.f_bfree * f.f_frsize
### Write file functions.
def hardlink(source, link_name):
"""Hardlinks a file.
Add support for os.link() on Windows.
"""
if sys.platform == 'win32':
if not ctypes.windll.kernel32.CreateHardLinkW(
unicode(link_name), unicode(source), 0):
raise OSError()
else:
os.link(source, link_name)
def readable_copy(outfile, infile):
"""Makes a copy of the file that is readable by everyone."""
shutil.copy2(infile, outfile)
read_enabled_mode = (os.stat(outfile).st_mode | stat.S_IRUSR |
stat.S_IRGRP | stat.S_IROTH)
os.chmod(outfile, read_enabled_mode)
def set_read_only(path, read_only):
"""Sets or resets the write bit on a file or directory.
Zaps out access to 'group' and 'others'.
"""
assert isinstance(read_only, bool), read_only
mode = os.lstat(path).st_mode
# TODO(maruel): Stop removing GO bits.
if read_only:
mode = mode & 0500
else:
mode = mode | 0200
if hasattr(os, 'lchmod'):
os.lchmod(path, mode) # pylint: disable=E1101
else:
if stat.S_ISLNK(mode):
# Skip symlink without lchmod() support.
logging.debug(
'Can\'t change %sw bit on symlink %s',
'-' if read_only else '+', path)
return
# TODO(maruel): Implement proper DACL modification on Windows.
os.chmod(path, mode)
def try_remove(filepath):
"""Removes a file without crashing even if it doesn't exist."""
try:
# TODO(maruel): Not do it unless necessary since it slows this function
# down.
if sys.platform == 'win32':
      # On Windows, deleting a file will fail if the file is read-only.
set_read_only(filepath, False)
else:
      # Deleting a file will fail if its parent directory is not writable.
set_read_only(os.path.dirname(filepath), False)
os.remove(filepath)
except OSError:
pass
def link_file(outfile, infile, action):
"""Links a file. The type of link depends on |action|."""
if action not in (HARDLINK, HARDLINK_WITH_FALLBACK, SYMLINK, COPY):
raise ValueError('Unknown mapping action %s' % action)
if not os.path.isfile(infile):
raise OSError('%s is missing' % infile)
if os.path.isfile(outfile):
raise OSError(
        '%s already exists; insize:%d; outsize:%d' %
(outfile, os.stat(infile).st_size, os.stat(outfile).st_size))
if action == COPY:
readable_copy(outfile, infile)
elif action == SYMLINK and sys.platform != 'win32':
    # On Windows, symlinks are converted to hardlinks and fall back to copy.
os.symlink(infile, outfile) # pylint: disable=E1101
else:
# HARDLINK or HARDLINK_WITH_FALLBACK.
try:
hardlink(infile, outfile)
except OSError as e:
if action == HARDLINK:
raise OSError('Failed to hardlink %s to %s: %s' % (infile, outfile, e))
# Probably a different file system.
logging.warning(
          'Failed to hardlink, falling back to copy %s to %s' % (
infile, outfile))
readable_copy(outfile, infile)
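# Hedged usage sketch (paths are hypothetical): with HARDLINK_WITH_FALLBACK the call
# tries a hardlink first and degrades to a readable copy, e.g. across filesystems;
# `infile` must already exist and `outfile` must not. Note the (outfile, infile) order.
def _example_link_file(infile, outfile):
  link_file(outfile, infile, HARDLINK_WITH_FALLBACK)
  return os.path.isfile(outfile)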
### Write directory functions.
def make_tree_read_only(root):
"""Makes all the files in the directories read only.
Also makes the directories read only, only if it makes sense on the platform.
This means no file can be created or deleted.
"""
logging.debug('make_tree_read_only(%s)', root)
assert os.path.isabs(root), root
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
for filename in filenames:
set_read_only(os.path.join(dirpath, filename), True)
if sys.platform != 'win32':
# It must not be done on Windows.
for dirname in dirnames:
set_read_only(os.path.join(dirpath, dirname), True)
if sys.platform != 'win32':
set_read_only(root, True)
def make_tree_files_read_only(root):
"""Makes all the files in the directories read only but not the directories
themselves.
This means files can be created or deleted.
"""
logging.debug('make_tree_files_read_only(%s)', root)
assert os.path.isabs(root), root
if sys.platform != 'win32':
set_read_only(root, False)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
for filename in filenames:
set_read_only(os.path.join(dirpath, filename), True)
if sys.platform != 'win32':
# It must not be done on Windows.
for dirname in dirnames:
set_read_only(os.path.join(dirpath, dirname), False)
def make_tree_writeable(root):
"""Makes all the files in the directories writeable.
Also makes the directories writeable, only if it makes sense on the platform.
It is different from make_tree_deleteable() because it unconditionally affects
the files.
"""
logging.debug('make_tree_writeable(%s)', root)
assert os.path.isabs(root), root
if sys.platform != 'win32':
set_read_only(root, False)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
for filename in filenames:
set_read_only(os.path.join(dirpath, filename), False)
if sys.platform != 'win32':
# It must not be done on Windows.
for dirname in dirnames:
set_read_only(os.path.join(dirpath, dirname), False)
def make_tree_deleteable(root):
"""Changes the appropriate permissions so the files in the directories can be
deleted.
On Windows, the files are modified. On other platforms, modify the directory.
It only does the minimum so the files can be deleted safely.
Warning on Windows: since file permission is modified, the file node is
modified. This means that for hard-linked files, every directory entry for the
file node has its file permission modified.
"""
logging.debug('make_tree_deleteable(%s)', root)
assert os.path.isabs(root), root
if sys.platform != 'win32':
set_read_only(root, False)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
if sys.platform == 'win32':
for filename in filenames:
set_read_only(os.path.join(dirpath, filename), False)
else:
for dirname in dirnames:
set_read_only(os.path.join(dirpath, dirname), False)
def change_acl_for_delete_win(path):
"""Zaps the SECURITY_DESCRIPTOR's DACL on a directory entry that is tedious to
delete.
This function is a heavy hammer. It discards the SECURITY_DESCRIPTOR and
creates a new one with only one DACL set to user:FILE_ALL_ACCESS.
Used as last resort.
"""
STANDARD_RIGHTS_REQUIRED = 0xf0000
SYNCHRONIZE = 0x100000
FILE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3ff
import win32security
user, _domain, _type = win32security.LookupAccountName('', getpass.getuser())
sd = win32security.SECURITY_DESCRIPTOR()
sd.Initialize()
sd.SetSecurityDescriptorOwner(user, False)
dacl = win32security.ACL()
dacl.Initialize()
dacl.AddAccessAllowedAce(win32security.ACL_REVISION_DS, FILE_ALL_ACCESS, user)
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(
path, win32security.DACL_SECURITY_INFORMATION, sd)
def rmtree(root):
"""Wrapper around shutil.rmtree() to retry automatically on Windows.
On Windows, forcibly kills processes that are found to interfere with the
deletion.
Returns:
True on normal execution, False if berserk techniques (like killing
processes) had to be used.
"""
make_tree_deleteable(root)
logging.info('rmtree(%s)', root)
if sys.platform != 'win32':
shutil.rmtree(root)
return True
  # Windows is more 'challenging'. First tries the soft way: tries 3 times to
  # delete, sleeping a bit in between.
max_tries = 3
for i in xrange(max_tries):
# errors is a list of tuple(function, path, excinfo).
errors = []
shutil.rmtree(root, onerror=lambda *args: errors.append(args))
if not errors:
return True
if not i:
for _, path, _ in errors:
try:
change_acl_for_delete_win(path)
except Exception as e:
sys.stderr.write('- %s (failed to update ACL: %s)\n' % (path, e))
if i == max_tries - 1:
sys.stderr.write(
'Failed to delete %s. The following files remain:\n' % root)
for _, path, _ in errors:
sys.stderr.write('- %s\n' % path)
else:
delay = (i+1)*2
sys.stderr.write(
'Failed to delete %s (%d files remaining).\n'
' Maybe the test has a subprocess outliving it.\n'
' Sleeping %d seconds.\n' %
(root, len(errors), delay))
time.sleep(delay)
# The soft way was not good enough. Try the hard way. Enumerates both:
# - all child processes from this process.
  # - processes whose main executable is inside 'root'. The reason is that
# the ancestry may be broken so stray grand-children processes could be
# undetected by the first technique.
# This technique is not fool-proof but gets mostly there.
def get_processes():
processes = enum_processes_win()
tree_processes = filter_processes_tree_win(processes)
dir_processes = filter_processes_dir_win(processes, root)
# Convert to dict to remove duplicates.
processes = dict((p.ProcessId, p) for p in tree_processes)
processes.update((p.ProcessId, p) for p in dir_processes)
processes.pop(os.getpid())
return processes
for i in xrange(3):
sys.stderr.write('Enumerating processes:\n')
processes = get_processes()
if not processes:
break
for _, proc in sorted(processes.iteritems()):
sys.stderr.write(
'- pid %d; Handles: %d; Exe: %s; Cmd: %s\n' % (
proc.ProcessId,
proc.HandleCount,
proc.ExecutablePath,
proc.CommandLine))
sys.stderr.write('Terminating %d processes.\n' % len(processes))
for pid in sorted(processes):
try:
# Killing is asynchronous.
os.kill(pid, 9)
sys.stderr.write('- %d killed\n' % pid)
except OSError:
sys.stderr.write('- failed to kill %s\n' % pid)
if i < 2:
time.sleep((i+1)*2)
else:
processes = get_processes()
if processes:
sys.stderr.write('Failed to terminate processes.\n')
raise errors[0][2][0], errors[0][2][1], errors[0][2][2]
# Now that annoying processes in root are evicted, try again.
errors = []
shutil.rmtree(root, onerror=lambda *args: errors.append(args))
if errors:
# There's no hope.
sys.stderr.write(
'Failed to delete %s. The following files remain:\n' % root)
for _, path, _ in errors:
sys.stderr.write('- %s\n' % path)
raise errors[0][2][0], errors[0][2][1], errors[0][2][2]
return False
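# A condensed sketch (illustrative only) of the soft-delete strategy above: retry
# shutil.rmtree() a few times, collecting failures through onerror and backing off
# between attempts, before any of the heavier Windows-specific escalation is tried.
def _rmtree_soft_retries(root, max_tries=3):
  for i in range(max_tries):
    errors = []
    shutil.rmtree(root, onerror=lambda *args: errors.append(args))
    if not errors:
      return True
    time.sleep((i + 1) * 2)
  return False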
|
|
"""archvyrt domain module"""
# stdlib
import logging
# 3rd-party
import libvirt
# archvyrt
from archvyrt.libvirt import LibvirtDomain
from archvyrt.libvirt import LibvirtDisk
from archvyrt.libvirt import LibvirtNetwork
from archvyrt.libvirt import LibvirtRng
LOG = logging.getLogger(__name__)
class Domain:
"""
High-level domain object
"""
def __init__(self, domain_info, libvirt_url=None):
"""
Initialize libvirt domain
:param domain_info - JSON definition of domain
:param libvirt_url - URL for libvirt connection
"""
self._conn = libvirt.open(libvirt_url)
self._domain_info = domain_info
self._domain = LibvirtDomain(self.fqdn)
self._domain.memory = int(self.memory)
self._domain.vcpu = int(self.vcpu)
self._disks = []
self._init_disks()
self._networks = []
self._init_networks()
self._init_rng()
self._conn.defineXML(str(self._domain))
self._domain.xml = self._conn.lookupByName(self.fqdn).XMLDesc()
LOG.info('New domain %s', self.fqdn)
LOG.debug(
'Define new domain %s: %s',
self.fqdn,
str(self._domain).replace('\n', ' ').replace('\r', '')
)
def __del__(self):
"""
Make sure to cleanup connection when object is destroyed
"""
try:
if self._conn:
try:
self._conn.close()
except libvirt.libvirtError:
pass
except libvirt.libvirtError:
pass
def _init_disks(self):
"""
Initialize disks
will create libvirt disks and attach them to the domain
"""
for alias, details in sorted(self._domain_info['disks'].items()):
disk_name = '%s-%s' % (self.fqdn, alias)
self._disks.append(
LibvirtDisk(
self._conn,
disk_name,
alias,
**details
)
)
for disk in self._disks:
self._domain.add_device(disk.xml)
LOG.debug('Add disk %s to domain %s', disk.name, self.fqdn)
def _init_networks(self):
"""
Initialize networks
"""
for alias, details in sorted(self._domain_info['networks'].items()):
self._networks.append(
LibvirtNetwork(
alias,
**details
)
)
for network in self._networks:
self._domain.add_device(network.xml)
LOG.debug('Add network %s to domain %s', network.name, self.fqdn)
def _init_rng(self):
"""Initialize rng"""
if 'rng' in self._domain_info:
rng_bytes = self._domain_info['rng'].get('bytes', 2048)
rng = LibvirtRng(rng_bytes=rng_bytes)
self._domain.add_device(rng.xml)
LOG.debug('Add rng to domain %s', self.fqdn)
def start(self):
"""
Start domain
Warning: Will not check if the domain is provisioned yet...
"""
domain = self._conn.lookupByName(self.fqdn)
domain.create()
def stop(self):
"""
Stop domain
"""
domain = self._conn.lookupByName(self.fqdn)
domain.destroy()
def autostart(self, autostart):
"""
Set autostart option of domain
:param autostart - True/False
"""
domain = self._conn.lookupByName(self.fqdn)
domain.setAutostart(autostart)
@property
def sshkeys(self):
"""
sshkeys (from JSON representation)
"""
if self._domain_info.get('access', {}):
return self._domain_info.get('access').get('ssh-keys', {})
return None
@property
def password(self):
"""
password (encrypted, salted hash from JSON representation)
"""
if self._domain_info.get('access', {}):
return self._domain_info.get('access').get('password', None)
return None
@property
def guesttype(self):
"""
Type of domain (archlinux, plain, ...)
"""
return self._domain_info.get('guesttype')
@property
def disks(self):
"""
Disks attached to this domain
"""
return self._disks
@property
def networks(self):
"""
Networks attached to this domain
"""
return self._networks
@property
def fqdn(self):
"""
FQDN of this domain
"""
return self._domain_info.get('fqdn')
@property
def hostname(self):
"""
hostname of this domain
"""
return self._domain_info.get('hostname')
@property
def memory(self):
"""
Memory (in MB) of this domain
"""
return self._domain_info.get('memory')
@property
def vcpu(self):
"""
Number of virtual cpus for this domain
"""
return self._domain_info.get('vcpu')
@property
def xml(self):
"""
Libvirt XML for this domain (provisioned state)
"""
return self._domain.xml
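# Illustrative sketch only: a domain_info structure of the rough shape this class
# expects, with keys inferred from the accessors above. The per-disk and per-network
# option dicts are placeholders; their real fields depend on LibvirtDisk and
# LibvirtNetwork and are not defined here.
EXAMPLE_DOMAIN_INFO = {
    'fqdn': 'guest01.example.org',
    'hostname': 'guest01',
    'memory': 2048,                   # MB
    'vcpu': 2,
    'guesttype': 'archlinux',
    'disks': {'root': {}},            # passed to LibvirtDisk(conn, name, alias, **details)
    'networks': {'eth0': {}},         # passed to LibvirtNetwork(alias, **details)
    'rng': {'bytes': 2048},
    'access': {'ssh-keys': {}, 'password': None},
}
# Usage would then be along the lines of:
#     domain = Domain(EXAMPLE_DOMAIN_INFO, libvirt_url='qemu:///system')
#     domain.start()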
|
|
import logging
import xml.etree.ElementTree as ET
from .metadata import TDSCatalogMetadata
from .http_util import create_http_session, urlopen
try:
from urlparse import urljoin
except ImportError:
# Python 3
from urllib.parse import urljoin
log = logging.getLogger("siphon.catalog")
log.setLevel(logging.WARNING)
class TDSCatalog(object):
r"""
An object for holding information from a THREDDS Client Catalog.
Attributes
----------
catalog_url : string
The url path of the catalog to parse.
base_tds_url : string
The top level server address
    datasets : dict
        A dictionary of Dataset objects, whose keys are the names of the
        datasets
services : List
A list of SimpleServices listed in the catalog
catalog_refs : dict
        A dictionary of CatalogRef objects whose keys are the titles of the
        catalog references.
"""
def __init__(self, catalog_url):
r"""
Initialize the TDSCatalog object.
Parameters
----------
catalog_url : string
The URL of a THREDDS client catalog
"""
# top level server url
self.catalog_url = catalog_url
self.base_tds_url = catalog_url.split('/thredds/')[0]
session = create_http_session()
# get catalog.xml file
resp = session.get(self.catalog_url)
# If we were given an HTML link, warn about it and try to fix to xml
if 'html' in resp.headers['content-type']:
import warnings
new_url = self.catalog_url.replace('html', 'xml')
warnings.warn('URL %s returned HTML. Changing to: %s' % (self.catalog_url,
new_url))
self.catalog_url = new_url
resp = session.get(self.catalog_url)
# begin parsing the xml doc
root = ET.fromstring(resp.text)
if "name" in root.attrib:
self.catalog_name = root.attrib["name"]
else:
self.catalog_name = "No name found"
self.datasets = {}
self.services = []
self.catalog_refs = {}
self.metadata = {}
service_skip_count = 0
service_skip = 0
for child in root.iter():
tag_type = child.tag.split('}')[-1]
if tag_type == "dataset":
self._process_dataset(child)
elif tag_type == "catalogRef":
self._process_catalog_ref(child)
elif (tag_type == "metadata") or (tag_type == ""):
self._process_metadata(child, tag_type)
elif tag_type == "service":
if child.attrib["serviceType"] != "Compound":
# we do not want to process single services if they
# are already contained within a compound service, so
# we need to skip over those cases.
if service_skip_count >= service_skip:
self.services.append(SimpleService(child))
service_skip = 0
service_skip_count = 0
else:
service_skip_count += 1
else:
self.services.append(CompoundService(child))
service_skip = self.services[-1].number_of_subservices
service_skip_count = 0
self._process_datasets()
def _process_dataset(self, element):
if "urlPath" in element.attrib:
if element.attrib["urlPath"] == "latest.xml":
ds = Dataset(element, self.catalog_url)
else:
ds = Dataset(element)
self.datasets[ds.name] = ds
def _process_catalog_ref(self, element):
catalog_ref = CatalogRef(self.catalog_url, element)
self.catalog_refs[catalog_ref.title] = catalog_ref
def _process_metadata(self, element, tag_type):
if tag_type == "":
log.warning("Trying empty tag type as metadata")
self.metadata = TDSCatalogMetadata(element, self.metadata).metadata
def _process_datasets(self):
for dsName in list(self.datasets.keys()):
self.datasets[dsName].make_access_urls(
self.base_tds_url, self.services, metadata=self.metadata)
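# Minimal usage sketch (the URL is hypothetical, not taken from this module): parse a
# THREDDS client catalog, then inspect its datasets, nested catalog references and the
# access URLs resolved by _process_datasets() above.
def _example_tds_catalog(url='http://thredds.example.edu/thredds/catalog.xml'):
    cat = TDSCatalog(url)
    ref_titles = list(cat.catalog_refs.keys())
    first_ds = list(cat.datasets.values())[0] if cat.datasets else None
    # Access URLs are keyed by service type, e.g. first_ds.access_urls.get('OPENDAP')
    return first_ds, ref_titles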
class CatalogRef(object):
r"""
An object for holding Catalog References obtained from a THREDDS Client
Catalog.
Attributes
----------
name : string
The name of the catalogRef element
href : string
url to the catalogRef's THREDDS Client Catalog
title : string
Title of the catalogRef element
"""
def __init__(self, base_url, element_node):
r"""
Initialize the catalogRef object.
Parameters
----------
base_url : String
URL to the base catalog that owns this reference
element_node : Element
An Element Tree Element representing a catalogRef node
"""
self.name = element_node.attrib["name"]
self.title = element_node.attrib["{http://www.w3.org/1999/xlink}title"]
# Resolve relative URLs
href = element_node.attrib["{http://www.w3.org/1999/xlink}href"]
self.href = urljoin(base_url, href)
def follow(self):
r"""
Follow the reference, returning a new TDSCatalog
"""
return TDSCatalog(self.href)
class Dataset(object):
r"""
An object for holding Datasets obtained from a THREDDS Client Catalog.
Attributes
----------
name : string
The name of the Dataset element
url_path : string
url to the accessible dataset
access_urls : dict
        A dictionary of access urls whose keys are the access service
        types defined in the catalog (for example, "OPENDAP", "NetcdfSubset",
        "WMS", etc.)
"""
def __init__(self, element_node, catalog_url=""):
r"""
Initialize the Dataset object.
Parameters
----------
element_node : Element
An Element Tree Element representing a Dataset node
catalog_url : string
The top level server url
"""
self.name = element_node.attrib['name']
self.url_path = element_node.attrib['urlPath']
self._resolved = False
self._resolverUrl = None
# if latest.xml, resolve the latest url
if self.url_path == "latest.xml":
if catalog_url != "":
self._resolved = True
self._resolverUrl = self.url_path
self.url_path = self.resolve_url(catalog_url)
else:
log.warning('Must pass along the catalog URL to resolve '
'the latest.xml dataset!')
def resolve_url(self, catalog_url):
r"""
Resolve the url of the dataset when reading latest.xml
Parameters
----------
catalog_url : string
The catalog url to be resolved
"""
if catalog_url != "":
resolver_base = catalog_url.split("catalog.xml")[0]
resolver_url = resolver_base + self.url_path
resolver_xml = urlopen(resolver_url)
tree = ET.parse(resolver_xml)
root = tree.getroot()
if "name" in root.attrib:
self.catalog_name = root.attrib["name"]
else:
self.catalog_name = "No name found"
resolved_url = ''
found = False
for child in root.iter():
if not found:
tag_type = child.tag.split('}')[-1]
if tag_type == "dataset":
if "urlPath" in child.attrib:
ds = Dataset(child)
resolved_url = ds.url_path
found = True
if found:
return resolved_url
else:
log.warning("no dataset url path found in latest.xml!")
def make_access_urls(self, catalog_url, all_services, metadata=None):
r"""
Make fully qualified urls for the access methods enabled on the
dataset.
Parameters
----------
catalog_url : string
The top level server url
        all_services : list
            list of SimpleService objects from the parent catalog
        metadata : dict, optional
            metadata inherited from the parent catalog, used to find the
            dataset's serviceName
"""
service_name = None
if metadata:
if "serviceName" in metadata:
service_name = metadata["serviceName"]
access_urls = {}
server_url = catalog_url.split('/thredds/')[0]
found_service = None
if service_name:
for service in all_services:
if service.name == service_name:
found_service = service
break
service = found_service
if service:
if service.service_type != 'Resolver':
if isinstance(service, CompoundService):
for subservice in service.services:
access_urls[subservice.service_type] = server_url + \
subservice.base + self.url_path
else:
access_urls[service.service_type] = server_url + \
service.base + self.url_path
self.access_urls = access_urls
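# Hedged usage sketch (not part of the original module): assumes a reachable THREDDS
# catalog whose datasets advertise an OPENDAP service; the URL and the "OPENDAP" key
# are illustrative only.
def _example_collect_opendap_urls(catalog_url='http://thredds.example.com/thredds/catalog.xml'):
    """Map dataset names to their OPENDAP access URLs, where that service is available."""
    cat = TDSCatalog(catalog_url)
    return {name: ds.access_urls['OPENDAP']
            for name, ds in cat.datasets.items()
            if 'OPENDAP' in ds.access_urls}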
class SimpleService(object):
r"""
An object for holding information about an access service enabled on a
dataset.
Attributes
----------
name : string
The name of the service
    service_type : string
        The service type (e.g. "OPENDAP", "NetcdfSubset", "WMS", etc.)
    base : string
        The base URL path for the service
"""
def __init__(self, service_node):
r"""
        Initialize the SimpleService object.
Parameters
----------
service_node : Element
An Element Tree Element representing a service node
"""
self.name = service_node.attrib['name']
self.service_type = service_node.attrib['serviceType']
self.base = service_node.attrib['base']
class CompoundService(object):
r"""
    An object for holding information about a compound service.
Attributes
----------
name : string
The name of the compound service
service_type : string
The service type (for this object, service type will always be
"COMPOUND")
services : list
A list of SimpleService objects
"""
def __init__(self, service_node):
r"""
Initialize a CompoundService object.
Parameters
----------
service_node : Element
An Element Tree Element representing a compound service node
"""
self.name = service_node.attrib['name']
self.service_type = service_node.attrib['serviceType']
self.base = service_node.attrib['base']
services = []
subservices = 0
for child in list(service_node):
services.append(SimpleService(child))
subservices += 1
self.services = services
self.number_of_subservices = subservices
def _get_latest_cat(catalog_url):
r"""
Get the latest dataset catalog from the supplied top level dataset catalog
url.
    Parameters
    ----------
    catalog_url : string
        The URL of a top level data catalog
Returns
-------
TDSCatalog
A TDSCatalog object containing the information from the latest dataset
"""
cat = TDSCatalog(catalog_url)
for service in cat.services:
if (service.name.lower() == "latest" and
service.service_type.lower() == "resolver"):
latest_cat = cat.catalog_url.replace("catalog.xml", "latest.xml")
return TDSCatalog(latest_cat)
log.error('ERROR: "latest" service not enabled for this catalog!')
def get_latest_access_url(catalog_url, access_method):
r"""
Get the data access url, using a specified access method, to the latest
data available from a top level dataset catalog (url). Currently only
supports the existence of one "latest" dataset.
Parameters
----------
catalog_url : string
The URL of a top level data catalog
    access_method : string
        desired data access method (e.g. "OPENDAP", "NetcdfSubset", "WMS", etc.)
Returns
-------
string
        Data access URL to be used to access the latest data available from a
        given catalog using the specified `access_method`.
"""
latest_cat = _get_latest_cat(catalog_url)
    if latest_cat is not None:
if len(list(latest_cat.datasets.keys())) > 0:
latest_ds = []
for lds_name in latest_cat.datasets:
lds = latest_cat.datasets[lds_name]
if access_method in lds.access_urls:
latest_ds.append(lds.access_urls[access_method])
if len(latest_ds) == 1:
latest_ds = latest_ds[0]
return latest_ds
            else:
                log.error('ERROR: More than one latest dataset found; '
                          'this case is currently not supported in '
                          'siphon.')
        else:
            log.error('ERROR: No datasets found in the latest catalog; '
                      'cannot construct an access URL.')
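# Hedged usage sketch (not part of the original module): the catalog URL is hypothetical,
# and it assumes the catalog enables both the "latest" resolver service and an OPENDAP
# access service.
def _example_latest_opendap_url(catalog_url='http://thredds.example.com/thredds/catalog.xml'):
    """Fetch the OPENDAP access URL of the most recent dataset in the catalog."""
    url = get_latest_access_url(catalog_url, 'OPENDAP')
    if not url:
        raise RuntimeError('Catalog has no resolvable "latest" OPENDAP dataset')
    return url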
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations(object):
"""DdosProtectionPlansOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
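    # Hedged usage sketch (not part of the generated code): assumes an authenticated
    # NetworkManagementClient from azure-mgmt-network that exposes this operation group
    # as `ddos_protection_plans`; resource names are illustrative only.
    #
    #     poller = client.ddos_protection_plans.begin_delete('my-rg', 'my-plan')
    #     poller.result()  # block until the long-running delete completes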
def get(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
"""Gets information about the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosProtectionPlan, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_04_01.models.DdosProtectionPlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.DdosProtectionPlan"
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.DdosProtectionPlan"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DdosProtectionPlan"]
"""Creates or updates a DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2018_04_01.models.DdosProtectionPlan
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_04_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
"""Gets all DDoS protection plans in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_04_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
"""Gets all the DDoS protection plans in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_04_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
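# Hedged usage sketch (not part of the generated code): assumes an authenticated
# NetworkManagementClient from azure-mgmt-network that exposes this operation group as
# `ddos_protection_plans`; resource group, plan name, and location are illustrative only.
def _example_create_and_list_plans(client, resource_group='my-rg', plan_name='my-plan',
                                   location='eastus'):
    """Create a DDoS protection plan, wait for the LRO, then list the plans in the group."""
    poller = client.ddos_protection_plans.begin_create_or_update(
        resource_group_name=resource_group,
        ddos_protection_plan_name=plan_name,
        parameters=_models.DdosProtectionPlan(location=location),
    )
    plan = poller.result()  # LROPoller.result() blocks until the operation completes
    return plan, list(client.ddos_protection_plans.list_by_resource_group(resource_group))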
|
|
class DomainHelper(object):
pass
class NullDomainHelper(DomainHelper):
pass
class CppDomainHelper(DomainHelper):
def __init__(self, definition_parser, substitute):
self.definition_parser = definition_parser
self.substitute = substitute
self.duplicates = {}
def check_cache(self, _id):
try:
return True, self.duplicates[_id]
except KeyError:
return False, ""
def cache(self, _id, project_info):
self.duplicates[_id] = project_info
def remove_word(self, word, definition):
return self.substitute(r"(\s*\b|^)%s\b\s*" % word, "", definition)
class CDomainHelper(DomainHelper):
def __init__(self):
self.duplicates = set()
def is_duplicate(self, name):
return name in self.duplicates
def remember(self, name):
self.duplicates.add(name)
class DomainHandler(object):
def __init__(self, node_factory, document, env, helper, project_info, target_handler):
self.node_factory = node_factory
self.document = document
self.env = env
self.helper = helper
self.project_info = project_info
self.target_handler = target_handler
class NullDomainHandler(DomainHandler):
def __init__(self):
pass
def create_function_id(self, data_object):
return ""
def create_function_target(self, data_object):
return []
def create_class_id(self, data_object):
return ""
def create_class_target(self, data_object):
return []
class CDomainHandler(DomainHandler):
def create_function_id(self, data_object):
name = data_object.definition.split()[-1]
return name
def create_function_target(self, data_object):
name = data_object.definition.split()[-1]
return self._create_target(name, "function")
def _create_target(self, name, type_):
if self.helper.is_duplicate(name):
print ( "Warning: Ignoring duplicate '%s'. As C does not support overloaded "
"functions. Perhaps you should be using the cpp domain?" % name )
return
self.helper.remember(name)
        # Create target node. This is required for LaTeX output, as target nodes are
        # converted to the appropriate \phantomsection & \label for in-document LaTeX links
(target,) = self.target_handler.create_target(name)
inv = self.env.domaindata['c']['objects']
if name in inv:
self.env.warn(
self.env.docname,
'duplicate C object description of %s, ' % name +
'other instance in ' + self.env.doc2path(inv[name][0]),
self.lineno)
inv[name] = (self.env.docname, "function")
return [target]
class CppDomainHandler(DomainHandler):
def create_class_id(self, data_object):
def_ = data_object.name
parser = self.helper.definition_parser(def_)
sigobj = parser.parse_class()
return sigobj.get_id()
def create_class_target(self, data_object):
id_ = self.create_class_id(data_object)
name = data_object.name
return self._create_target(name, "class", id_)
def create_function_id(self, data_object):
definition = self.helper.remove_word("virtual", data_object.definition)
argstring = data_object.argsstring
explicit = "explicit " if data_object.explicit == "yes" else ""
def_ = "%(explicit)s%(definition)s%(argstring)s" % {
"explicit" : explicit,
"definition" : definition,
"argstring" : argstring,
}
parser = self.helper.definition_parser(def_)
sigobj = parser.parse_function()
return sigobj.get_id()
def create_function_target(self, data_object):
id_ = self.create_function_id(data_object)
name = data_object.definition.split()[-1]
return self._create_target(name, "function", id_)
def _create_target(self, name, type_, id_):
"""Creates a target node and registers it with the appropriate domain
object list in a style which matches Sphinx's behaviour for the domain
directives like cpp:function"""
# Check if we've already got this id
in_cache, project = self.helper.check_cache(id_)
if in_cache:
print "Warning: Ignoring duplicate domain reference '%s'. " \
"First found in project '%s'" % (id_, project.reference())
return []
self.helper.cache(id_, self.project_info)
        # Create target node. This is required for LaTeX output, as target nodes are
        # converted to the appropriate \phantomsection & \label for in-document LaTeX links
(target,) = self.target_handler.create_target(id_)
# Register object with the sphinx objects registry
self.document.settings.env.domaindata['cpp']['objects'].setdefault(name,
(self.document.settings.env.docname, type_, id_))
return [target]
class DomainHandlerFactory(object):
def __init__(self, project_info, node_factory, document, env, target_handler, helpers):
self.project_info = project_info
self.node_factory = node_factory
self.document = document
self.env = env
self.target_handler = target_handler
self.domain_helpers = helpers
def create_null_domain_handler(self):
return NullDomainHandler()
def create_domain_handler(self, file_):
domains_handlers = {
"c" : CDomainHandler,
"cpp" : CppDomainHandler,
}
domain = self.project_info.domain_for_file(file_)
try:
helper = self.domain_helpers[domain]
except KeyError:
helper = NullDomainHelper()
try:
return domains_handlers[domain](self.node_factory, self.document, self.env, helper,
self.project_info, self.target_handler)
except KeyError:
return NullDomainHandler()
class NullDomainHandlerFactory(object):
def create_null_domain_handler(self):
return NullDomainHandler()
def create_domain_handler(self, file_):
return NullDomainHandler()
class DomainHandlerFactoryCreator(object):
def __init__(self, node_factory, helpers):
self.node_factory = node_factory
self.helpers = helpers
def create_domain_handler_factory(self, project_info, document, env, options, target_handler):
if "no-link" in options:
return NullDomainHandlerFactory()
return DomainHandlerFactory(
project_info,
self.node_factory,
document,
env,
target_handler,
self.helpers
)
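# Hedged wiring sketch (not part of the original module): shows how the factory creator,
# helpers, and handlers above fit together. The node factory, project_info, document, env,
# target handler, and the "example.c" filename are illustrative assumptions, not the
# actual Breathe setup code.
def _example_build_domain_handler(node_factory, project_info, document, env, target_handler,
                                  options=()):
    """Build a domain handler for a file, falling back to NullDomainHandler for unknown domains."""
    helpers = {
        "c": CDomainHelper(),
        # A "cpp" entry would be a CppDomainHelper built with a definition parser
        # and a substitute callable (e.g. re.sub).
    }
    creator = DomainHandlerFactoryCreator(node_factory, helpers)
    factory = creator.create_domain_handler_factory(project_info, document, env,
                                                    options, target_handler)
    return factory.create_domain_handler("example.c")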
|
|
# Copyright (C) 2006-2013 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult ([email protected])'])
__all__ = ['convert_node_labels_to_integers', 'relabel_nodes']
def relabel_nodes(G, mapping, copy=True):
"""Relabel the nodes of the graph G.
Parameters
----------
G : graph
A NetworkX graph
mapping : dictionary
A dictionary with the old labels as keys and new labels as values.
A partial mapping is allowed.
copy : bool (optional, default=True)
If True return a copy, or if False relabel the nodes in place.
Examples
--------
>>> G=nx.path_graph(3) # nodes 0-1-2
>>> mapping={0:'a',1:'b',2:'c'}
>>> H=nx.relabel_nodes(G,mapping)
>>> print(sorted(H.nodes()))
['a', 'b', 'c']
>>> G=nx.path_graph(26) # nodes 0..25
>>> mapping=dict(zip(G.nodes(),"abcdefghijklmnopqrstuvwxyz"))
>>> H=nx.relabel_nodes(G,mapping) # nodes a..z
>>> mapping=dict(zip(G.nodes(),range(1,27)))
>>> G1=nx.relabel_nodes(G,mapping) # nodes 1..26
Partial in-place mapping:
>>> G=nx.path_graph(3) # nodes 0-1-2
>>> mapping={0:'a',1:'b'} # 0->'a' and 1->'b'
>>> G=nx.relabel_nodes(G,mapping, copy=False)
    >>> print(G.nodes())
[2, 'b', 'a']
Mapping as function:
>>> G=nx.path_graph(3)
>>> def mapping(x):
... return x**2
>>> H=nx.relabel_nodes(G,mapping)
>>> print(H.nodes())
[0, 1, 4]
Notes
-----
Only the nodes specified in the mapping will be relabeled.
The keyword setting copy=False modifies the graph in place.
This is not always possible if the mapping is circular.
In that case use copy=True.
See Also
--------
convert_node_labels_to_integers
"""
# you can pass a function f(old_label)->new_label
# but we'll just make a dictionary here regardless
if not hasattr(mapping,"__getitem__"):
m = dict((n, mapping(n)) for n in G)
else:
m = mapping
if copy:
return _relabel_copy(G, m)
else:
return _relabel_inplace(G, m)
def _relabel_inplace(G, mapping):
old_labels = set(mapping.keys())
new_labels = set(mapping.values())
if len(old_labels & new_labels) > 0:
# labels sets overlap
# can we topological sort and still do the relabeling?
D = nx.DiGraph(list(mapping.items()))
D.remove_edges_from(D.selfloop_edges())
try:
nodes = nx.topological_sort(D, reverse=True)
except nx.NetworkXUnfeasible:
raise nx.NetworkXUnfeasible('The node label sets are overlapping '
'and no ordering can resolve the '
'mapping. Use copy=True.')
else:
# non-overlapping label sets
nodes = old_labels
multigraph = G.is_multigraph()
directed = G.is_directed()
for old in nodes:
try:
new = mapping[old]
except KeyError:
continue
if new == old:
continue
try:
G.add_node(new, attr_dict=G.node[old])
except KeyError:
raise KeyError("Node %s is not in the graph"%old)
if multigraph:
new_edges = [(new, new if old == target else target, key, data)
for (_,target,key,data)
in G.edges(old, data=True, keys=True)]
if directed:
new_edges += [(new if old == source else source, new, key, data)
for (source, _, key,data)
in G.in_edges(old, data=True, keys=True)]
else:
new_edges = [(new, new if old == target else target, data)
for (_,target,data) in G.edges(old, data=True)]
if directed:
new_edges += [(new if old == source else source,new,data)
for (source,_,data) in G.in_edges(old, data=True)]
G.remove_node(old)
G.add_edges_from(new_edges)
return G
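# Hedged illustration (not part of the original module): when old and new label sets
# overlap, _relabel_inplace relies on a topological order of the mapping, so a chain such
# as {0: 1, 1: 2, 2: 3} can be applied in place, while a swap like {0: 1, 1: 0} raises
# NetworkXUnfeasible and requires copy=True.
def _example_overlapping_inplace_relabel():
    """Relabel a path graph in place with an overlapping (but acyclic) mapping."""
    G = nx.path_graph(3)                                  # nodes 0-1-2
    relabel_nodes(G, {0: 1, 1: 2, 2: 3}, copy=False)      # shifts every label up by one
    return sorted(G.nodes())                              # [1, 2, 3]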
def _relabel_copy(G, mapping):
H = G.__class__()
H.name = "(%s)" % G.name
if G.is_multigraph():
H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),k,d.copy())
for (n1,n2,k,d) in G.edges_iter(keys=True, data=True))
else:
H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),d.copy())
for (n1, n2, d) in G.edges_iter(data=True))
H.add_nodes_from(mapping.get(n, n) for n in G)
H.node.update(dict((mapping.get(n, n), d.copy()) for n,d in G.node.items()))
H.graph.update(G.graph.copy())
return H
def convert_node_labels_to_integers(G, first_label=0, ordering="default",
label_attribute=None):
"""Return a copy of the graph G with the nodes relabeled using
consecutive integers.
Parameters
----------
G : graph
A NetworkX graph
first_label : int, optional (default=0)
An integer specifying the starting offset in numbering nodes.
The new integer labels are numbered first_label, ..., n-1+first_label.
ordering : string
"default" : inherit node ordering from G.nodes()
"sorted" : inherit node ordering from sorted(G.nodes())
"increasing degree" : nodes are sorted by increasing degree
"decreasing degree" : nodes are sorted by decreasing degree
label_attribute : string, optional (default=None)
Name of node attribute to store old label. If None no attribute
is created.
Notes
-----
Node and edge attribute data are copied to the new (relabeled) graph.
See Also
--------
relabel_nodes
"""
N = G.number_of_nodes()+first_label
if ordering == "default":
mapping = dict(zip(G.nodes(), range(first_label, N)))
elif ordering == "sorted":
nlist = G.nodes()
nlist.sort()
mapping = dict(zip(nlist, range(first_label, N)))
elif ordering == "increasing degree":
dv_pairs = [(d,n) for (n,d) in G.degree_iter()]
dv_pairs.sort() # in-place sort from lowest to highest degree
mapping = dict(zip([n for d,n in dv_pairs], range(first_label, N)))
elif ordering == "decreasing degree":
dv_pairs = [(d,n) for (n,d) in G.degree_iter()]
dv_pairs.sort() # in-place sort from lowest to highest degree
dv_pairs.reverse()
mapping = dict(zip([n for d,n in dv_pairs], range(first_label, N)))
else:
raise nx.NetworkXError('Unknown node ordering: %s'%ordering)
H = relabel_nodes(G, mapping)
H.name = "("+G.name+")_with_int_labels"
# create node attribute with the old label
if label_attribute is not None:
nx.set_node_attributes(H, label_attribute,
dict((v,k) for k,v in mapping.items()))
return H
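# Hedged usage sketch (not part of the original module): relabel a small string-labeled
# graph to consecutive integers while keeping the original labels in a node attribute.
def _example_int_labels():
    """Relabel nodes 'a', 'b', 'c' to 0, 1, 2 and remember the originals."""
    G = nx.Graph([('a', 'b'), ('b', 'c')])
    H = convert_node_labels_to_integers(G, ordering="sorted", label_attribute="old_label")
    return sorted(H.nodes()), nx.get_node_attributes(H, "old_label")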
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations:
"""SubnetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def get(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Subnet":
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subnet, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_09_01.models.Subnet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
subnet_parameters: "_models.Subnet",
**kwargs: Any
) -> "_models.Subnet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(subnet_parameters, 'Subnet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Subnet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
subnet_parameters: "_models.Subnet",
**kwargs: Any
) -> AsyncLROPoller["_models.Subnet"]:
"""Creates or updates a subnet in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param subnet_parameters: Parameters supplied to the create or update subnet operation.
:type subnet_parameters: ~azure.mgmt.network.v2016_09_01.models.Subnet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Subnet or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2016_09_01.models.Subnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=subnet_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def list(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SubnetListResult"]:
"""Gets all subnets in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubnetListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_09_01.models.SubnetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubnetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SubnetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'} # type: ignore
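# Hedged usage sketch (not part of the generated code): assumes an authenticated async
# NetworkManagementClient from azure.mgmt.network.aio that exposes this operation group as
# `subnets`; resource names and the address prefix are illustrative only.
async def _example_create_subnet(client, resource_group='my-rg', vnet='my-vnet',
                                 subnet='my-subnet'):
    """Create (or update) a subnet, wait for the LRO, then list all subnets in the vnet."""
    poller = await client.subnets.begin_create_or_update(
        resource_group_name=resource_group,
        virtual_network_name=vnet,
        subnet_name=subnet,
        subnet_parameters=_models.Subnet(address_prefix='10.0.0.0/24'),
    )
    created = await poller.result()  # AsyncLROPoller.result() is awaitable
    return created, [s async for s in client.subnets.list(resource_group, vnet)]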
|
|
"""
Tests the models of the stock app
"""
from collections import namedtuple
import datetime
from unittest import mock, TestCase
import pytest
from stocks.historical import create_stock
from stocks.models import DailyStockQuote, InvestmentBucket, InvestmentStockConfiguration, Stock
from django.db.models.signals import post_save
from django.db.models import Sum
from django.contrib.auth.models import User
from yahoo_historical import Fetcher
from trading.models import TradeStock
def setup_module(module):
"""
Mock out any externals
"""
post_save.disconnect(receiver=create_stock, sender=Stock)
module.original_init_method = Fetcher.__init__
module.original_getHistorical_method = Fetcher.getHistorical
Fetcher.__init__ = mock.Mock(return_value=None)
Fetcher.getHistorical = mock.Mock(return_value=None)
def teardown_module(module):
"""
Restore externals
"""
post_save.connect(receiver=create_stock, sender=Stock)
Fetcher.__init__ = module.original_init_method
Fetcher.getHistorical = module.original_getHistorical_method
@pytest.mark.django_db(transaction=True)
def test_stock_latest_quote():
"""
Tests Stock.latest_quote()
"""
stock = Stock(
name="Name1",
ticker="TKRC"
)
stock.save()
correct_quote3 = stock.daily_quote.create(
value=3,
date="2016-06-03"
)
correct_quote1 = stock.daily_quote.create(
value=4,
date="2016-06-05"
)
correct_quote2 = stock.daily_quote.create(
value=5,
date="2016-06-06"
)
assert stock.latest_quote("2016-06-05") == correct_quote1
assert stock.latest_quote() == correct_quote2
assert stock.latest_quote("2016-06-04") == correct_quote3
with pytest.raises(Exception):
stock.latest_quote("2016-06-02")
with pytest.raises(Exception):
stock.latest_quote(datetime.datetime.now() + datetime.timedelta(days=3))
@pytest.mark.django_db(transaction=True)
def test_stock_find_stock():
"""
Tests Stock.find_stock()
"""
stock1 = Stock(
name="Name1X",
ticker="TKRC"
)
stock1.save()
stock2 = Stock(
name="Name2Y",
ticker="TKFF"
)
stock2.save()
TestCase.assertCountEqual(None, [stock1, stock2], Stock.find_stock(""))
TestCase.assertCountEqual(None, [stock1, stock2], Stock.find_stock("Name"))
TestCase.assertCountEqual(None, [stock1], Stock.find_stock("Name1"))
TestCase.assertCountEqual(None, [stock2], Stock.find_stock("e2"))
single_stock_find = Stock.find_stock("", 1)[0]
assert single_stock_find == stock1 or single_stock_find == stock2
@pytest.mark.django_db(transaction=True)
def test_stock_create_new_stock():
"""
Tests Stock.create_new_stock()
"""
Stock.create_new_stock(ticker="ABC", name="DEF")
with mock.patch.object(Fetcher, "__init__", side_effect=KeyError()):
with pytest.raises(Exception):
Stock.create_new_stock(ticker="ABC", name="DEF")
@pytest.mark.django_db(transaction=True)
def test_stock_quote_in_range():
"""
Tests Stock.quote_in_range()
"""
stock = Stock(
name="Name1X",
ticker="TKRC"
)
stock.save()
quote1 = stock.daily_quote.create(
value=3,
date="2016-06-03"
)
quote3 = stock.daily_quote.create(
value=5,
date="2016-06-06"
)
quote2 = stock.daily_quote.create(
value=4,
date="2016-06-05"
)
assert [quote1, quote2, quote3] == list(stock.quote_in_range())
assert [
quote1,
quote2,
quote3
] == list(stock.quote_in_range(start="2016-06-03", end="2016-06-06"))
assert [] == list(stock.quote_in_range(start="2016-06-03", end="2016-06-02"))
assert [quote1, quote2, quote3] == list(stock.quote_in_range(start="2016-06-03"))
assert [quote2, quote3] == list(stock.quote_in_range(start="2016-06-04"))
assert [quote1, quote2, quote3] == list(stock.quote_in_range(end="2016-06-06"))
assert [quote1, quote2] == list(stock.quote_in_range(end="2016-06-05"))
@pytest.mark.django_db(transaction=True)
def test_stock_trades_for_profile():
"""
Tests Stock.trades_for_profile()
"""
user1 = User.objects.create(username='user1', password="a")
user2 = User.objects.create(username='user2', password="a")
t1_1 = user1.profile.trading_accounts.create(
account_name="u1t1"
)
t1_2 = user1.profile.trading_accounts.create(
account_name="u1t2"
)
t2_1 = user2.profile.trading_accounts.create(
account_name="u2t"
)
stock = Stock.create_new_stock(
name="Name1X",
ticker="TKRC"
)
TradeStock(quantity=1, account=t1_1, stock=stock).save()
TradeStock(quantity=1, account=t1_2, stock=stock).save()
TradeStock(quantity=1, account=t2_1, stock=stock).save()
assert stock.trades_for_profile(user1.profile).count() == 2
assert stock.trades_for_profile(user2.profile).count() == 1
@pytest.mark.django_db(transaction=True)
def test_bucket_trades_for_profile():
"""
    Tests InvestmentBucket.accessible_buckets()
"""
user1 = User.objects.create(username='user1', password="a")
user2 = User.objects.create(username='user2', password="a")
InvestmentBucket(name="B1", owner=user1.profile, public=False, available=1).save()
InvestmentBucket(name="B2", owner=user1.profile, public=True, available=1).save()
InvestmentBucket(name="B3", owner=user1.profile, public=False, available=1).save()
InvestmentBucket(name="B4", owner=user2.profile, public=False, available=1).save()
assert InvestmentBucket.accessible_buckets(user1.profile).count() == 3
assert InvestmentBucket.accessible_buckets(user2.profile).count() == 2
@pytest.mark.django_db(transaction=True)
def test_bucket_create_new_bucket():
"""
Tests InvestmentBucket.create_new_bucket()
"""
user1 = User.objects.create(username='user1', password="a")
user2 = User.objects.create(username='user2', password="a")
assert user1.profile.owned_bucket.count() == 0
InvestmentBucket.create_new_bucket(name="Bucket1", public=True, owner=user1.profile)
InvestmentBucket.create_new_bucket(name="Bucket2", public=True, owner=user2.profile)
assert user1.profile.owned_bucket.count() == 1
@pytest.mark.django_db(transaction=True)
def test_bucket_add_attribute():
"""
Tests InvestmentBucket.add_attribute()
"""
user1 = User.objects.create(username='user1', password="a")
bucket = InvestmentBucket(name="Bucket1", public=True, owner=user1.profile, available=1)
bucket.save()
assert bucket.description.count() == 0
bucket.add_attribute("Some text")
assert bucket.description.count() == 1
attr = bucket.description.get()
assert attr.is_good
assert attr.text == "Some text"
bucket.add_attribute("Some more text", False)
assert bucket.description.count() == 2
assert bucket.description.filter(is_good=True).count() == 1
assert bucket.description.filter(is_good=False).count() == 1
@pytest.mark.django_db(transaction=True)
def test_bucket_get_stock_configs():
"""
Tests InvestmentBucket.get_stock_configs()
"""
user1 = User.objects.create(username='user1', password="a")
stock1 = Stock(
name="Name1X",
ticker="TKRC"
)
stock2 = Stock(
name="Name2X",
ticker="TKRCF"
)
stock1.save()
stock2.save()
bucket = InvestmentBucket(name="Bucket1", public=True, owner=user1.profile, available=1)
bucket.save()
InvestmentStockConfiguration(
quantity=1,
stock=stock1,
bucket=bucket,
start="2016-06-06",
end="2016-06-08",
).save()
InvestmentStockConfiguration(quantity=1, stock=stock1, bucket=bucket, start="2016-06-08").save()
InvestmentStockConfiguration(quantity=1, stock=stock2, bucket=bucket, start="2016-06-06").save()
assert bucket.get_stock_configs().count() == 2
assert bucket.get_stock_configs("2016-06-06").count() == 2
assert bucket.get_stock_configs("2016-06-08").count() == 3
@pytest.mark.django_db(transaction=True)
def test_bucket_sell_all():
"""
Tests InvestmentBucket._sell_all()
"""
user1 = User.objects.create(username='user1', password="a")
stock1 = Stock(
name="Name1X",
ticker="TKRC"
)
stock1.save()
DailyStockQuote(date="2016-06-10", value=100.0, stock=stock1).save()
bucket = InvestmentBucket(name="Bucket1", public=True, owner=user1.profile, available=10)
bucket.save()
cfg1 = InvestmentStockConfiguration(
quantity=1,
stock=stock1,
bucket=bucket,
start="2016-06-06",
end="2016-06-08",
)
cfg2 = InvestmentStockConfiguration(
quantity=1,
stock=stock1,
bucket=bucket,
start="2016-06-08",
)
cfg1.save()
cfg2.save()
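    # Only the still-open config (cfg2) should be liquidated: 1 share at the latest quote of 100
    # is added to the 10 already available, hence the 110 asserted below.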
# pylint: disable=protected-access
bucket._sell_all()
# pylint: enable=protected-access
bucket.refresh_from_db()
cfg1.refresh_from_db()
cfg2.refresh_from_db()
assert bucket.available == 110
assert cfg1.end == datetime.date(2016, 6, 8)
assert cfg2.end is not None
@pytest.mark.django_db(transaction=True)
def test_bucket_change_config():
"""
Tests InvestmentBucket.change_config()
"""
cfg_str = namedtuple("cfg_str", ["id", "quantity"])
user1 = User.objects.create(username='user1', password="a")
stock1 = Stock(
name="Name1X",
ticker="TKRC"
)
stock1.save()
DailyStockQuote(date="2016-06-10", value=100.0, stock=stock1).save()
bucket = InvestmentBucket(name="Bucket1", public=True, owner=user1.profile, available=10)
bucket.save()
cfg1 = InvestmentStockConfiguration(
quantity=1,
stock=stock1,
bucket=bucket,
start="2016-06-06",
end="2016-06-08",
)
cfg2 = InvestmentStockConfiguration(
quantity=1,
stock=stock1,
bucket=bucket,
start="2016-06-08",
)
cfg1.save()
cfg2.save()
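    # With only 10 available, raising the open position from 1 to 2 shares at the latest quote of
    # 100 should fail; after bumping available to 1000 the same change presumably buys one extra
    # share, leaving 900.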
with pytest.raises(Exception):
bucket.change_config([cfg_str(id=stock1.id, quantity=2)])
bucket.available = 1000
bucket.change_config([cfg_str(id=stock1.id, quantity=2)])
bucket.refresh_from_db()
assert bucket.available == 900
assert bucket.stocks.filter(
end=None
).values('stock_id').annotate(
sum_q=Sum('quantity')
).get()['sum_q'] == 2
@pytest.mark.django_db(transaction=True)
def test_stock_config_value_on():
"""
Tests InvestmentStockConfiguration.value_on()
"""
user = User.objects.create(username='user1', password="a")
stock = Stock(
name="Name1X",
ticker="TKRC"
)
stock.save()
value = 5
stock.daily_quote.create(
value=value,
date="2016-06-06"
)
bucket = InvestmentBucket(name="bucket", public=True, owner=user.profile, available=2)
bucket.save()
quantity = 3
config = InvestmentStockConfiguration(
quantity=quantity,
stock=stock,
bucket=bucket,
start="2016-06-08"
)
config.save()
with pytest.raises(Exception):
config.value_on("2016-06-01")
assert config.value_on("2016-06-08") == quantity * value
mock_quote = namedtuple('mock_quote', 'value')
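    # value_on() is expected to raise rather than silently propagate NaN when the latest quote is
    # not a number.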
with mock.patch.object(
Stock,
"latest_quote",
mock.MagicMock(
return_value=mock_quote(float('NaN'))
)):
with pytest.raises(Exception):
config.value_on()
@pytest.mark.django_db(transaction=True)
def test_bucket_value_on():
"""
    Tests that InvestmentBucket.value_on() returns just the available cash for a bucket
    with no stock configurations
"""
user = User.objects.create(username='user1', password="a")
bucket = InvestmentBucket(name="bucket", public=True, owner=user.profile, available=10)
bucket.save()
assert bucket.value_on("2016-06-01") == 10
@pytest.mark.django_db(transaction=True)
def test_bucket_historical():
"""
Tests InvestmentBucket.historical()
"""
user = User.objects.create(username='user1', password="a")
stock = Stock(
name="Name1X",
ticker="TKRC"
)
stock.save()
value = [3, 5, 7, 2]
skip = 2
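    # Quotes are seeded starting `skip` days in the past, so historical(count=len(value), skip=skip)
    # should line up one-to-one with the values above.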
for idx, val in enumerate(value):
stock.daily_quote.create(
value=val,
date=datetime.datetime.now().date() - datetime.timedelta(days=idx+2)
)
available = 2
bucket = InvestmentBucket(name="bucket", public=True, owner=user.profile, available=available)
bucket.save()
quantity = 3
config = InvestmentStockConfiguration(
quantity=quantity,
stock=stock,
bucket=bucket,
start=datetime.datetime.now().date() - datetime.timedelta(days=len(value)+2)
)
config.save()
historical = bucket.historical(count=len(value), skip=skip)
for idx, val in enumerate(value):
assert historical[idx] == (
datetime.datetime.now().date() - datetime.timedelta(days=idx+2),
val * quantity + available
)
stock2 = Stock(
name="Name2X",
ticker="Testes"
)
stock2.save()
value = list(range(1, 31))
for val in value:
idx = val - 1
stock2.daily_quote.create(
value=val,
date=datetime.datetime.now().date() - datetime.timedelta(days=idx)
)
bucket2 = InvestmentBucket(name="bucket2", public=True, owner=user.profile, available=0)
bucket2.save()
config2 = InvestmentStockConfiguration(
quantity=1,
stock=stock2,
bucket=bucket2,
start=datetime.datetime.now().date() - datetime.timedelta(days=len(value))
)
config2.save()
historical2 = bucket2.historical()
for val in value:
idx = val - 1
assert historical2[idx] == (
datetime.datetime.now().date() - datetime.timedelta(days=idx),
val
)
|
|
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.api import images
from google.appengine.api.labs import taskqueue
from google.appengine.api.urlfetch import DownloadError
from google.appengine.runtime import DeadlineExceededError
from django.utils import simplejson as json
import os
from os import environ
import urllib
import logging
import time
import oauth
import foursquare
import constants
from datetime import datetime
from scripts import fetch_foursquare_data
from gheatae import color_scheme, tile, provider
from models import UserInfo, UserVenue, MapImage, AppToken
class IndexHandler(webapp.RequestHandler):
def get(self):
welcome_data = {
'user': '',
'userinfo': '',
'url': users.create_login_url(self.request.uri),
'real_name': '',
'photo_url': constants.default_photo,
'is_ready': False
}
sidebar_data = {
'color_scheme_dict': color_scheme.color_schemes,
'color_scheme': constants.default_color,
}
map_data = {
'citylat': constants.default_lat,
'citylng': constants.default_lng,
'zoom': constants.default_zoom,
'width': constants.default_dimension,
'height': constants.default_dimension,
}
foursquare_is_happy = True
user = users.get_current_user()
if user:
welcome_data['user'] = user
welcome_data['url'] = users.create_logout_url(self.request.uri)
userinfo = UserInfo.all().filter('user =', user).order('-created').get()
if userinfo:
if userinfo.is_authorized:
try:
fetch_foursquare_data.update_user_info(userinfo)
except foursquare.FoursquareRemoteException, err:
if str(err).find('403 Forbidden') >= 0:
foursquare_is_happy = False
else:
raise err
welcome_data['userinfo'] = userinfo
welcome_data['real_name'] = userinfo.real_name
welcome_data['photo_url'] = userinfo.photo_url
welcome_data['is_ready'] = userinfo.is_ready
sidebar_data['color_scheme'] = userinfo.color_scheme
map_data['citylat'] = userinfo.citylat
map_data['citylng'] = userinfo.citylng
os_path = os.path.dirname(__file__)
self.response.out.write(template.render(os.path.join(os_path, 'templates/header.html'), {'key': constants.get_google_maps_apikey()}))
self.response.out.write(template.render(os.path.join(os_path, 'templates/private_welcome.html'), welcome_data))
if not foursquare_is_happy:
self.response.out.write(template.render(os.path.join(os_path, 'templates/private_forbidden.html'), None))
elif user and userinfo:
if userinfo.is_authorized:
self.response.out.write(template.render(os.path.join(os_path, 'templates/private_sidebar.html'), sidebar_data))
else:
self.response.out.write(template.render(os.path.join(os_path, 'templates/private_unauthorized.html'), None))
else:
self.response.out.write(template.render(os.path.join(os_path, 'templates/information.html'), {'user': user }))
self.response.out.write(template.render(os.path.join(os_path, 'templates/private_map.html'), map_data))
self.response.out.write(template.render(os.path.join(os_path, 'templates/all_footer.html'), None))
class AuthHandler(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
oauth_token = self.request.get("oauth_token")
def get_new_fs_and_credentials():
oauth_token, oauth_secret = constants.get_oauth_strings()
credentials = foursquare.OAuthCredentials(oauth_token, oauth_secret)
fs = foursquare.Foursquare(credentials)
return fs, credentials
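      # Two legs of the OAuth dance: when oauth_token is present this request is the callback,
      # so exchange the request token for an access token and create the UserInfo; otherwise
      # fetch a new request token and redirect the user to Foursquare to authorize it.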
if oauth_token:
old_userinfos = UserInfo.all().filter('user =', user).fetch(500)
db.delete(old_userinfos)
fs, credentials = get_new_fs_and_credentials()
apptoken = AppToken.all().filter('token =', oauth_token).get()
try:
user_token = fs.access_token(oauth.OAuthToken(apptoken.token, apptoken.secret))
credentials.set_access_token(user_token)
userinfo = UserInfo(user = user, token = credentials.access_token.key, secret = credentials.access_token.secret, is_ready=False, is_authorized=True, last_checkin=0, last_updated=datetime.now(), color_scheme='fire', level_max=int(constants.level_const), checkin_count=0, venue_count=0)
        except DownloadError, err:
          if str(err).find('ApplicationError: 5') >= 0:
            # If the OAuth token exchange fails, send the user back to the signup page right away;
            # falling through would leave userinfo undefined for the fetch below.
            # TODO find a better way to handle this case, but it's not clear there is a simple way
            # to do it without messing up a bunch of code.
            self.redirect("/")
            return
          else:
            raise err
try:
fetch_foursquare_data.update_user_info(userinfo)
fetch_foursquare_data.fetch_and_store_checkins(userinfo, limit=10)
taskqueue.add(url='/fetch_foursquare_data/all_for_user/%s' % userinfo.key())
except foursquare.FoursquareRemoteException, err:
if str(err).find('403 Forbidden') >= 0:
pass # if a user tries to sign up while my app is blocked, then it currently just redirects to the signup page
#TODO find a better way to handle this case, but it's not clear there is a simple way to do it without messing up a bunch of code
else:
raise err
except DownloadError:
pass #TODO make this better, but I'd rather throw the user back to the main page to try again than show the user an error.
self.redirect("/")
else:
fs, credentials = get_new_fs_and_credentials()
app_token = fs.request_token()
auth_url = fs.authorize(app_token)
new_apptoken = AppToken(token = app_token.key, secret = app_token.secret)
new_apptoken.put()
self.redirect(auth_url)
else:
self.redirect(users.create_login_url(self.request.uri))
class StaticMapHandler(webapp.RequestHandler):
def get(self):
path = environ['PATH_INFO']
if path.endswith('.png'):
raw = path[:-4] # strip extension
try:
assert raw.count('/') == 2, "%d /'s" % raw.count('/')
foo, bar, map_key = raw.split('/')
except AssertionError, err:
logging.error(err.args[0])
return
else:
logging.error("Invalid path: " + path)
return
mapimage = db.get(map_key)
if mapimage:
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(mapimage.img)
else:
self.redirect("/")
class TileHandler(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
path = environ['PATH_INFO']
if path.endswith('.png'):
raw = path[:-4] # strip extension
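        # Expected path: /tile/<layer>/<zoom>/<y>,<x>.png; splitting on '/' yields
        # ['', 'tile', layer, zoom, 'y,x'].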
try:
assert raw.count('/') == 4, "%d /'s" % raw.count('/')
foo, bar, layer, zoom, yx = raw.split('/') #tile is ignored, is just here to prevent caching
assert yx.count(',') == 1, "%d /'s" % yx.count(',')
y, x = yx.split(',')
assert zoom.isdigit() and x.isdigit() and y.isdigit(), "not digits"
zoom = int(zoom)
x = int(x)
y = int(y)
assert constants.min_zoom <= zoom <= constants.max_zoom, "bad zoom: %d" % zoom
except AssertionError, err:
logging.error(err.args[0])
self.respondError(err)
return
else:
self.respondError("Invalid path")
return
start = datetime.now()
try:
new_tile = tile.GoogleTile(user, zoom, x, y)
img_data = new_tile.image_out()
self.response.headers['Content-Type'] = "image/png"
self.response.out.write(img_data)
except DeadlineExceededError, err:
logging.warning('%s error - started at %s, failed at %s' % (str(err), start, datetime.now()))
self.response.headers['Content-Type'] = "image/png"
self.response.out.write('')
class PublicPageHandler(webapp.RequestHandler):
def get(self):
path = environ['PATH_INFO']
if path.endswith('.html'):
raw = path[:-5] # strip extension
try:
assert raw.count('/') == 2, "%d /'s" % raw.count('/')
foo, bar, map_key = raw.split('/')
except AssertionError, err:
logging.error(err.args[0])
return
else:
logging.error("Invalid path: " + path)
return
mapimage = db.get(map_key)
if mapimage:
welcome_data = {
'real_name': '',
'photo_url': constants.default_photo,
}
sidebar_data = {
'domain': environ['HTTP_HOST'],
'public_url': 'public/%s.html' % mapimage.key(),
}
map_data = {
'domain': environ['HTTP_HOST'],
'static_url': mapimage.static_url,
'mapimage_url': 'map/%s.png' % mapimage.key(),
}
userinfo = UserInfo.all().filter('user =', mapimage.user).order('-created').get()
if userinfo:
welcome_data['real_name'] = userinfo.real_name
welcome_data['photo_url'] = userinfo.photo_url
#welcome_data['checkin_count'] = userinfo.checkin_count
os_path = os.path.dirname(__file__)
self.response.out.write(template.render(os.path.join(os_path, 'templates/header.html'), None))
self.response.out.write(template.render(os.path.join(os_path, 'templates/public_welcome.html'), welcome_data))
self.response.out.write(template.render(os.path.join(os_path, 'templates/public_sidebar.html'), sidebar_data))
self.response.out.write(template.render(os.path.join(os_path, 'templates/public_map.html'), map_data))
self.response.out.write(template.render(os.path.join(os_path, 'templates/all_footer.html'), None))
else:
self.redirect("/")
class UserVenueWriter(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
userinfo = UserInfo.all().filter('user =', user).order('-created').get()
if userinfo:
self.response.out.write(str(userinfo))
      usevenues = constants.provider.get_user_data(user=user)
      # Backfill checkin_guid_list for venues that still only carry numeric checkin ids.
      for uservenue in usevenues:
        if not uservenue.checkin_guid_list or len(uservenue.checkin_guid_list) == 0:
          uservenue.checkin_guid_list = [str(checkin_id) for checkin_id in uservenue.checkin_list]
          uservenue.put()
template_data = { 'uservenues': usevenues}
os_path = os.path.dirname(__file__)
self.response.out.write(template.render(os.path.join(os_path, 'templates/uservenue_list.html'), template_data))
class StaticMapHtmlWriter(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
mapimage = MapImage.all().filter('user =', user).get()
if mapimage:
template_data = {
'domain': environ['HTTP_HOST'],
'static_url': mapimage.static_url,
'mapimage_url': 'map/%s.png' % mapimage.key(),
'public_url': 'public/%s.html' % mapimage.key(),
'timestamp': str(time.time())
}
os_path = os.path.dirname(__file__)
self.response.out.write(template.render(os.path.join(os_path, 'templates/static_map.html'), template_data))
else:
self.response.out.write("")
class ReadyInfoWriter(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
userinfo = UserInfo.all().filter('user =', user).get() #.order('-created')
if userinfo:
self.response.out.write(str(userinfo.is_ready) + ',' + str(userinfo.checkin_count))
return
self.response.out.write("")
def main():
application = webapp.WSGIApplication([('/', IndexHandler),
('/go_to_foursquare', AuthHandler),
('/authenticated', AuthHandler),
('/tile/.*', TileHandler),
('/map/.*', StaticMapHandler),
('/public/.*', PublicPageHandler),
('/static_map_html', StaticMapHtmlWriter),
('/user_is_ready', ReadyInfoWriter),
('/view_uservenues', UserVenueWriter)],
debug=True)
constants.provider = provider.DBProvider()
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import re
import traceback
from collections import defaultdict
import six
from pathspec import PathSpec
from pathspec.patterns.gitwildmatch import GitWildMatchPattern
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.build_file import BuildFile
from pants.base.specs import DescendantAddresses, SiblingAddresses, SingleAddress
from pants.build_graph.address import Address, parse_spec
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.address_mapper import AddressMapper
from pants.build_graph.build_file_parser import BuildFileParser
from pants.util.dirutil import fast_relpath, join_specs, longest_dir_prefix
logger = logging.getLogger(__name__)
# Note: Significant effort has been made to keep the types BuildFile, BuildGraph, Address, and
# Target separated appropriately. The BuildFileAddressMapper is intended to have knowledge
# of just BuildFile, BuildFileParser and Address.
#
# Here are some guidelines to help maintain this abstraction:
# - Use the terminology 'address' instead of 'target' in symbols and user messages
# - Wrap exceptions from BuildFile and BuildFileParser with a subclass of AddressLookupError
# so that callers do not have to reference those modules
#
# Note: 'spec' should not be a user visible term, substitute 'address' instead.
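# Illustrative sketch of the exception-wrapping guideline above (address_mapper is a hypothetical
# BuildFileAddressMapper instance, not a name from this module):
#   try:
#     build_file_address, addressable = address_mapper.resolve(address)
#   except AddressLookupError as e:
#     ...  # callers handle the wrapped error; no BuildFile/BuildFileParser imports needed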
class BuildFileAddressMapper(AddressMapper):
"""Maps addresses in the pants virtual address space to corresponding BUILD file declarations."""
# Target specs are mapped to the patterns which match them, if any. This variable is a key for
# specs which don't match any exclusion regexps. We know it won't already be in the list of
# patterns, because the asterisks in its name make it an invalid regexp.
_UNMATCHED_KEY = '** unmatched **'
def __init__(self, build_file_parser, project_tree, build_ignore_patterns=None, exclude_target_regexps=None,
subproject_roots=None):
"""Create a BuildFileAddressMapper.
:param build_file_parser: An instance of BuildFileParser
    :param project_tree: The ProjectTree used to locate and read BUILD files
    :param build_ignore_patterns: Optional list of gitignore-style patterns for BUILD files to skip
    :param exclude_target_regexps: Optional list of regexps; address specs matching any of them are
      excluded from scans
    :param subproject_roots: Optional list of paths treated as subproject roots when resolving specs
"""
self._build_file_parser = build_file_parser
self._spec_path_to_address_map_map = {} # {spec_path: {address: addressable}} mapping
self._project_tree = project_tree
self._build_ignore_patterns = PathSpec.from_lines(GitWildMatchPattern, build_ignore_patterns or [])
self._exclude_target_regexps = exclude_target_regexps or []
self._exclude_patterns = [re.compile(pattern) for pattern in self._exclude_target_regexps]
self.subproject_roots = subproject_roots or []
@property
def root_dir(self):
return self._build_file_parser.root_dir
def resolve(self, address):
"""Maps an address in the virtual address space to an object.
:param Address address: the address to lookup in a BUILD file
:raises AddressLookupError: if the path to the address is not found.
:returns: A tuple of the natively mapped BuildFileAddress and the Addressable it points to.
"""
address_map = self._address_map_from_spec_path(address.spec_path)
if address not in address_map:
self._raise_incorrect_address_error(address.spec_path, address.target_name, address_map)
else:
return address_map[address]
def _address_map_from_spec_path(self, spec_path):
"""Returns a resolution map of all addresses in a "directory" in the virtual address space.
:returns {Address: (Address, <resolved Object>)}:
"""
if spec_path not in self._spec_path_to_address_map_map:
try:
build_files = list(BuildFile.get_build_files_family(self._project_tree, spec_path,
self._build_ignore_patterns))
if not build_files:
raise self.BuildFileScanError("{spec_path} does not contain any BUILD files."
.format(spec_path=os.path.join(self.root_dir, spec_path)))
mapping = self._build_file_parser.address_map_from_build_files(build_files)
except BuildFileParser.BuildFileParserError as e:
raise AddressLookupError("{message}\n Loading addresses from '{spec_path}' failed."
.format(message=e, spec_path=spec_path))
address_map = {address: (address, addressed) for address, addressed in mapping.items()}
self._spec_path_to_address_map_map[spec_path] = address_map
return self._spec_path_to_address_map_map[spec_path]
def addresses_in_spec_path(self, spec_path):
"""Returns only the addresses gathered by `address_map_from_spec_path`, with no values."""
return self._address_map_from_spec_path(spec_path).keys()
def determine_subproject_spec(self, spec, relative_to):
subproject_prefix = longest_dir_prefix(relative_to, self.subproject_roots)
if subproject_prefix:
spec = join_specs(subproject_prefix, spec)
logger.debug('Determined that spec {} relative to {} belongs to '
'subproject {}'.format(spec, relative_to, subproject_prefix))
return spec
def spec_to_address(self, spec, relative_to=''):
"""A helper method for mapping a spec to the correct build file address.
:param string spec: A spec to lookup in the map.
:param string relative_to: Path the spec might be relative to
:raises :class:`pants.build_graph.address_lookup_error.AddressLookupError`
If the BUILD file cannot be found in the path specified by the spec.
:returns: A new Address instance.
:rtype: :class:`pants.build_graph.address.BuildFileAddress`
"""
try:
spec = self.determine_subproject_spec(spec, relative_to)
spec_path, name = parse_spec(spec, relative_to=relative_to)
address = Address(spec_path, name)
build_file_address, _ = self.resolve(address)
return build_file_address
except (ValueError, AddressLookupError) as e:
raise self.InvalidBuildFileReference('{message}\n when translating spec {spec}'
.format(message=e, spec=spec))
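  # Illustrative usage (paths are hypothetical): spec_to_address('src/python/hello:main') returns
  # the BuildFileAddress declared by src/python/hello/BUILD for the 'main' target, while
  # spec_to_address(':util', relative_to='src/python/hello') resolves the short form against that
  # directory via parse_spec.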
def scan_build_files(self, base_path):
build_files = BuildFile.scan_build_files(self._project_tree, base_path,
build_ignore_patterns=self._build_ignore_patterns)
return OrderedSet(bf.relpath for bf in build_files)
def specs_to_addresses(self, specs, relative_to=''):
"""The equivalent of `spec_to_address` for a group of specs all relative to the same path.
    :param specs: An iterable of address spec strings.
:raises AddressLookupError: if the BUILD file cannot be found in the path specified by the spec
"""
for spec in specs:
yield self.spec_to_address(spec, relative_to=relative_to)
def scan_addresses(self, root=None):
"""Recursively gathers all addresses visible under `root` of the virtual address space.
:param string root: The absolute path of the root to scan; defaults to the root directory of the
pants project.
:rtype: set of :class:`pants.build_graph.address.Address`
:raises AddressLookupError: if there is a problem parsing a BUILD file
"""
root_dir = get_buildroot()
base_path = None
if root:
try:
base_path = fast_relpath(root, root_dir)
except ValueError as e:
raise self.InvalidRootError(e)
addresses = set()
try:
for build_file in BuildFile.scan_build_files(self._project_tree,
base_relpath=base_path,
build_ignore_patterns=self._build_ignore_patterns):
for address in self.addresses_in_spec_path(build_file.spec_path):
addresses.add(address)
except BuildFile.BuildFileError as e:
# Handle exception from BuildFile out of paranoia. Currently, there is no way to trigger it.
raise self.BuildFileScanError("{message}\n while scanning BUILD files in '{root}'."
.format(message=e, root=root))
return addresses
def scan_specs(self, specs, fail_fast=True):
"""Execute a collection of `specs.Spec` objects and return a set of Addresses."""
excluded_target_map = defaultdict(set) # pattern -> targets (for debugging)
def exclude_spec(spec):
for pattern in self._exclude_patterns:
if pattern.search(spec) is not None:
excluded_target_map[pattern.pattern].add(spec)
return True
excluded_target_map[self._UNMATCHED_KEY].add(spec)
return False
def exclude_address(address):
return exclude_spec(address.spec)
    # TODO: Investigate why using a set breaks CI. May help migration to the v2 engine.
addresses = OrderedSet()
for spec in specs:
for address in self._scan_spec(spec, fail_fast):
if not exclude_address(address):
addresses.add(address)
# Print debug information about the excluded targets
if logger.getEffectiveLevel() <= logging.DEBUG and excluded_target_map:
logger.debug('excludes:\n {excludes}'
.format(excludes='\n '.join(self._exclude_target_regexps)))
targets = ', '.join(excluded_target_map[self._UNMATCHED_KEY])
logger.debug('Targets after excludes: %s', targets)
excluded_count = 0
for pattern, targets in six.iteritems(excluded_target_map):
if pattern != self._UNMATCHED_KEY:
logger.debug('Targets excluded by pattern {pattern}\n {targets}'
.format(pattern=pattern,
targets='\n '.join(targets)))
excluded_count += len(targets)
logger.debug('Excluded {count} target{plural}.'
.format(count=excluded_count,
plural=('s' if excluded_count != 1 else '')))
return addresses
@staticmethod
def is_declaring_file(address, file_path):
return address.build_file.relpath == file_path
def _scan_spec(self, spec, fail_fast):
"""Scans the given address spec."""
errored_out = []
if type(spec) is DescendantAddresses:
addresses = set()
try:
build_files = self.scan_build_files(base_path=spec.directory)
except BuildFile.BuildFileError as e:
raise AddressLookupError(e)
for build_file in build_files:
try:
addresses.update(self.addresses_in_spec_path(os.path.dirname(build_file)))
except (BuildFile.BuildFileError, AddressLookupError) as e:
if fail_fast:
raise AddressLookupError(e)
errored_out.append('--------------------')
errored_out.append(traceback.format_exc())
errored_out.append('Exception message: {0}'.format(e))
if errored_out:
error_msg = '\n'.join(errored_out + ["Invalid BUILD files for [{0}]".format(spec.to_spec_string())])
raise AddressLookupError(error_msg)
return addresses
elif type(spec) is SiblingAddresses:
return set(self.addresses_in_spec_path(spec.directory))
elif type(spec) is SingleAddress:
return {self.spec_to_address(spec.to_spec_string())}
else:
raise ValueError('Unsupported Spec type: {}'.format(spec))
def _raise_incorrect_address_error(self, spec_path, wrong_target_name, addresses):
"""Search through the list of targets and return those which originate from the same folder
which wrong_target_name resides in.
:raises: A helpful error message listing possible correct target addresses.
"""
was_not_found_message = '{target_name} was not found in BUILD files from {spec_path}'.format(
target_name=wrong_target_name, spec_path=spec_path)
if not addresses:
raise self.EmptyBuildFileError(
'{was_not_found_message}, because that directory contains no BUILD files defining addressable entities.'
.format(was_not_found_message=was_not_found_message))
    # Only append BUILD file basenames when the candidate addresses span more than one BUILD file.
if (any(not hasattr(address, 'build_file') for address in addresses) or
len(set(address.build_file for address in addresses)) == 1):
specs = [':{}'.format(address.target_name) for address in addresses]
else:
specs = [':{} (from {})'.format(address.target_name, os.path.basename(address.build_file.relpath))
for address in addresses]
# Might be neat to sort by edit distance or something, but for now alphabetical is fine.
specs.sort()
# Give different error messages depending on whether BUILD file was empty.
one_of = ' one of' if len(specs) > 1 else '' # Handle plurality, just for UX.
raise self.AddressNotInBuildFile(
'{was_not_found_message}. Perhaps you '
'meant{one_of}: \n {specs}'.format(was_not_found_message=was_not_found_message,
one_of=one_of,
specs='\n '.join(specs)))
|