import os
from os import mkdir, chmod
from os.path import join, exists
import stat
import tempfile
import shutil
import sys
import hashlib
import bz2
import difflib
from zipfile import ZipFile
from subprocess import Popen, PIPE
from nose.tools import *
import unittest
from pixiepatch import *
from pixiepatch.bz2compressor import BZ2Compressor
from pixiepatch.ziphandler import ZIPHandler
from pixiepatch.reader import URLReader
class Base(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
self.sources = [join(self.dir, 'source-%i' % i) for i in range(1, 4)]
self.dists = [join(self.dir, 'dist-%i' % i) for i in range(1, 4)]
for name in self.sources + self.dists:
mkdir(name)
def tearDown(self):
shutil.rmtree(self.dir)
class TextDiffer(Differ):
def diff(self, source, target):
return '\n'.join(difflib.unified_diff(source.split('\n'), target.split('\n')))
def patch(self, source, patch):
with tempfile.NamedTemporaryFile('w') as f:
f.write(source)
f.flush()
return Popen(['patch', '-o', '-', f.name], stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate(patch)[0]
extension = '.patch'
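# Illustrative sketch (not part of the original tests): TextDiffer above builds a
# unified diff with difflib and applies it by piping the diff through the external
# `patch` utility.  This hypothetical helper shows the round trip; it assumes
# `patch(1)` is available on PATH and is never called by the tests.
def _example_textdiffer_roundtrip():
    differ = TextDiffer()
    source = 'line one\nline two\nline three\n'
    target = 'line one\nline 2\nline three\n'
    delta = differ.diff(source, target)   # unified diff as a single string
    return differ.patch(source, delta)    # output of `patch -o - <tmpfile>`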
class TestPatch(Base):
def setUp(self):
Base.setUp(self)
self.pp = PixiePatch(differ=TextDiffer(), reader=URLReader('file://' + self.dir + '/dist-'))
self.pp.register_ignore_pattern('^ignore$')
with open(join(self.sources[0], 'a'), 'w') as f:
f.write('test\n' * 100)
with open(join(self.sources[0], 'b'), 'w') as f:
f.write('v1\n' * 100)
with open(join(self.sources[0], 'c'), 'w') as f:
f.write(''.join(['test %i\n' % i for i in range(100)]))
f.write('v1\n')
with open(join(self.sources[0], 'd'), 'w') as f:
f.write('test\n' * 100)
with open(join(self.sources[0], 'e'), 'w') as f:
f.write(''.join(['test %i\n' % i for i in range(100)]))
f.write('v1\n')
with open(join(self.sources[1], 'a'), 'w') as f:
f.write('test\n' * 100)
with open(join(self.sources[1], 'b'), 'w') as f:
f.write('v2\n' * 100)
with open(join(self.sources[1], 'c'), 'w') as f:
f.write(''.join(['test %i\n' % i for i in range(100)]))
f.write('v2\n')
with open(join(self.sources[1], 'e'), 'w') as f:
f.write(''.join(['test %i\n' % i for i in range(100)]))
f.write('v2\n')
with open(join(self.sources[1], 'f'), 'w') as f:
f.write('test\n' * 100)
chmod(join(self.sources[1], 'f'), stat.S_IREAD)
with open(join(self.sources[2], 'a'), 'w') as f:
f.write('test\n' * 100)
with open(join(self.sources[2], 'b'), 'w') as f:
f.write('v2\n' * 100)
with open(join(self.sources[2], 'c'), 'w') as f:
f.write(''.join(['test %i\n' % i for i in range(100)]))
f.write('v3\n')
with open(join(self.sources[2], 'e'), 'w') as f:
f.write(''.join(['test %i\n' % i for i in range(100)]))
f.write('v2\n')
with open(join(self.sources[2], 'f'), 'w') as f:
f.write('test\n' * 100)
self.pp.make_distribution('1', self.sources[0], self.dists[0])
self.pp.make_distribution('2', self.sources[1], self.dists[1], self.dists[0])
self.pp.make_distribution('3', self.sources[2], self.dists[2], self.dists[1])
def test_plans(self):
# version 1 -> 2
client_manifest = self.pp.create_client_manifest('1', self.sources[0])
plan = self.pp.get_patch_plan(client_manifest, '2')
assert set(plan['download']) == set(['b', 'f'])
assert set(plan['delete']) == set(['d'])
assert set([p[0] for p in plan['patch']]) == set(['c', 'e'])
# version 2 -> 3
client_manifest = self.pp.create_client_manifest('2', self.sources[1])
plan = self.pp.get_patch_plan(client_manifest, '3')
assert set(plan['download']) == set([])
assert set(plan['delete']) == set([])
assert set([p[0] for p in plan['patch']]) == set(['c'])
assert plan['patch'][0][1] == ['3']
# version 1 -> 3
client_manifest = self.pp.create_client_manifest('1', self.sources[0])
plan = self.pp.get_patch_plan(client_manifest, '3')
assert set(plan['download']) == set(['b', 'f'])
assert set(plan['delete']) == set(['d'])
assert set([p[0] for p in plan['patch']]) == set(['c', 'e'])
for name, chain in plan['patch']:
if name == 'c':
assert chain == ['2', '3']
else:
assert chain == ['2']
def test_ignore(self):
# version 1 -> 2
with open(join(self.sources[0], 'ignore'), 'w') as f:
f.write('ignore\n')
client_manifest = self.pp.create_client_manifest('1', self.sources[0])
plan = self.pp.get_patch_plan(client_manifest, '2')
assert set(plan['download']) == set(['b', 'f'])
assert set(plan['delete']) == set(['d'])
assert set([p[0] for p in plan['patch']]) == set(['c', 'e'])
def test_patch_1_to_2(self):
client_manifest = self.pp.create_client_manifest('1', self.sources[0])
plan = self.pp.get_patch_plan(client_manifest, '2')
self.pp.patch(self.sources[0], plan)
diff = Popen(['diff', '-ru', self.sources[0], self.sources[1]], stdout=PIPE).communicate()[0]
self.assertEqual(diff, '')
def test_patch_2_to_3(self):
client_manifest = self.pp.create_client_manifest('2', self.sources[1])
plan = self.pp.get_patch_plan(client_manifest, '3')
self.pp.patch(self.sources[1], plan)
diff = Popen(['diff', '-ru', self.sources[1], self.sources[2]], stdout=PIPE).communicate()[0]
self.assertEqual(diff, '')
def test_patch_1_to_3(self):
client_manifest = self.pp.create_client_manifest('1', self.sources[0])
plan = self.pp.get_patch_plan(client_manifest, '3')
self.pp.patch(self.sources[0], plan)
diff = Popen(['diff', '-ru', self.sources[0], self.sources[2]], stdout=PIPE).communicate()[0]
self.assertEqual(diff, '')
def test_mode(self):
# version 1 -> 2
client_manifest = self.pp.create_client_manifest('1', self.sources[0])
plan = self.pp.get_patch_plan(client_manifest, '2')
self.pp.patch(self.sources[0], plan)
stats = os.stat(join(self.sources[0], 'f'))
assert stats.st_mode & 0777 == stat.S_IREAD
class TestZipPatch(Base):
def setUp(self):
Base.setUp(self)
self.pp = PixiePatch(differ=TextDiffer(), reader=URLReader('file://' + self.dir + '/dist-'))
self.pp.register_archive_handler('.zip', ZIPHandler())
with ZipFile(join(self.sources[0], 'a.zip'), 'w') as f:
f.writestr('a', 'test\n' * 100)
f.writestr('b', 'v1\n' * 100)
f.writestr('c', ''.join(['test %i\n' % i for i in range(100)]) + 'v1\n')
f.writestr('d','test\n' * 100)
f.writestr('e', ''.join(['test %i\n' % i for i in range(100)]) + 'v1\n')
with ZipFile(join(self.sources[1], 'a.zip'), 'w') as f:
f.writestr('a', 'test\n' * 100)
f.writestr('b', 'v2\n' * 100)
f.writestr('c', ''.join(['test %i\n' % i for i in range(100)]) + 'v2\n')
f.writestr('e', ''.join(['test %i\n' % i for i in range(100)]) + 'v2\n')
f.writestr('f', 'test\n' * 100)
with ZipFile(join(self.sources[2], 'a.zip'), 'w') as f:
f.writestr('a', 'test\n' * 100)
f.writestr('b', 'v2\n' * 100)
f.writestr('c', ''.join(['test %i\n' % i for i in range(100)]) + 'v3\n')
f.writestr('e', ''.join(['test %i\n' % i for i in range(100)]) + 'v2\n')
f.writestr('f', 'test\n' * 100)
self.pp.make_distribution('1', self.sources[0], self.dists[0])
self.pp.make_distribution('2', self.sources[1], self.dists[1], self.dists[0])
self.pp.make_distribution('3', self.sources[2], self.dists[2], self.dists[1])
def read_zip(self, archive):
entries = set()
with ZipFile(archive, 'r') as zip:
for name in zip.namelist():
info = zip.getinfo(name)
entries.add((name, info.CRC))
return entries
def test_plans(self):
# version 1 -> 2
client_manifest = self.pp.create_client_manifest('1', self.sources[0])
plan = self.pp.get_patch_plan(client_manifest, '2')
assert set(plan['download']) == set(['a.zip/b', 'a.zip/f'])
assert set(plan['delete']) == set(['a.zip/d'])
assert set([p[0] for p in plan['patch']]) == set(['a.zip/c', 'a.zip/e'])
# version 2 -> 3
client_manifest = self.pp.create_client_manifest('2', self.sources[1])
plan = self.pp.get_patch_plan(client_manifest, '3')
assert set(plan['download']) == set([])
assert set(plan['delete']) == set([])
assert set([p[0] for p in plan['patch']]) == set(['a.zip/c'])
assert plan['patch'][0][1] == ['3']
# version 1 -> 3
client_manifest = self.pp.create_client_manifest('1', self.sources[0])
plan = self.pp.get_patch_plan(client_manifest, '3')
assert set(plan['download']) == set(['a.zip/b', 'a.zip/f'])
assert set(plan['delete']) == set(['a.zip/d'])
assert set([p[0] for p in plan['patch']]) == set(['a.zip/c', 'a.zip/e'])
for name, chain in plan['patch']:
if name == 'a.zip/c':
assert chain == ['2', '3']
else:
assert chain == ['2']
def test_patch_1_to_2(self):
client_manifest = self.pp.create_client_manifest('1', self.sources[0])
plan = self.pp.get_patch_plan(client_manifest, '2')
self.pp.patch(self.sources[0], plan)
patched = self.read_zip(join(self.sources[0], 'a.zip'))
target = self.read_zip(join(self.sources[1], 'a.zip'))
self.assertEqual(patched, target)
def test_patch_2_to_3(self):
client_manifest = self.pp.create_client_manifest('2', self.sources[1])
plan = self.pp.get_patch_plan(client_manifest, '3')
self.pp.patch(self.sources[1], plan)
patched = self.read_zip(join(self.sources[1], 'a.zip'))
target = self.read_zip(join(self.sources[2], 'a.zip'))
self.assertEqual(patched, target)
def test_patch_1_to_3(self):
client_manifest = self.pp.create_client_manifest('1', self.sources[0])
plan = self.pp.get_patch_plan(client_manifest, '3')
self.pp.patch(self.sources[0], plan)
patched = self.read_zip(join(self.sources[0], 'a.zip'))
target = self.read_zip(join(self.sources[2], 'a.zip'))
self.assertEqual(patched, target)
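# Illustrative sketch (not part of the original tests): the client-side update flow
# exercised by TestPatch, written as a plain routine.  `install_dir`, `base_url` and
# the version strings are hypothetical arguments; the server side is assumed to have
# published each version with make_distribution(), as in setUp() above.
def _example_update_client(install_dir, installed_version, target_version, base_url):
    pp = PixiePatch(differ=TextDiffer(), reader=URLReader(base_url))
    manifest = pp.create_client_manifest(installed_version, install_dir)
    plan = pp.get_patch_plan(manifest, target_version)
    # `plan` maps 'download', 'delete' and 'patch' to the affected file names
    pp.patch(install_dir, plan)
    return plan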
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
PXE Driver and supporting meta-classes.
"""
import os
import shutil
from oslo.config import cfg
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import image_service as service
from ironic.common import keystone
from ironic.common import paths
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
pxe_opts = [
cfg.StrOpt('pxe_config_template',
default=paths.basedir_def(
'drivers/modules/pxe_config.template'),
help='Template file for PXE configuration.'),
cfg.StrOpt('uefi_pxe_config_template',
default=paths.basedir_def(
'drivers/modules/elilo_efi_pxe_config.template'),
help='Template file for PXE configuration for UEFI boot'
' loader.'),
cfg.StrOpt('tftp_server',
default='$my_ip',
help='IP address of Ironic compute node\'s tftp server.'),
cfg.StrOpt('tftp_root',
default='/tftpboot',
help='Ironic compute node\'s tftp root path.'),
cfg.StrOpt('tftp_master_path',
default='/tftpboot/master_images',
help='Directory where master tftp images are stored on disk.'),
# NOTE(dekehn): Additional boot files options may be created in the event
# other architectures require different boot files.
cfg.StrOpt('pxe_bootfile_name',
default='pxelinux.0',
help='Bootfile DHCP parameter.'),
cfg.StrOpt('uefi_pxe_bootfile_name',
default='elilo.efi',
help='Bootfile DHCP parameter for UEFI boot mode.'),
cfg.StrOpt('http_url',
help='Ironic compute node\'s HTTP server URL. '
'Example: http://192.1.2.3:8080'),
cfg.StrOpt('http_root',
default='/httpboot',
help='Ironic compute node\'s HTTP root path.'),
cfg.BoolOpt('ipxe_enabled',
default=False,
help='Enable iPXE boot.'),
cfg.StrOpt('ipxe_boot_script',
default=paths.basedir_def(
'drivers/modules/boot.ipxe'),
help='The path to the main iPXE script file.'),
]
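# Illustrative example (not part of the module): the options above are read from the
# [pxe] section of ironic.conf.  The values below are placeholders, not recommendations.
#
#     [pxe]
#     tftp_server = 192.0.2.1
#     tftp_root = /tftpboot
#     ipxe_enabled = true
#     http_url = http://192.0.2.1:8080
#     http_root = /httpboot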
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(pxe_opts, group='pxe')
CONF.import_opt('deploy_callback_timeout', 'ironic.conductor.manager',
group='conductor')
REQUIRED_PROPERTIES = {
'pxe_deploy_kernel': _("UUID (from Glance) of the deployment kernel. "
"Required."),
'pxe_deploy_ramdisk': _("UUID (from Glance) of the ramdisk that is "
"mounted at boot time. Required."),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES
def _parse_driver_info(node):
"""Gets the driver specific Node deployment info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the driver_info values.
:raises: MissingParameterValue
"""
info = node.driver_info
d_info = {}
d_info['deploy_kernel'] = info.get('pxe_deploy_kernel')
d_info['deploy_ramdisk'] = info.get('pxe_deploy_ramdisk')
error_msg = _("Cannot validate PXE bootloader. Some parameters were"
" missing in node's driver_info")
deploy_utils.check_for_missing_params(d_info, error_msg, 'pxe_')
return d_info
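# Illustrative example (not part of the driver): a node's driver_info that satisfies
# _parse_driver_info() above.  The UUIDs are placeholders for Glance image IDs.
_EXAMPLE_DRIVER_INFO = {
    'pxe_deploy_kernel': 'ac44f6aa-0000-0000-0000-000000000001',
    'pxe_deploy_ramdisk': 'ac44f6aa-0000-0000-0000-000000000002',
}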
def _parse_deploy_info(node):
"""Gets the instance and driver specific Node deployment info.
This method validates whether the 'instance_info' and 'driver_info'
property of the supplied node contains the required information for
this driver to deploy images to the node.
:param node: a single Node.
:returns: A dict with the instance_info and driver_info values.
:raises: MissingParameterValue
:raises: InvalidParameterValue
"""
info = {}
info.update(iscsi_deploy.parse_instance_info(node))
info.update(_parse_driver_info(node))
return info
def _build_pxe_config_options(node, pxe_info, ctx):
"""Build the PXE config options for a node
This method builds the PXE boot options for a node,
given all the required parameters.
The options should then be passed to pxe_utils.create_pxe_config to
create the actual config files.
:param node: a single Node.
:param pxe_info: a dict of values to set on the configuration file
:param ctx: security context
:returns: A dictionary of pxe options to be used in the pxe bootfile
template.
"""
if CONF.pxe.ipxe_enabled:
deploy_kernel = '/'.join([CONF.pxe.http_url, node.uuid,
'deploy_kernel'])
deploy_ramdisk = '/'.join([CONF.pxe.http_url, node.uuid,
'deploy_ramdisk'])
kernel = '/'.join([CONF.pxe.http_url, node.uuid, 'kernel'])
ramdisk = '/'.join([CONF.pxe.http_url, node.uuid, 'ramdisk'])
else:
deploy_kernel = pxe_info['deploy_kernel'][1]
deploy_ramdisk = pxe_info['deploy_ramdisk'][1]
kernel = pxe_info['kernel'][1]
ramdisk = pxe_info['ramdisk'][1]
pxe_options = {
'deployment_aki_path': deploy_kernel,
'deployment_ari_path': deploy_ramdisk,
'aki_path': kernel,
'ari_path': ramdisk,
'pxe_append_params': CONF.pxe.pxe_append_params,
'tftp_server': CONF.pxe.tftp_server
}
deploy_ramdisk_options = iscsi_deploy.build_deploy_ramdisk_options(node)
pxe_options.update(deploy_ramdisk_options)
return pxe_options
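# Illustrative example (not part of the driver): shape of the mapping returned by
# _build_pxe_config_options() for a non-iPXE deploy, before the iSCSI ramdisk options
# are merged in.  All values are placeholders.
_EXAMPLE_PXE_OPTIONS = {
    'deployment_aki_path': '/tftpboot/<node-uuid>/deploy_kernel',
    'deployment_ari_path': '/tftpboot/<node-uuid>/deploy_ramdisk',
    'aki_path': '/tftpboot/<node-uuid>/kernel',
    'ari_path': '/tftpboot/<node-uuid>/ramdisk',
    'pxe_append_params': 'nofb nomodeset vga=normal',
    'tftp_server': '$my_ip',
}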
def _get_token_file_path(node_uuid):
"""Generate the path for PKI token file."""
return os.path.join(CONF.pxe.tftp_root, 'token-' + node_uuid)
@image_cache.cleanup(priority=25)
class TFTPImageCache(image_cache.ImageCache):
def __init__(self, image_service=None):
super(TFTPImageCache, self).__init__(
CONF.pxe.tftp_master_path,
# MiB -> B
cache_size=CONF.pxe.image_cache_size * 1024 * 1024,
# min -> sec
cache_ttl=CONF.pxe.image_cache_ttl * 60,
image_service=image_service)
def _cache_ramdisk_kernel(ctx, node, pxe_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(pxe_utils.get_root_dir(), node.uuid))
LOG.debug("Fetching kernel and ramdisk for node %s",
node.uuid)
deploy_utils.fetch_images(ctx, TFTPImageCache(), pxe_info.values(),
CONF.force_raw_images)
def _get_image_info(node, ctx):
"""Generate the paths for tftp files for this instance
Raises IronicException if
- instance does not contain kernel_id or ramdisk_id
- deploy_kernel_id or deploy_ramdisk_id can not be read from
driver_info and defaults are not set
"""
d_info = _parse_deploy_info(node)
image_info = {}
root_dir = pxe_utils.get_root_dir()
image_info.update(pxe_utils.get_deploy_kr_info(node.uuid, d_info))
i_info = node.instance_info
labels = ('kernel', 'ramdisk')
if not (i_info.get('kernel') and i_info.get('ramdisk')):
glance_service = service.Service(version=1, context=ctx)
iproperties = glance_service.show(d_info['image_source'])['properties']
for label in labels:
i_info[label] = str(iproperties[label + '_id'])
node.instance_info = i_info
node.save()
for label in labels:
image_info[label] = (
i_info[label],
os.path.join(root_dir, node.uuid, label)
)
return image_info
def _create_token_file(task):
"""Save PKI token to file."""
token_file_path = _get_token_file_path(task.node.uuid)
token = task.context.auth_token
if token:
timeout = CONF.conductor.deploy_callback_timeout
if timeout and keystone.token_expires_soon(token, timeout):
token = keystone.get_admin_auth_token()
utils.write_to_file(token_file_path, token)
else:
utils.unlink_without_raise(token_file_path)
def _destroy_token_file(node):
"""Delete PKI token file."""
token_file_path = _get_token_file_path(node['uuid'])
utils.unlink_without_raise(token_file_path)
class PXEDeploy(base.DeployInterface):
"""PXE Deploy Interface for deploy-related actions."""
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Validate the deployment information for the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue.
:raises: MissingParameterValue
"""
# Check the boot_mode capability parameter value.
driver_utils.validate_boot_mode_capability(task.node)
if CONF.pxe.ipxe_enabled:
if not CONF.pxe.http_url or not CONF.pxe.http_root:
raise exception.MissingParameterValue(_(
"iPXE boot is enabled but no HTTP URL or HTTP "
"root was specified."))
# iPXE and UEFI should not be configured together.
if driver_utils.get_node_capability(task.node,
'boot_mode') == 'uefi':
LOG.error(_LE("UEFI boot mode is not supported with "
"iPXE boot enabled."))
raise exception.InvalidParameterValue(_(
"Conflict: iPXE is enabled, but cannot be used with node"
"%(node_uuid)s configured to use UEFI boot") %
{'node_uuid': task.node.uuid})
d_info = _parse_deploy_info(task.node)
iscsi_deploy.validate(task)
props = ['kernel_id', 'ramdisk_id']
iscsi_deploy.validate_glance_image_properties(task.context, d_info,
props)
@task_manager.require_exclusive_lock
def deploy(self, task):
"""Start deployment of the task's node'.
Fetches instance image, creates a temporary keystone token file,
updates the DHCP port options for next boot, and issues a reboot
request to the power driver.
This causes the node to boot into the deployment ramdisk and triggers
the next phase of PXE-based deployment via
VendorPassthru._continue_deploy().
:param task: a TaskManager instance containing the node to act on.
:returns: deploy state DEPLOYWAIT.
"""
iscsi_deploy.cache_instance_image(task.context, task.node)
iscsi_deploy.check_image_size(task)
# TODO(yuriyz): more secure way needed for pass auth token
# to deploy ramdisk
_create_token_file(task)
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts)
# NOTE(faizan): Under UEFI boot mode, setting of boot device may differ
# between different machines. IPMI does not work for setting boot
# devices in UEFI mode for certain machines.
# Expected IPMI failure for uefi boot mode. Logging a message to
# set the boot device manually and continue with deploy.
try:
manager_utils.node_set_boot_device(task, 'pxe', persistent=True)
except exception.IPMIFailure:
if driver_utils.get_node_capability(task.node,
'boot_mode') == 'uefi':
LOG.warning(_LW("ipmitool is unable to set boot device while "
"the node is in UEFI boot mode."
"Please set the boot device manually."))
else:
raise
manager_utils.node_power_action(task, states.REBOOT)
return states.DEPLOYWAIT
@task_manager.require_exclusive_lock
def tear_down(self, task):
"""Tear down a previous deployment on the task's node.
Power off the node. All actual clean-up is done in the clean_up()
method which should be called separately.
:param task: a TaskManager instance containing the node to act on.
:returns: deploy state DELETED.
"""
manager_utils.node_power_action(task, states.POWER_OFF)
return states.DELETED
def prepare(self, task):
"""Prepare the deployment environment for this task's node.
Generates the TFTP configuration for PXE-booting both the deployment
        and user images, fetches the TFTP image from Glance and adds it to the
local cache.
:param task: a TaskManager instance containing the node to act on.
"""
# TODO(deva): optimize this if rerun on existing files
if CONF.pxe.ipxe_enabled:
# Copy the iPXE boot script to HTTP root directory
bootfile_path = os.path.join(CONF.pxe.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script))
shutil.copyfile(CONF.pxe.ipxe_boot_script, bootfile_path)
pxe_info = _get_image_info(task.node, task.context)
pxe_options = _build_pxe_config_options(task.node, pxe_info,
task.context)
if driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi':
pxe_config_template = CONF.pxe.uefi_pxe_config_template
else:
pxe_config_template = CONF.pxe.pxe_config_template
pxe_utils.create_pxe_config(task, pxe_options,
pxe_config_template)
_cache_ramdisk_kernel(task.context, task.node, pxe_info)
def clean_up(self, task):
"""Clean up the deployment environment for the task's node.
Unlinks TFTP and instance images and triggers image cache cleanup.
Removes the TFTP configuration files for this node. As a precaution,
this method also ensures the keystone auth token file was removed.
:param task: a TaskManager instance containing the node to act on.
"""
node = task.node
try:
pxe_info = _get_image_info(node, task.context)
except exception.MissingParameterValue as e:
LOG.warning(_LW('Could not get image info to clean up images '
'for node %(node)s: %(err)s'),
{'node': node.uuid, 'err': e})
else:
for label in pxe_info:
path = pxe_info[label][1]
utils.unlink_without_raise(path)
TFTPImageCache().clean_up()
pxe_utils.clean_up_pxe_config(task)
iscsi_deploy.destroy_images(node.uuid)
_destroy_token_file(node)
def take_over(self, task):
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts)
class VendorPassthru(base.VendorInterface):
"""Interface to mix IPMI and PXE vendor-specific interfaces."""
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task, **kwargs):
"""Validates the inputs for a vendor passthru.
This method checks whether the vendor passthru method is a valid one,
and then validates whether the required information for executing the
vendor passthru has been provided or not.
:param task: a TaskManager instance containing the node to act on.
        :param kwargs: kwargs containing the method name and its parameters.
        :raises: InvalidParameterValue if the method is invalid or any of its
            parameters are invalid.
"""
iscsi_deploy.get_deploy_info(task.node, **kwargs)
@base.passthru(['POST'], method='pass_deploy_info')
@task_manager.require_exclusive_lock
def _continue_deploy(self, task, **kwargs):
"""Continues the deployment of baremetal node over iSCSI.
This method continues the deployment of the baremetal node over iSCSI
from where the deployment ramdisk has left off.
:param task: a TaskManager instance containing the node to act on.
:param kwargs: kwargs for performing iscsi deployment.
"""
node = task.node
if node.provision_state != states.DEPLOYWAIT:
LOG.error(_LE('Node %s is not waiting to be deployed.'), node.uuid)
return
_destroy_token_file(node)
root_uuid = iscsi_deploy.continue_deploy(task, **kwargs)
if not root_uuid:
return
try:
pxe_config_path = pxe_utils.get_pxe_config_file_path(node.uuid)
deploy_utils.switch_pxe_config(pxe_config_path, root_uuid,
driver_utils.get_node_capability(node, 'boot_mode'))
deploy_utils.notify_deploy_complete(kwargs['address'])
LOG.info(_LI('Deployment to node %s done'), node.uuid)
node.provision_state = states.ACTIVE
node.target_provision_state = states.NOSTATE
node.save()
except Exception as e:
LOG.error(_LE('Deploy failed for instance %(instance)s. '
'Error: %(error)s'),
{'instance': node.instance_uuid, 'error': e})
msg = _('Failed to continue iSCSI deployment.')
deploy_utils.set_failed_state(task, msg)
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reader_ops."""
# pylint: disable=no-name-in-module,unused-import,g-bad-import-order,maybe-no-member,no-member,g-importing-member
import os.path
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from syntaxnet import dictionary_pb2
from syntaxnet import graph_builder
from syntaxnet import sparse_pb2
from syntaxnet.ops import gen_parser_ops
FLAGS = tf.app.flags.FLAGS
if not hasattr(FLAGS, 'test_srcdir'):
FLAGS.test_srcdir = ''
if not hasattr(FLAGS, 'test_tmpdir'):
FLAGS.test_tmpdir = tf.test.get_temp_dir()
class ParsingReaderOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
# Creates a task context with the correct testing paths.
initial_task_context = os.path.join(FLAGS.test_srcdir,
'syntaxnet/'
'testdata/context.pbtxt')
self._task_context = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
with open(initial_task_context, 'r') as fin:
with open(self._task_context, 'w') as fout:
fout.write(fin.read().replace('SRCDIR', FLAGS.test_srcdir)
.replace('OUTPATH', FLAGS.test_tmpdir))
# Creates necessary term maps.
with self.test_session() as sess:
gen_parser_ops.lexicon_builder(task_context=self._task_context,
corpus_name='training-corpus').run()
self._num_features, self._num_feature_ids, _, self._num_actions = (
sess.run(gen_parser_ops.feature_size(task_context=self._task_context,
arg_prefix='brain_parser')))
def GetMaxId(self, sparse_features):
max_id = 0
for x in sparse_features:
for y in x:
f = sparse_pb2.SparseFeatures()
f.ParseFromString(y)
for i in f.id:
max_id = max(i, max_id)
return max_id
def testParsingReaderOp(self):
# Runs the reader over the test input for two epochs.
num_steps_a = 0
num_actions = 0
num_word_ids = 0
num_tag_ids = 0
num_label_ids = 0
batch_size = 10
with self.test_session() as sess:
(words, tags, labels), epochs, gold_actions = (
gen_parser_ops.gold_parse_reader(self._task_context,
3,
batch_size,
corpus_name='training-corpus'))
while True:
tf_gold_actions, tf_epochs, tf_words, tf_tags, tf_labels = (
sess.run([gold_actions, epochs, words, tags, labels]))
num_steps_a += 1
num_actions = max(num_actions, max(tf_gold_actions) + 1)
num_word_ids = max(num_word_ids, self.GetMaxId(tf_words) + 1)
num_tag_ids = max(num_tag_ids, self.GetMaxId(tf_tags) + 1)
num_label_ids = max(num_label_ids, self.GetMaxId(tf_labels) + 1)
self.assertIn(tf_epochs, [0, 1, 2])
if tf_epochs > 1:
break
# Runs the reader again, this time with a lot of added graph nodes.
num_steps_b = 0
with self.test_session() as sess:
num_features = [6, 6, 4]
num_feature_ids = [num_word_ids, num_tag_ids, num_label_ids]
embedding_sizes = [8, 8, 8]
hidden_layer_sizes = [32, 32]
# Here we aim to test the iteration of the reader op in a complex network,
# not the GraphBuilder.
parser = graph_builder.GreedyParser(
num_actions, num_features, num_feature_ids, embedding_sizes,
hidden_layer_sizes)
parser.AddTraining(self._task_context,
batch_size,
corpus_name='training-corpus')
sess.run(parser.inits.values())
while True:
tf_epochs, tf_cost, _ = sess.run(
[parser.training['epochs'], parser.training['cost'],
parser.training['train_op']])
num_steps_b += 1
self.assertGreaterEqual(tf_cost, 0)
self.assertIn(tf_epochs, [0, 1, 2])
if tf_epochs > 1:
break
# Assert that the two runs made the exact same number of steps.
logging.info('Number of steps in the two runs: %d, %d',
num_steps_a, num_steps_b)
self.assertEqual(num_steps_a, num_steps_b)
def testParsingReaderOpWhileLoop(self):
feature_size = 3
batch_size = 5
def ParserEndpoints():
return gen_parser_ops.gold_parse_reader(self._task_context,
feature_size,
batch_size,
corpus_name='training-corpus')
with self.test_session() as sess:
# The 'condition' and 'body' functions expect as many arguments as there
# are loop variables. 'condition' depends on the 'epoch' loop variable
# only, so we disregard the remaining unused function arguments. 'body'
# returns a list of updated loop variables.
def Condition(epoch, *unused_args):
return tf.less(epoch, 2)
def Body(epoch, num_actions, *feature_args):
# By adding one of the outputs of the reader op ('epoch') as a control
# dependency to the reader op we force the repeated evaluation of the
# reader op.
with epoch.graph.control_dependencies([epoch]):
features, epoch, gold_actions = ParserEndpoints()
num_actions = tf.maximum(num_actions,
tf.reduce_max(gold_actions, [0], False) + 1)
feature_ids = []
for i in range(len(feature_args)):
feature_ids.append(features[i])
return [epoch, num_actions] + feature_ids
epoch = ParserEndpoints()[-2]
num_actions = tf.constant(0)
loop_vars = [epoch, num_actions]
res = sess.run(
tf.while_loop(Condition, Body, loop_vars,
shape_invariants=[tf.TensorShape(None)] * 2,
parallel_iterations=1))
logging.info('Result: %s', res)
self.assertEqual(res[0], 2)
def _token_embedding(self, token, embedding):
e = dictionary_pb2.TokenEmbedding()
e.token = token
e.vector.values.extend(embedding)
return e.SerializeToString()
def testWordEmbeddingInitializer(self):
# Provide embeddings for the first three words in the word map.
records_path = os.path.join(FLAGS.test_tmpdir, 'records1')
writer = tf.python_io.TFRecordWriter(records_path)
writer.write(self._token_embedding('.', [1, 2]))
writer.write(self._token_embedding(',', [3, 4]))
writer.write(self._token_embedding('the', [5, 6]))
del writer
with self.test_session():
embeddings = gen_parser_ops.word_embedding_initializer(
vectors=records_path,
task_context=self._task_context).eval()
self.assertAllClose(
np.array([[1. / (1 + 4) ** .5, 2. / (1 + 4) ** .5],
[3. / (9 + 16) ** .5, 4. / (9 + 16) ** .5],
[5. / (25 + 36) ** .5, 6. / (25 + 36) ** .5]]),
embeddings[:3,])
def testWordEmbeddingInitializerRepeatability(self):
records_path = os.path.join(FLAGS.test_tmpdir, 'records2')
writer = tf.python_io.TFRecordWriter(records_path)
writer.write(self._token_embedding('.', [1, 2, 3])) # 3 dims
del writer
# As long as there is one non-zero seed, the result should be repeatable.
for seed1, seed2 in [(0, 1), (1, 0), (123, 456)]:
with tf.Graph().as_default(), self.test_session():
embeddings1 = gen_parser_ops.word_embedding_initializer(
vectors=records_path,
task_context=self._task_context,
seed=seed1,
seed2=seed2)
embeddings2 = gen_parser_ops.word_embedding_initializer(
vectors=records_path,
task_context=self._task_context,
seed=seed1,
seed2=seed2)
# The number of terms is based on the word map, which may change if the
# test corpus is updated. Just assert that there are some terms.
self.assertGreater(tf.shape(embeddings1)[0].eval(), 0)
self.assertGreater(tf.shape(embeddings2)[0].eval(), 0)
self.assertEqual(tf.shape(embeddings1)[1].eval(), 3)
self.assertEqual(tf.shape(embeddings2)[1].eval(), 3)
self.assertAllEqual(embeddings1.eval(), embeddings2.eval())
def testWordEmbeddingInitializerFailIfNeitherTaskContextOrVocabulary(self):
with self.test_session():
with self.assertRaises(Exception):
gen_parser_ops.word_embedding_initializer(vectors='/dev/null').eval()
def testWordEmbeddingInitializerFailIfBothTaskContextAndVocabulary(self):
with self.test_session():
with self.assertRaises(Exception):
gen_parser_ops.word_embedding_initializer(
vectors='/dev/null',
task_context='/dev/null',
vocabulary='/dev/null').eval()
def testWordEmbeddingInitializerVocabularyFile(self):
records_path = os.path.join(FLAGS.test_tmpdir, 'records3')
writer = tf.python_io.TFRecordWriter(records_path)
writer.write(self._token_embedding('a', [1, 2, 3]))
writer.write(self._token_embedding('b', [2, 3, 4]))
writer.write(self._token_embedding('c', [3, 4, 5]))
writer.write(self._token_embedding('d', [4, 5, 6]))
writer.write(self._token_embedding('e', [5, 6, 7]))
del writer
vocabulary_path = os.path.join(FLAGS.test_tmpdir, 'vocabulary3')
with open(vocabulary_path, 'w') as vocabulary_file:
vocabulary_file.write('a\nc\ne\nx\n') # 'x' not in pretrained embeddings
# Enumerate a variety of configurations.
for cache_vectors_locally in [False, True]:
for num_special_embeddings in [None, 1, 2, 5]: # None = use default of 3
with self.test_session():
embeddings = gen_parser_ops.word_embedding_initializer(
vectors=records_path,
vocabulary=vocabulary_path,
cache_vectors_locally=cache_vectors_locally,
num_special_embeddings=num_special_embeddings)
# Expect 4 embeddings from the vocabulary plus special embeddings.
expected_num_embeddings = 4 + (num_special_embeddings or 3)
self.assertAllEqual([expected_num_embeddings, 3],
tf.shape(embeddings).eval())
# The first 3 embeddings should be pretrained.
norm_a = (1.0 + 4.0 + 9.0) ** 0.5
norm_c = (9.0 + 16.0 + 25.0) ** 0.5
norm_e = (25.0 + 36.0 + 49.0) ** 0.5
self.assertAllClose([[1.0 / norm_a, 2.0 / norm_a, 3.0 / norm_a],
[3.0 / norm_c, 4.0 / norm_c, 5.0 / norm_c],
[5.0 / norm_e, 6.0 / norm_e, 7.0 / norm_e]],
embeddings[:3].eval())
def testWordEmbeddingInitializerVocabularyFileWithDuplicates(self):
records_path = os.path.join(FLAGS.test_tmpdir, 'records4')
writer = tf.python_io.TFRecordWriter(records_path)
writer.write(self._token_embedding('a', [1, 2, 3]))
writer.write(self._token_embedding('b', [2, 3, 4]))
writer.write(self._token_embedding('c', [3, 4, 5]))
writer.write(self._token_embedding('d', [4, 5, 6]))
writer.write(self._token_embedding('e', [5, 6, 7]))
del writer
vocabulary_path = os.path.join(FLAGS.test_tmpdir, 'vocabulary4')
with open(vocabulary_path, 'w') as vocabulary_file:
vocabulary_file.write('a\nc\ne\nx\ny\nx') # 'x' duplicated
with self.test_session():
with self.assertRaises(Exception):
gen_parser_ops.word_embedding_initializer(
vectors=records_path, vocabulary=vocabulary_path).eval()
if __name__ == '__main__':
googletest.main()
# django-salesforce
#
# by Phil Christensen
# (c) 2012-2013 Freelancers Union (http://www.freelancersunion.org)
# See LICENSE.md for details
#
"""
Customized fields for Salesforce, especially the primary key. (like django.db.models.fields)
"""
from typing import Any, Callable, Optional, Tuple, Type, TYPE_CHECKING, Union
import warnings
from decimal import Decimal
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import gettext_lazy as _
from django.db.backends.base.base import BaseDatabaseWrapper as DatabaseWrapper
from django.db.models import fields
from django.db.models import PROTECT, DO_NOTHING # NOQA pylint:disable=unused-import
from django.db import models
from salesforce.defaults import DEFAULTED_ON_CREATE, DefaultedOnCreate, BaseDefault
# None of the field types defined here need a "deconstruct" method.
# Their parameters only describe the differing, but stable, nature of SF standard objects.
FULL_WRITABLE = 0
NOT_UPDATEABLE = 1
NOT_CREATEABLE = 2
READ_ONLY = 3 # (NOT_UPDATEABLE & NOT_CREATEABLE)
SF_PK = getattr(settings, 'SF_PK', 'id')
if SF_PK not in ('id', 'Id'):
raise ImproperlyConfigured("Value of settings.SF_PK must be 'id' or 'Id' or undefined.")
STANDARD_FIELDS = {
x.lower() for x in (
'Id',
'Name',
'RecordType',
'CreatedDate',
'CreatedBy',
'LastModifiedDate',
'LastModifiedBy',
'SystemModstamp',
'LastActivityDate',
        'LastViewedDate',
'LastReferencedDate',
'IsDeleted',
)
}
class SalesforceAutoField(fields.AutoField):
"""
An AutoField that works with Salesforce primary keys.
It is used by SalesforceModel as a custom primary key. It doesn't convert
its value to int.
"""
description = _("Text")
default_error_messages = {
'invalid': _('This value must be a valid Salesforce ID.'),
}
# the model can be managed by Django also in SFDC databases if 'self.sf_managed_model = True'
sf_managed_model = False
def __init__(self, *args: Any, **kwargs: Any) -> None:
# The parameter 'sf_read_only' is not used normally, maybe only if someone
# added SalesforceAutoFields to the Model manually
kwargs.pop('sf_read_only', None)
self.sf_managed = False
self.sf_managed_model = kwargs.pop('sf_managed_model', False)
super().__init__(*args, **kwargs)
def to_python(self, value: Any) -> Optional[str]:
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value: Any) -> Any:
return self.to_python(value)
def contribute_to_class(self, cls: Type[models.Model], name: str, # noqa pylint:disable=arguments-differ
private_only: bool = False) -> None:
name = name if self.name is None else self.name
# we can't require "self.auto_created==True" due to backward compatibility
# with old migrations created before v0.6. Other conditions are enough.
if name != SF_PK or not self.primary_key:
raise ImproperlyConfigured(
"SalesforceAutoField must be a primary key "
"with the name '%s' in model %s (configurable by settings)." % (SF_PK, cls._meta.object_name))
if cls._meta.auto_field:
            # The model already has another auto_field and a new one is being added.
same_type = type(self) == type(cls._meta.auto_field) # noqa pylint:disable=unidiomatic-typecheck
            # If the previous auto field was created automatically by inheritance
            # from a more abstract class then it is OK to ignore it. In all other
            # cases it is an error.
if same_type and self.model._meta.abstract and cls._meta.auto_field.name == SF_PK:
return
raise ImproperlyConfigured(
"The model %s can not have more than one AutoField, "
"but currently: (%s=%s, %s=%s)" % (
cls,
cls._meta.auto_field.name, cls._meta.auto_field,
name, self
)
)
if getattr(cls._meta, 'sf_managed', False):
self.sf_managed_model = True
super().contribute_to_class(cls, name, private_only=private_only)
cls._meta.auto_field = self
def deconstruct(self) -> Tuple[Any, Any, Any, Any]:
name, path, args, kwargs = super().deconstruct()
if self.db_column == 'Id' and 'db_column' in kwargs:
del kwargs['db_column']
if self.sf_managed_model:
kwargs['sf_managed_model'] = True
return name, path, args, kwargs
class SfField(models.Field):
"""
    Add support for the 'sf_read_only' and 'custom' parameters on Salesforce fields.
    sf_read_only=3 (READ_ONLY): The field can be specified neither on insert nor on update.
    e.g. LastModifiedDate (the most frequent type of read only)
    sf_read_only=1 (NOT_UPDATEABLE): The field can be specified on insert but can not be modified later.
    e.g. ContactId in User object (relatively frequent)
    sf_read_only=2 (NOT_CREATEABLE): The field can not be specified on insert but can be modified later.
    e.g. RecordType.IsActive or Lead.EmailBouncedReason
    sf_read_only=0: normal writable (default)
    custom=True : Add '__c' to the column name if no db_column is defined
    (see the illustrative sketch after this class).
"""
column = None # type: str
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.sf_read_only = kwargs.pop('sf_read_only', 0) # type: int
self.sf_custom = kwargs.pop('custom', None) # type: Optional[bool]
self.sf_namespace = kwargs.pop('sf_namespace', '') # type: str
self.sf_managed = kwargs.pop('sf_managed', None) # type: Optional[bool]
assert (self.sf_custom is None or kwargs.get('db_column') is None or
self.sf_custom == kwargs['db_column'].endswith('__c'))
assert not self.sf_namespace or self.sf_custom is not False
if kwargs.get('default') is DEFAULTED_ON_CREATE:
kwargs['default'] = DefaultedOnCreate(internal_type=self.get_internal_type())
super().__init__(*args, **kwargs)
def deconstruct(self) -> Tuple[Any, Any, Any, Any]:
name, path, args, kwargs = super().deconstruct()
if self.name:
policy = 'minimal'
_, column = self.get_attname_column()
if '__' in column or policy != 'minimal':
kwargs['db_column'] = column
else:
tmp_db_column = self.db_column
self.db_column = None
_, auto_db_column = self.get_attname_column()
self.db_column = tmp_db_column
if column != auto_db_column:
kwargs['db_column'] = column
elif 'db_column' in kwargs:
del kwargs['db_column']
if self.sf_managed is not None:
kwargs['sf_managed'] = self.sf_managed
return name, path, args, kwargs
def get_attname_column(self) -> Tuple[str, str]:
"""
Get the database column name automatically in most cases.
"""
# See "A guide to Field parameters": django/db/models/fields/__init__.py
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
attname = self.get_attname()
if self.db_column is not None:
# explicit name
column = self.db_column
else:
if not self.name.islower():
# a Salesforce style name e.g. 'LastName' or 'MyCustomField'
column = self.name
else:
# a Django style name like 'last_name' or 'my_custom_field'
column = self.name.title().replace('_', '')
# Fix custom fields
if self.sf_custom:
column = column + '__c'
if self.sf_namespace:
column = self.sf_namespace + '__' + column
return attname, column
def contribute_to_class(self, cls: Type[models.Model], name: str, private_only: bool = False) -> None:
super().contribute_to_class(cls, name, private_only=private_only)
is_custom_model = getattr(cls._meta, 'sf_custom', False)
if self.sf_custom is None and is_custom_model and self.column.lower() not in STANDARD_FIELDS:
            # Custom fields are recognized automatically only in custom models
            # explicitly marked by Meta custom=True, for backward compatibility
            # reasons.
self.sf_custom = True
# set an empty value to be fixed on the next line
self.column = ''
self.set_attributes_from_name(name)
column = self.column
assert column
sf_managed_model = getattr(cls._meta, 'sf_managed', False)
if self.sf_managed is None and sf_managed_model and self.column and column.endswith('__c'):
self.sf_managed = True
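# Illustrative sketch (not part of this module): how get_attname_column() derives
# Salesforce column names, including the '__c' suffix added for custom=True fields.
# The field names are hypothetical and this helper is never called by the package.
def _example_column_names():
    plain = CharField(max_length=40)
    plain.set_attributes_from_name('last_name')           # column becomes 'LastName'
    custom = CharField(max_length=40, custom=True)
    custom.set_attributes_from_name('my_custom_field')    # column becomes 'MyCustomField__c'
    return plain.column, custom.column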
# pylint:disable=unnecessary-pass,too-many-ancestors
class CharField(SfField, models.CharField):
"""CharField with sf_read_only attribute for Salesforce."""
def db_type(self, connection: Any) -> str:
return 'Text' if not self.choices else 'Picklist'
class EmailField(SfField, models.EmailField):
"""EmailField with sf_read_only attribute for Salesforce."""
pass
class URLField(SfField, models.URLField):
"""URLField with sf_read_only attribute for Salesforce."""
pass
class TextField(SfField, models.TextField):
"""TextField with sf_read_only attribute for Salesforce."""
pass
class IntegerField(SfField, models.IntegerField):
"""IntegerField with sf_read_only attribute for Salesforce."""
pass
class BigIntegerField(SfField, models.BigIntegerField):
"""BigIntegerField with sf_read_only attribute for Salesforce."""
# important for other database backends, e.g. in tests
# The biggest exact value is +-(2 ** 53 -1 ), approx. 9.007E15
pass
class SmallIntegerField(SfField, models.SmallIntegerField):
"""SmallIntegerField with sf_read_only attribute for Salesforce."""
pass
class DecimalField(SfField, models.DecimalField):
"""
DecimalField with sf_read_only attribute for Salesforce.
Salesforce has only one numeric type xsd:double, but no integer.
Even a numeric field with declared zero decimal_places can contain
    pi=3.14159265358979 in the database accidentally, but if the value is an
    integer then it is returned without '.0'.
    DecimalField is the default numeric type used by introspection (inspectdb).
"""
def to_python(self, value: Any) -> Any:
if str(value) == '':
return value
ret = super().to_python(value)
if ret is not None and self.decimal_places == 0:
# this is because Salesforce has no numeric integer type
if ret == int(ret):
ret = Decimal(int(ret))
return ret
def from_db_value(self, value: Any, expression: Any, connection: DatabaseWrapper) -> Any:
# pylint:disable=unused-argument
# TODO refactor and move to the driver like in other backends
if isinstance(value, float):
value = str(value)
return self.to_python(value)
class FloatField(SfField, models.FloatField):
"""FloatField for Salesforce.
It is Float in Python and the same as DecimalField in the database.
"""
pass
class BooleanField(SfField, models.BooleanField):
"""BooleanField with sf_read_only attribute for Salesforce.
Every BooleanField has a default value. It is False if default
value checkbox is unchecked or True if checked.
    No NullBooleanField exists for Salesforce.
"""
def __init__(self, default: Union[bool, BaseDefault] = False, **kwargs: Any) -> None:
super().__init__(default=default, **kwargs)
class DateTimeField(SfField, models.DateTimeField):
"""DateTimeField with sf_read_only attribute for Salesforce."""
class DateField(SfField, models.DateField):
"""DateField with sf_read_only attribute for Salesforce."""
def from_db_value(self, value: Any, expression: Any, connection: DatabaseWrapper) -> Any:
# pylint:disable=unused-argument
return self.to_python(value)
class TimeField(SfField, models.TimeField):
"""TimeField with sf_read_only attribute for Salesforce."""
def from_db_value(self, value: Any, expression: Any, connection: DatabaseWrapper) -> Any:
# pylint:disable=unused-argument
if isinstance(value, str):
# value from salesforce is a string "HH:MM:ss.000Z", value from other db is datetime.time().
value = value.rstrip('Z')
return self.to_python(value)
if TYPE_CHECKING:
# static typing of a mixin requires an additional base, that is not necessary
# at runtime
_MixinTypingBase = models.ForeignObject
else:
_MixinTypingBase = object
class SfForeignObjectMixin(SfField, _MixinTypingBase):
def __init__(self, to: Union[Type[models.Model], str], on_delete: Callable[..., None], *args: Any, **kwargs: Any
) -> None:
# Checks parameters before call to ancestor.
if on_delete.__name__ not in ('PROTECT', 'DO_NOTHING'):
            # The option CASCADE (which currently fails) would be unsafe even after
            # a fix of on_delete, because cascade delete is usually not enabled in
            # SF for most objects for safety reasons, namely for Owner, CreatedBy
            # etc. Some related objects are deleted automatically by SF even with
            # DO_NOTHING in Django, e.g. for Campaign/CampaignMember.
warnings.warn(
"Only foreign keys with on_delete = PROTECT or "
"DO_NOTHING are currently supported, not %s related to %s"
% (on_delete, to))
super().__init__(to, on_delete, *args, **kwargs)
def get_attname(self) -> str:
if self.name.islower(): # pylint:disable=no-else-return
# the same as django.db.models.fields.related.ForeignKey.get_attname
return '%s_id' % self.name
else:
return '%sId' % self.name
def get_attname_column(self) -> Tuple[str, str]:
attname, column = super().get_attname_column()
if self.db_column is None and not self.sf_custom:
column += 'Id'
return attname, column
class ForeignKey(SfForeignObjectMixin, models.ForeignKey):
"""ForeignKey with sf_read_only attribute that is acceptable by Salesforce."""
def db_type(self, connection: Any) -> str:
if connection.vendor == 'salesforce':
return 'Lookup'
return super().db_type(connection)
class OneToOneField(SfForeignObjectMixin, models.OneToOneField):
"""OneToOneField with sf_read_only attribute that is acceptable by Salesforce."""
def db_type(self, connection: Any) -> str:
if connection.vendor == 'salesforce':
return 'Lookup'
return super().db_type(connection)
class XJSONField(TextField):
"""
Salesforce internal "complexvalue" field similar to JSON, used by SFDC for metadata,
this field should not be used for normal data or with other database backends.
"""
def get_internal_type(self) -> str:
return "TextField"
def get_prep_value(self, value: Any) -> Any:
return value
def to_python(self, value: Any) -> Any:
return value
AutoField = SalesforceAutoField
from .. import names
from ..ndtypes import BoolT, IntT
import type_mappings
from reserved_names import is_reserved
class BaseCompiler(object):
def __init__(self, extra_link_flags = None, extra_compile_flags = None):
self.blocks = []
self.name_versions = {}
self.name_mappings = {}
self.extra_link_flags = extra_link_flags if extra_link_flags else []
self.extra_compile_flags = extra_compile_flags if extra_compile_flags else []
def add_compile_flag(self, flag):
if flag not in self.extra_compile_flags:
self.extra_compile_flags.append(flag)
def add_link_flag(self, flag):
if flag not in self.extra_link_flags:
self.extra_link_flags.append(flag)
def visit_expr(self, expr):
expr_class_name = expr.__class__.__name__
method_name = "visit_" + expr_class_name
assert hasattr(self, method_name), "Unsupported expression %s" % expr_class_name
result = getattr(self, method_name)(expr)
assert result is not None, \
"Compilation method for expression %s returned None, expected code string" % expr_class_name
return result
def visit_expr_list(self, exprs):
return [self.visit_expr(e) for e in exprs]
def breakpoint(self):
self.append("raise(SIGINT);")
def visit_stmt(self, stmt):
stmt_class_name = stmt.__class__.__name__
method_name = "visit_" + stmt_class_name
assert hasattr(self, method_name), \
"Statement %s not supported by %s" % (stmt_class_name, self.__class__.__name__)
result = getattr(self, method_name)(stmt)
assert result is not None, "Compilation method for statement %s return None" % stmt_class_name
return result
def push(self):
self.blocks.append([])
def pop(self):
stmts = self.blocks.pop()
return " " + self.indent("\n".join(" " + stmt for stmt in stmts))
def indent(self, block_str):
return block_str.replace("\n", "\n ")
def append(self, stmt):
stripped = stmt.strip()
assert len(stripped) == 0 or \
";" in stripped or \
stripped.startswith("//") or \
stripped.startswith("/*"), "Invalid statement: %s" % stmt
self.blocks[-1].append(stmt)
def newline(self):
self.append("\n")
def comment(self, text):
self.append("// %s" % text)
def printf(self, fmt, *args):
result = 'printf("%s\\n"' % fmt
if len(args) > 0:
result = result + ", " + ", ".join(str(arg) for arg in args)
self.append( result + ");" )
def fresh_name(self, prefix):
prefix = names.original(prefix)
prefix = prefix.replace(".", "")
version = self.name_versions.get(prefix, 1)
self.name_versions[prefix] = version + 1
    # a prefix with no alphabetic characters would not be a valid C identifier
if not any(c.isalpha() for c in prefix):
prefix = self.fresh_name("temp" + prefix)
if version == 1 and not is_reserved(prefix):
return prefix
elif prefix[-1] != "_":
return "%s_%d" % (prefix, version)
else:
return prefix + str(version)
def to_ctype(self, t):
"""
Convert Parakeet type to string representing its C type.
The base class implementation only handles scalars,
support for Tuples, Slices, and Arrays is in the overload FlatFnCompiler.to_ctype
"""
return type_mappings.to_ctype(t)
def fresh_var(self, t, prefix = None, init = None):
if prefix is None:
prefix = "temp"
name = self.fresh_name(prefix)
if isinstance(t, str):
t_str = t
else:
t_str = self.to_ctype(t)
if init is None:
self.append("%s %s;" % (t_str, name))
else:
self.append("%s %s = %s;" % (t_str, name, init))
return name
def fresh_array_var(self, t, n, prefix = None):
if prefix is None:
prefix = "temp"
name = self.fresh_name(prefix)
if isinstance(t, str):
t_str = t
else:
t_str = self.to_ctype(t)
self.append("%s %s[%d];" % (t_str, name, n))
return name
def assign(self, name, rhs):
self.append("%s = %s;" % (name, rhs))
def name(self, ssa_name, overwrite = False):
"""
    Convert from SSA names, which might have large version numbers and contain
    syntactically invalid characters, to valid local C names.
"""
if ssa_name in self.name_mappings and not overwrite:
return self.name_mappings[ssa_name]
prefix = names.original(ssa_name)
prefix = prefix.replace(".", "")
name = self.fresh_name(prefix)
self.name_mappings[ssa_name] = name
return name
def return_if_null(self, obj):
self.append("if (!%s) { return NULL; }" % obj)
def not_(self, x):
if x == "1":
return "0"
elif x == "0":
return "1"
return "!%s" % x
def and_(self, x, y):
if x == "0" or y == "0":
return "0"
elif x == "1" and y == "1":
return "1"
elif x == "1":
return y
elif y == "1":
return x
return "%s && %s" % (x,y)
def or_(self, x, y):
if x == "1" or y == "1":
return "1"
elif x == "0":
return y
elif y == "0":
return x
return "%s || %s" % (x,y)
def gt(self, x, y, t):
if isinstance(t, (BoolT, IntT)) and x == y:
return "0"
return "%s > %s" % (x, y)
def gte(self, x, y, t):
if isinstance(t, (BoolT, IntT)) and x == y:
return "1"
return "%s >= %s" % (x,y)
def lt(self, x, y, t):
if isinstance(t, (BoolT, IntT)) and x == y:
return "0"
return "%s < %s" % (x,y)
def lte(self, x, y, t):
if isinstance(t, (BoolT, IntT)) and x == y:
return "1"
return "%s <= %s" % (x, y)
def neq(self, x, y, t):
if isinstance(t, (BoolT, IntT)) and x == y:
return "0"
return "%s != %s" % (x, y)
def eq(self, x, y, t):
if isinstance(t, (BoolT, IntT)) and x == y:
return "1"
return "%s == %s" % (x, y)
def add(self, x, y):
if x == "0":
return y
elif y == "0":
return x
return "%s + %s" % (x,y)
def sub(self, x, y):
if x == "0":
return "-(%s)" % y
elif y == "0":
return x
return "%s - %s" % (x,y)
def mul(self, x, y):
if x == "1":
return y
elif y == "1":
return x
    elif x == "0" or y == "0":
return "0"
return "%s * %s" % (x,y)
def div(self, x, y):
if x == y:
return "1"
elif x == "0":
return "0"
elif y == "1":
return x
else:
return "%s / %s" % (x,y)
import warnings
import numpy as np
from .core import CallbackBase, CollectThenCompute
class LiveFit(CallbackBase):
"""
Fit a model to data using nonlinear least-squares minimization.
Parameters
----------
model : lmfit.Model
y : string
name of the field in the Event document that is the dependent variable
independent_vars : dict
map the independent variable name(s) in the model to the field(s)
in the Event document; e.g., ``{'x': 'motor'}``
init_guess : dict, optional
initial guesses for other values, if expected by model;
e.g., ``{'sigma': 1}``
update_every : int or None, optional
How often to recompute the fit. If `None`, do not compute until the
end. Default is 1 (recompute after each new point).
Attributes
----------
result : lmfit.ModelResult
"""
def __init__(self, model, y, independent_vars, init_guess=None, *,
update_every=1):
self.ydata = []
self.independent_vars_data = {}
self.__stale = False
self.result = None
self._model = model
self.y = y
self.independent_vars = independent_vars
if init_guess is None:
init_guess = {}
self.init_guess = init_guess
self.update_every = update_every
@property
def model(self):
# Make this a property so it can't be updated.
return self._model
@property
def independent_vars(self):
return self._independent_vars
@independent_vars.setter
def independent_vars(self, val):
if set(val) != set(self.model.independent_vars):
raise ValueError("keys {} must match the independent variables in "
"the model "
"{}".format(set(val),
set(self.model.independent_vars)))
self._independent_vars = val
self.independent_vars_data.clear()
self.independent_vars_data.update({k: [] for k in val})
self._reset()
def _reset(self):
self.result = None
self.__stale = False
self.ydata.clear()
for v in self.independent_vars_data.values():
v.clear()
def start(self, doc):
self._reset()
super().start(doc)
def event(self, doc):
if self.y not in doc['data']:
return
y = doc['data'][self.y]
idv = {k: doc['data'][v] for k, v in self.independent_vars.items()}
# Always stash the data for the next time the fit is updated.
self.update_caches(y, idv)
self.__stale = True
# Maybe update the fit or maybe wait.
if self.update_every is not None:
i = len(self.ydata)
N = len(self.model.param_names)
if i < N:
# not enough points to fit yet
pass
elif (i == N) or ((i - 1) % self.update_every == 0):
self.update_fit()
super().event(doc)
def stop(self, doc):
# Update the fit if it was not updated by the last event.
if self.__stale:
self.update_fit()
super().stop(doc)
def update_caches(self, y, independent_vars):
self.ydata.append(y)
for k, v in self.independent_vars_data.items():
v.append(independent_vars[k])
def update_fit(self):
N = len(self.model.param_names)
if len(self.ydata) < N:
warnings.warn("LiveFitPlot cannot update fit until there are at least {} "
"data points".format(N))
else:
kwargs = {}
kwargs.update(self.independent_vars_data)
kwargs.update(self.init_guess)
self.result = self.model.fit(self.ydata, **kwargs)
self.__stale = False
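# Illustrative usage sketch (not part of this module): wiring LiveFit to an lmfit
# model.  lmfit is assumed to be installed, and the field names 'det' and 'motor'
# are examples; subscribe the returned callback to a RunEngine and read `.result`
# after the scan.
def _example_livefit_setup():
    import lmfit
    def gaussian(x, A, sigma, x0):
        return A * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))
    model = lmfit.Model(gaussian)
    init_guess = {'A': 2, 'sigma': 1, 'x0': 0}
    return LiveFit(model, 'det', {'x': 'motor'}, init_guess=init_guess)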
# This function is vendored from scipy v0.16.1 to avoid adding a scipy
# dependency just for one Python function
def center_of_mass(input, labels=None, index=None):
"""
Calculate the center of mass of the values of an array at labels.
Parameters
----------
input : ndarray
Data from which to calculate center-of-mass.
labels : ndarray, optional
Labels for objects in `input`, as generated by `ndimage.label`.
Only used with `index`. Dimensions must be the same as `input`.
index : int or sequence of ints, optional
Labels for which to calculate centers-of-mass. If not specified,
all labels greater than zero are used. Only used with `labels`.
Returns
-------
center_of_mass : tuple, or list of tuples
Coordinates of centers-of-mass.
Examples
--------
>>> a = np.array(([0,0,0,0],
[0,1,1,0],
[0,1,1,0],
[0,1,1,0]))
>>> from scipy import ndimage
>>> ndimage.measurements.center_of_mass(a)
(2.0, 1.5)
Calculation of multiple objects in an image
>>> b = np.array(([0,1,1,0],
[0,1,0,0],
[0,0,0,0],
[0,0,1,1],
[0,0,1,1]))
>>> lbl = ndimage.label(b)[0]
>>> ndimage.measurements.center_of_mass(b, lbl, [1,2])
[(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)]
"""
normalizer = np.sum(input, labels, index)
grids = np.ogrid[[slice(0, i) for i in input.shape]]
results = [
np.sum(input * grids[dir].astype(float), labels, index) / normalizer
for dir in range(input.ndim)]
if np.isscalar(results[0]):
return tuple(results)
return [tuple(v) for v in np.array(results).T]
class PeakStats(CollectThenCompute):
"""
    Compute peak statistics after a run finishes.
Results are stored in the attributes.
Parameters
----------
x : string
field name for the x variable (e.g., a motor)
y : string
field name for the y variable (e.g., a detector)
edge_count : int or None, optional
If not None, number of points at beginning and end to use
for quick and dirty background subtraction.
Notes
-----
It is assumed that the two fields, x and y, are recorded in the same
Event stream.
Attributes
----------
com : center of mass
cen : mid-point between half-max points on each side of the peak
max : x location of y maximum
min : x location of y minimum
    crossings : locations where y crosses the midline, defined as
        ((np.max(y) + np.min(y)) / 2). Users can estimate the FWHM from
        this information.
fwhm : the computed full width half maximum (fwhm) of a peak.
The distance between the first and last crossing is taken to
be the fwhm.
"""
def __init__(self, x, y, edge_count=None):
self.x = x
self.y = y
self.com = None
self.cen = None
self.max = None
self.min = None
self.crossings = None
self.fwhm = None
self.lin_bkg = None
self._edge_count = edge_count
super().__init__()
def __getitem__(self, key):
if key in ['com', 'cen', 'max', 'min']:
return getattr(self, key)
else:
raise KeyError
def compute(self):
"This method is called at run-stop time by the superclass."
# clear all results
self.com = None
self.cen = None
self.max = None
self.min = None
self.crossings = None
self.fwhm = None
self.lin_bkg = None
x = []
y = []
for event in self._events:
try:
_x = event['data'][self.x]
_y = event['data'][self.y]
except KeyError:
pass
else:
x.append(_x)
y.append(_y)
x = np.array(x)
y = np.array(y)
if not len(x):
# nothing to do
return
self.x_data = x
self.y_data = y
if self._edge_count is not None:
left_x = np.mean(x[:self._edge_count])
left_y = np.mean(y[:self._edge_count])
right_x = np.mean(x[-self._edge_count:])
right_y = np.mean(y[-self._edge_count:])
m = (right_y - left_y) / (right_x - left_x)
b = left_y - m * left_x
# don't do this in place to not mess with self.y_data
y = y - (m * x + b)
self.lin_bkg = {'m': m, 'b': b}
# Compute x value at min and max of y
self.max = x[np.argmax(y)], self.y_data[np.argmax(y)],
self.min = x[np.argmin(y)], self.y_data[np.argmin(y)],
self.com, = np.interp(center_of_mass(y), np.arange(len(x)), x)
mid = (np.max(y) + np.min(y)) / 2
        crossings = np.where(np.diff((y > mid).astype(int)))[0]
_cen_list = []
for cr in crossings.ravel():
_x = x[cr:cr+2]
_y = y[cr:cr+2] - mid
dx = np.diff(_x)[0]
dy = np.diff(_y)[0]
m = dy / dx
_cen_list.append((-_y[0] / m) + _x[0])
if _cen_list:
self.cen = np.mean(_cen_list)
self.crossings = np.array(_cen_list)
if len(_cen_list) >= 2:
self.fwhm = np.abs(self.crossings[-1] - self.crossings[0],
dtype=float)
# reset y data
y = self.y_data
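# --- Illustrative usage sketch (editor addition, not part of the original
# module): a minimal example of collecting peak statistics for a scan. The
# field names 'motor' and 'det' are placeholders and must refer to fields
# recorded in the same Event stream.
def _example_peakstats_usage():
    ps = PeakStats('motor', 'det', edge_count=3)
    # Subscribe `ps` to a RunEngine, e.g. RE(scan([det], motor, -1, 1, 20), ps);
    # after the run stops, ps.com, ps.cen, ps.max, ps.min and ps.fwhm are set.
    return ps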
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_subscription_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.OpenEnergyPlatform/energyServices')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
class EnergyServicesOperations(object):
"""EnergyServicesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~open_energy_platform_management_service_apis.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.EnergyServiceList"]:
"""Returns list of oep resources..
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnergyServiceList or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~open_energy_platform_management_service_apis.models.EnergyServiceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnergyServiceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("EnergyServiceList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
**kwargs: Any
) -> Iterable["_models.EnergyServiceList"]:
"""Lists a collection of oep resources under the given Azure Subscription ID.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnergyServiceList or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~open_energy_platform_management_service_apis.models.EnergyServiceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnergyServiceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("EnergyServiceList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.OpenEnergyPlatform/energyServices'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.EnergyService":
"""Returns oep resource for a given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The resource name.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EnergyService, or the result of cls(response)
:rtype: ~open_energy_platform_management_service_apis.models.EnergyService
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnergyService"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('EnergyService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices/{resourceName}'} # type: ignore
def _create_initial(
self,
resource_group_name: str,
resource_name: str,
body: Optional["_models.EnergyService"] = None,
**kwargs: Any
) -> "_models.EnergyService":
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnergyService"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'EnergyService')
else:
_json = None
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('EnergyService', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('EnergyService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices/{resourceName}'} # type: ignore
@distributed_trace
def begin_create(
self,
resource_group_name: str,
resource_name: str,
body: Optional["_models.EnergyService"] = None,
**kwargs: Any
) -> LROPoller["_models.EnergyService"]:
"""Method that gets called if subscribed for ResourceCreationBegin trigger.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The resource name.
:type resource_name: str
:param body: Request body.
:type body: ~open_energy_platform_management_service_apis.models.EnergyService
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either EnergyService or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~open_energy_platform_management_service_apis.models.EnergyService]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnergyService"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('EnergyService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices/{resourceName}'} # type: ignore
@distributed_trace
def update(
self,
resource_group_name: str,
resource_name: str,
body: Optional["_models.EnergyResourceUpdate"] = None,
**kwargs: Any
) -> "_models.EnergyService":
"""update.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The resource name.
:type resource_name: str
:param body:
:type body: ~open_energy_platform_management_service_apis.models.EnergyResourceUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EnergyService, or the result of cls(response)
:rtype: ~open_energy_platform_management_service_apis.models.EnergyService
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnergyService"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'EnergyResourceUpdate')
else:
_json = None
request = build_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('EnergyService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices/{resourceName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices/{resourceName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes oep resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The resource name.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OpenEnergyPlatform/energyServices/{resourceName}'} # type: ignore
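# --- Illustrative usage sketch (editor addition, not part of the generated
# module): this operation group is reached through the generated management
# client rather than instantiated directly. The `energy_services` attribute
# name and the resource names below are assumptions made for illustration.
def _example_energy_services_usage(client, resource_group_name):
    # Enumerate resources in a resource group; ItemPaged fetches pages lazily.
    for svc in client.energy_services.list_by_resource_group(resource_group_name):
        print(svc.name)  # assumes the standard ARM `name` attribute
    # Long-running delete: begin_delete returns an LROPoller.
    poller = client.energy_services.begin_delete(resource_group_name, "example-resource")
    poller.wait()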
|
|
#!/usr/bin/env python3
import smbus, os, struct, time
class FuelGauge:
def __init__(self, address=0x64, bus=None, shunt=0.002):
self.address = address
self.charge = None
self.voltage = None
self.current = None
self.temperature = None
self.timestamp = None
self.shunt = shunt
self.prescalar = 4096
self.prescalarLookup = {1:0x00,
4:0b00001000,
16:0b00010000,
64:0b00011000,
256:0b00100000,
1024:0b00101000,
4096:0b00111000}
self.registerLookup = {"status" : 0x00,
"control" : 0x01,
"charge": 0x02,
"voltage": 0x08,
"current": 0x0E,
"temperature": 0x14}
self.alccModes = {"None": 0b00000000,
"Alarm": 0b00000100,
"Reset": 0b00000010}
if bus is None:
for b in range(0,3):
path = '/dev/i2c-' + str(b)
try:
s = os.stat(path)
bus = b
break
except:
pass
if bus is None:
bus = 1
self.bus = bus
try:
self.i2c = smbus.SMBus(self.bus)
except:
raise IOError("Cant open i2c bus")
return
def initSensor(self, prescalar, alccMode="None"):
if prescalar not in self.prescalarLookup:
raise ValueError("Prescalar value not valid")
if alccMode not in self.alccModes:
raise ValueError("ALCC Mode not valid")
self.prescalar = prescalar
controlByte = 0b11000000
controlByte |= self.prescalarLookup[self.prescalar]
controlByte |= self.alccModes[alccMode]
try:
self.i2c.write_byte_data(self.address, self.registerLookup["control"], controlByte)
except:
raise IOError("Could not write control data to device at %s" % self.address)
def setLimit(self, limitName, upperLimit, lowerLimit):
if limitName not in self.registerLookup:
raise ValueError("Limit name not valid")
upperData = None
lowerData = None
if limitName == "charge":
upperData = struct.pack('>H', int(upperLimit * 4096/self.prescalar * self.shunt/0.05 / 0.34) + 0x7FFF)
lowerData = struct.pack('>H', int(lowerLimit * 4096/self.prescalar * self.shunt/0.05 / 0.34) + 0x7FFF)
elif limitName == "voltage":
upperData = struct.pack('>H', int(upperLimit/23.6 * 65535))
lowerData = struct.pack('>H', int(lowerLimit/23.6 * 65535))
elif limitName == "current":
upperData = struct.pack('>H', int(self.shunt/0.06 * 32767 * upperLimit)+0x7FFF)
lowerData = struct.pack('>H', int(self.shunt/0.06 * 32767 * lowerLimit)+0x7FFF)
elif limitName == "temperature":
upperData = struct.pack('>H', int((upperLimit+273.15)/510*0xFFFF))
lowerData = struct.pack('>H', int((lowerLimit+273.15)/510*0xFFFF))
else:
return
try:
self.i2c.write_i2c_block_data(self.address, self.registerLookup[limitName] + 2, list(bytearray(upperData)))
self.i2c.write_i2c_block_data(self.address, self.registerLookup[limitName] + 4, list(bytearray(lowerData)))
except:
raise IOError("Could not write limit data to device at %s" % self.address)
def resetCharge(self):
data = [0x7F, 0xFF]
try:
            self.i2c.write_i2c_block_data(self.address, self.registerLookup["charge"], data)
except:
raise IOError("Could not write charge data to device at %s" % self.address)
    def resetAlarms(self):
        try:
            # Reading the SMBus Alert Response Address (0x0C) clears the alert
            self.i2c.read_byte(0x0c)
        except:
            pass
def checkAlarms(self):
try:
if self.i2c.read_byte_data(self.address, self.registerLookup["status"]) != 0x00:
return True
else:
return False
except:
raise IOError("Could not read alarm data from device at %s" % self.address)
def read(self):
try:
chargeBuf = bytearray(self.i2c.read_i2c_block_data(self.address, self.registerLookup["charge"], 2))
voltageBuf = bytearray(self.i2c.read_i2c_block_data(self.address, self.registerLookup["voltage"], 2))
currentBuf = bytearray(self.i2c.read_i2c_block_data(self.address, self.registerLookup["current"], 2))
temperatureBuf = bytearray(self.i2c.read_i2c_block_data(self.address, self.registerLookup["temperature"], 2))
self.charge = float(struct.unpack('>H', chargeBuf)[0]-0x7FFF) * 0.34 * 0.05/self.shunt * self.prescalar / 4096
self.voltage = 23.6 * float(struct.unpack('>H', voltageBuf)[0])/65535.0
self.current = 0.06/self.shunt * float(struct.unpack('>H', currentBuf)[0]-0x7FFF)/32767.0
self.temperature = 510 * float(struct.unpack('>H', temperatureBuf)[0])/0xFFFF
self.temperature = self.temperature - 273.15
self.timestamp = time.time()
except:
raise IOError("Could not read data from device at %s" % self.address)
def populateBatteryMessage(msg, fg):
msg.voltage = fg.voltage
msg.current = fg.current
msg.charge = fg.charge/1000
if msg.current < 0:
msg.power_supply_status = msg.POWER_SUPPLY_STATUS_DISCHARGING
elif msg.current > 0:
msg.power_supply_status = msg.POWER_SUPPLY_STATUS_CHARGING
if fg.temperature > 60:
msg.power_supply_health = msg.POWER_SUPPLY_HEALTH_OVERHEAT
elif fg.temperature < 0:
msg.power_supply_health = msg.POWER_SUPPLY_HEALTH_COLD
else:
msg.power_supply_health = msg.POWER_SUPPLY_HEALTH_GOOD
if fg.checkAlarms():
fg.resetAlarms()
msg.power_supply_health = msg.POWER_SUPPLY_HEALTH_UNSPEC_FAILURE
    # charge counts down from 0 at full, so this yields the remaining fraction
    msg.percentage = 1 + (msg.charge / msg.design_capacity)
msg.header.stamp = rospy.Time.from_sec(fg.timestamp)
msg.present = True
return msg
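# --- Illustrative usage sketch (editor addition, not part of the original
# script): a minimal non-ROS polling loop. The address, bus and limit values
# mirror the defaults used in the __main__ block below and are
# hardware-dependent assumptions.
def _example_fuelgauge_poll(cycles=5):
    fg = FuelGauge(address=0x64, bus=1)
    fg.initSensor(256, "Alarm")
    fg.setLimit("voltage", 17, 12)
    for _ in range(cycles):
        fg.read()
        print("V=%.2f  I=%.2f  charge=%.1f  T=%.1f" %
              (fg.voltage, fg.current, fg.charge, fg.temperature))
        time.sleep(1.0)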
if __name__ == "__main__":
import rospy
from sensor_msgs.msg import BatteryState
runFG1 = True
runFG2 = True
rospy.init_node('BatteryMonitor')
try:
fg1 = FuelGauge(address=0x64, bus=1)
fg1.initSensor(256, "Alarm")
fg1.setLimit("current", 25, -12)
fg1.setLimit("voltage", 17, 12)
fg1.setLimit("temperature", 50, 0)
rospy.loginfo("Fuel Gauge 1 initialized")
except:
runFG1 = False
rospy.logerr("Fuel Gauge 1 missing")
try:
fg2 = FuelGauge(address=0x65, bus=1)
fg2.initSensor(256, "Alarm")
fg2.setLimit("current", 25, -12)
fg2.setLimit("voltage", 17, 12)
fg2.setLimit("temperature", 50, 0)
rospy.loginfo("Fuel Gauge 2 initialized")
except:
runFG2 = False
rospy.logerr("Fuel Gauge 2 missing")
r = rospy.Rate(4)
bat1Pub = rospy.Publisher("/battery1Status", BatteryState, queue_size = 10)
bat2Pub = rospy.Publisher("/battery2Status", BatteryState, queue_size = 10)
msg = BatteryState()
msg.design_capacity = 12.6
msg.power_supply_technology = msg.POWER_SUPPLY_TECHNOLOGY_LIPO
while not rospy.is_shutdown():
if runFG1:
fg1.read()
msg = populateBatteryMessage(msg, fg1)
else:
msg.power_supply_status = msg.POWER_SUPPLY_STATUS_UNKNOWN
msg.current = 0.0
msg.voltage = 0.0
msg.present = False
msg.charge = 0.0
msg.capacity = 0.0
msg.header.frame_id = "battery1"
bat1Pub.publish(msg)
if runFG2:
fg2.read()
msg = populateBatteryMessage(msg, fg2)
else:
msg.power_supply_status = msg.POWER_SUPPLY_STATUS_UNKNOWN
msg.current = 0.0
msg.voltage = 0.0
msg.present = False
msg.charge = 0.0
msg.capacity = 0.0
msg.header.frame_id = "battery2"
bat2Pub.publish(msg)
r.sleep()
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import ldappool
from oslo_config import cfg
from keystone.common.ldap import core as ldap_core
from keystone.identity.backends import ldap
from keystone.tests import unit as tests
from keystone.tests.unit import fakeldap
from keystone.tests.unit import test_backend_ldap_pool
from keystone.tests.unit import test_ldap_livetest
CONF = cfg.CONF
class LiveLDAPPoolIdentity(test_backend_ldap_pool.LdapPoolCommonTestMixin,
test_ldap_livetest.LiveLDAPIdentity):
"""Executes existing LDAP live test with pooled LDAP handler to make
sure it works without any error.
Also executes common pool specific tests via Mixin class.
"""
def setUp(self):
super(LiveLDAPPoolIdentity, self).setUp()
self.addCleanup(self.cleanup_pools)
# storing to local variable to avoid long references
self.conn_pools = ldap_core.PooledLDAPHandler.connection_pools
def config_files(self):
config_files = super(LiveLDAPPoolIdentity, self).config_files()
config_files.append(tests.dirs.
tests_conf('backend_pool_liveldap.conf'))
return config_files
def test_assert_connector_used_not_fake_ldap_pool(self):
handler = ldap_core._get_connection(CONF.ldap.url, use_pool=True)
self.assertNotEqual(type(handler.Connector),
type(fakeldap.FakeLdapPool))
self.assertEqual(type(ldappool.StateConnector),
type(handler.Connector))
def test_async_search_and_result3(self):
self.config_fixture.config(group='ldap', page_size=1)
self.test_user_enable_attribute_mask()
def test_pool_size_expands_correctly(self):
who = CONF.ldap.user
cred = CONF.ldap.password
# get related connection manager instance
ldappool_cm = self.conn_pools[CONF.ldap.url]
def _get_conn():
return ldappool_cm.connection(who, cred)
        with _get_conn() as c1:  # conn1
            self.assertEqual(1, len(ldappool_cm))
            self.assertTrue(c1.connected)
            self.assertTrue(c1.active)
with _get_conn() as c2: # conn2
self.assertEqual(2, len(ldappool_cm))
self.assertTrue(c2.connected)
self.assertTrue(c2.active)
self.assertEqual(2, len(ldappool_cm))
            # c2 went out of context; it is connected but not active
self.assertTrue(c2.connected)
self.assertFalse(c2.active)
with _get_conn() as c3: # conn3
self.assertEqual(2, len(ldappool_cm))
self.assertTrue(c3.connected)
self.assertTrue(c3.active)
self.assertTrue(c3 is c2) # same connection is reused
self.assertTrue(c2.active)
with _get_conn() as c4: # conn4
self.assertEqual(3, len(ldappool_cm))
self.assertTrue(c4.connected)
self.assertTrue(c4.active)
def test_password_change_with_auth_pool_disabled(self):
self.config_fixture.config(group='ldap', use_auth_pool=False)
old_password = self.user_sna['password']
self.test_password_change_with_pool()
self.assertRaises(AssertionError,
self.identity_api.authenticate,
context={},
user_id=self.user_sna['id'],
password=old_password)
def _create_user_and_authenticate(self, password):
user_dict = {
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
'password': password}
user = self.identity_api.create_user(user_dict)
self.identity_api.authenticate(
context={},
user_id=user['id'],
password=password)
return self.identity_api.get_user(user['id'])
def _get_auth_conn_pool_cm(self):
pool_url = ldap_core.PooledLDAPHandler.auth_pool_prefix + CONF.ldap.url
return self.conn_pools[pool_url]
def _do_password_change_for_one_user(self, password, new_password):
self.config_fixture.config(group='ldap', use_auth_pool=True)
self.cleanup_pools()
self.load_backends()
user1 = self._create_user_and_authenticate(password)
auth_cm = self._get_auth_conn_pool_cm()
self.assertEqual(1, len(auth_cm))
user2 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
user3 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
user4 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
user5 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
# connection pool size remains 1 even for different user ldap bind
# as there is only one active connection at a time
user_api = ldap.UserApi(CONF)
u1_dn = user_api._id_to_dn_string(user1['id'])
u2_dn = user_api._id_to_dn_string(user2['id'])
u3_dn = user_api._id_to_dn_string(user3['id'])
u4_dn = user_api._id_to_dn_string(user4['id'])
u5_dn = user_api._id_to_dn_string(user5['id'])
        # Now create multiple active connections for the end-user auth case,
        # which forces them to be kept in the pool. Then modify one user's
        # password, making sure that that user's connection sits in the middle
        # of the pool list.
auth_cm = self._get_auth_conn_pool_cm()
with auth_cm.connection(u1_dn, password) as _:
with auth_cm.connection(u2_dn, password) as _:
with auth_cm.connection(u3_dn, password) as _:
with auth_cm.connection(u4_dn, password) as _:
with auth_cm.connection(u5_dn, password) as _:
self.assertEqual(5, len(auth_cm))
_.unbind_s()
user3['password'] = new_password
self.identity_api.update_user(user3['id'], user3)
return user3
def test_password_change_with_auth_pool_enabled_long_lifetime(self):
self.config_fixture.config(group='ldap',
auth_pool_connection_lifetime=600)
old_password = 'my_password'
new_password = 'new_password'
user = self._do_password_change_for_one_user(old_password,
new_password)
user.pop('password')
        # With a long connection lifetime, the auth pool can still bind with
        # the old password, which is undesirable in deployments where password
        # changes are frequent. This can only happen when there are multiple
        # concurrent connections.
user_ref = self.identity_api.authenticate(
context={}, user_id=user['id'], password=old_password)
self.assertDictEqual(user_ref, user)
def test_password_change_with_auth_pool_enabled_no_lifetime(self):
self.config_fixture.config(group='ldap',
auth_pool_connection_lifetime=0)
old_password = 'my_password'
new_password = 'new_password'
user = self._do_password_change_for_one_user(old_password,
new_password)
        # Now that the connection lifetime is zero, authentication with the
        # old password will always fail.
self.assertRaises(AssertionError,
self.identity_api.authenticate,
context={}, user_id=user['id'],
password=old_password)
|
|
from numpy import ones, resize, linspace, atleast_3d
from traits.api import Property, Str, Button, Trait, \
Any, Instance, HasStrictTraits, false, Dict, HasTraits, \
CArray, Bool
from traitsui.api import EnumEditor, View, Item, HGroup, \
VGroup, spring, Group, TextEditor, HTMLEditor, InstanceEditor, \
TabularEditor, TitleEditor, Label, ArrayEditor, ImageEditor
from traitsui.tabular_adapter import TabularAdapter
from traitsui.image.image import ImageLibrary
from pyface.api import ImageResource
from .data_source_factory import DataSourceFactory
from .preview_window import PreviewWindow
from mayavi.modules.api import Surface, Glyph
from mayavi.filters.api import ExtractEdges
############################################################################
# The DataSourceWizard class
############################################################################
class DataSourceWizard(HasTraits):
data_sources = Dict
_data_sources_names = Property(depends_on='data_sources')
def _get__data_sources_names(self):
names = []
for name in self.data_sources:
try:
self.data_sources[name] + 1
names.append(name)
except TypeError:
pass
names.sort()
return names
    # Dictionary mapping the views
data_type = Trait('point',
{'A surface':
'surface',
'A set of points, that can be connected by lines':
'point',
'A set of vectors':
'vector',
'Volumetric data':
'volumetric',
})
position_type = Trait('image data',
{'Specified explicitly':
'explicit',
                           'Implicitly positioned on a regular grid':
'image data',
'On an orthogonal grid with varying spacing':
'orthogonal grid',
})
# The array that is used for finding out the shape of the grid,
# when creating an ImageData
grid_shape_source = Property(depends_on='grid_shape_source_')
def _get_grid_shape_source(self):
if self.grid_shape_source_ == '':
            # cater for an improperly initialized view
keys = self._data_sources_names
if not self.grid_shape.any():
self.grid_shape = \
self.data_sources[keys[0]].shape
return keys[0]
elif self.grid_shape_source_[:16] == 'Shape of array: ':
return self.grid_shape_source_[17:-1]
else:
return ""
# Shadow traits for grid_shape_source
grid_shape_source_ = Str
def _grid_shape_source_changed(self):
if not self.grid_shape_source == '':
array_shape = \
atleast_3d(self.data_sources[self.grid_shape_source]).shape
grid_shape = ones((3, ))
grid_shape[:len(array_shape)] = array_shape
self.grid_shape = grid_shape
_grid_shape_source_labels = Property(depends_on='_data_sources_names')
def _get__grid_shape_source_labels(self):
values = ['Shape of array: "%s"' % name
for name in self._data_sources_names]
        values.sort()
values.append('Specified explicitly')
return values
# The shape of the grid array. Used when position is implicit
grid_shape = CArray(shape=(3,), dtype='i')
# Whether or not the data points should be connected.
lines = false
# The scalar data selection
scalar_data = Str('', help="Select the array that gives the value of the "
"scalars plotted.")
position_x = Str(help="Select the array that gives the x "
"position of the data points")
position_y = Str(help="Select the array that gives the y "
"position of the data points")
position_z = Str(help="Select the array that gives the z "
"position of the data points")
connectivity_triangles = Str
has_vector_data = false(help="""Do you want to plot vector components?""")
    # A boolean asking the user whether to load scalar data
has_scalar_data = false
vector_u = Str
vector_v = Str
vector_w = Str
#----------------------------------------------------------------------
# Public interface
#----------------------------------------------------------------------
def init_arrays(self):
# Force all the array names to be properly initialized
array_names = set(self.data_sources.keys())
if len(array_names) == 0:
# We should probably bail out here.
return False
for attr in ('position_x', 'position_y', 'position_z',
'scalar_data', 'vector_u', 'vector_v',
'vector_w', 'connectivity_triangles',
):
if len(array_names) > 0:
array_name = array_names.pop()
setattr(self, attr, array_name)
def guess_arrays(self):
""" Do some guess work on the arrays to find sensible default.
"""
array_names = set(self._data_sources_names)
found_some = False
if set(('x', 'y', 'z')).issubset(array_names):
self.position_x = 'x'
self.position_y = 'y'
self.position_z = 'z'
array_names = array_names.difference(('x', 'y', 'z'))
found_some = True
elif set(('X', 'Y', 'Z')).issubset(array_names):
self.position_x = 'X'
self.position_y = 'Y'
self.position_z = 'Z'
array_names = array_names.difference(('X', 'Y', 'Z'))
found_some = True
if set(('u', 'v', 'w')).issubset(array_names):
self.vector_u = 'u'
self.vector_v = 'v'
self.vector_w = 'w'
array_names = array_names.difference(('u', 'v', 'w'))
found_some = True
elif set(('U', 'V', 'W')).issubset(array_names):
self.vector_u = 'U'
self.vector_v = 'V'
self.vector_w = 'W'
array_names = array_names.difference(('U', 'V', 'W'))
found_some = True
if found_some:
# Need to re-attribute the guessed names.
for attr in ('scalar_data', 'vector_u', 'vector_v',
'vector_w', 'connectivity_triangles'):
if len(array_names) > 0:
setattr(self, attr, array_names.pop())
else:
break
def build_data_source(self):
""" This is where we apply the selections made by the user in
in the wizard to build the data source.
"""
factory = DataSourceFactory()
# Keep a reference to the factory to be able to replay it, say
# on other data.
self._factory = factory
if self.data_type_ == 'point':
            # The user wants explicitly positioned points, so the only
            # sensible data structure here is explicit positioning.
            self.position_type_ = 'explicit'
# In addition, this view does not allow for
# connectivity.
factory.unstructured = True
factory.connected = False
else:
factory.connected = True
if (self.position_type_ == "image data"
and not self.data_type_ == "point"):
if not self.has_scalar_data and not self.vector_u == '':
# With image data we need a scalar array always:
factory.scalar_data = ones(self.grid_shape)
factory.position_implicit = True
else:
factory.position_x = self.get_sdata(self.position_x)
factory.position_y = self.get_sdata(self.position_y)
factory.position_z = self.get_sdata(self.position_z)
if self.position_type_ == "orthogonal grid":
factory.orthogonal_grid = True
if self.position_type_ == "explicit" and self.data_type_ == "surface":
factory.connectivity_triangles = self.get_data(
self.connectivity_triangles)
if self.lines and self.data_type_ == "point":
factory.lines = True
if self.has_vector_data or self.data_type_ == 'vector':
# In the vector view, the user is not explicitly asked to
# Enable vectors.
factory.has_vector_data = True
factory.vector_u = self.get_sdata(self.vector_u)
factory.vector_v = self.get_sdata(self.vector_v)
factory.vector_w = self.get_sdata(self.vector_w)
if self.has_scalar_data or self.data_type_ == 'volumetric':
# In the volumetric view, the user is not explicitly asked to
# Enable scalars.
factory.scalar_data = self.get_sdata(self.scalar_data)
if self.connectivity_triangles == '':
factory.connectivity_triangles = None
self.data_source = factory.build_data_source()
if self.has_scalar_data:
if hasattr(self.data_source, 'scalar_name'):
self.data_source.scalar_name = self.scalar_data
elif hasattr(self.data_source, 'point_scalar_name'):
                self.data_source.point_scalar_name = self.scalar_data
#----------------------------------------------------------------------
# Private interface
#----------------------------------------------------------------------
def get_data(self, name):
return self.data_sources[name]
def get_sdata(self, name):
ary = self.data_sources[name]
if not self.data_type_ == 'point':
ary = resize(ary, self.grid_shape)
return ary
def active_arrays(self):
""" Return the list of the active array-selection drop-downs.
"""
arrays = []
if self.data_type_ == 'point' or self.position_type_ == 'explicit':
arrays.extend(
['position_x', 'position_y', 'position_z', ])
if self.data_type_ == 'vector' or self.has_vector_data:
arrays.extend(['vector_u', 'vector_v', 'vector_w'])
if self.has_scalar_data or self.data_type_ == 'volumetric':
arrays.extend(['scalar_data'])
return arrays
def check_arrays(self):
""" Checks that all the array have the right size.
"""
arrays_to_check = self.active_arrays()
if len(arrays_to_check) == 0:
return True
size = self.get_data(getattr(self, arrays_to_check.pop())).size
for attr in arrays_to_check:
if not self.get_data(getattr(self, attr)).size == size:
return False
if (self.data_type_ == 'surface'
and self.position_type_ == "explicit"):
if not self.connectivity_triangles.size / 3 == size:
return False
return True
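# --- Illustrative usage sketch (editor addition, not part of the original
# module): a minimal example of driving the wizard programmatically with
# three placeholder 1D arrays.
def _example_data_source_wizard():
    x = y = z = linspace(0, 1, 10)
    wizard = DataSourceWizard(data_sources=dict(x=x, y=y, z=z))
    wizard.guess_arrays()      # picks up 'x', 'y' and 'z' as positions
    # Select the "point" data type via its label (a key of the mapped trait).
    wizard.data_type = 'A set of points, that can be connected by lines'
    if wizard.check_arrays():
        wizard.build_data_source()   # result stored in wizard.data_source
    return wizard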
###########################################################################
# class ArrayColumnWrapper
###########################################################################
class ArrayColumnWrapper(HasStrictTraits):
name = Str
shape = Str
############################################################################
# class ArrayColumnAdapter
############################################################################
class ArrayColumnAdapter(TabularAdapter):
columns = [('name', 'name'),
('shape', 'shape'), ]
width = 100
############################################################################
# The DataSourceWizardView class
############################################################################
class DataSourceWizardView(DataSourceWizard):
#----------------------------------------------------------------------
# Private traits
#----------------------------------------------------------------------
_top_label = Str('Describe your data')
    _info_text = Str('Array sizes do not match')
_array_label = Str('Available arrays')
_data_type_text = Str("What does your data represents?")
_lines_text = Str("Connect the points with lines")
_scalar_data_text = Str("Array giving the value of the scalars")
_optional_scalar_data_text = Str("Associate scalars with the data points")
_connectivity_text = Str("Array giving the triangles")
_vector_data_text = Str("Associate vector components")
_position_text = Property(depends_on="position_type_")
_position_text_dict = {'explicit':
                           'Coordinates of the data points:',
'orthogonal grid':
'Position of the layers along each axis:',
}
def _get__position_text(self):
return self._position_text_dict.get(self.position_type_, "")
_shown_help_text = Str
_data_sources_wrappers = Property(depends_on='data_sources')
def _get__data_sources_wrappers(self):
return [
ArrayColumnWrapper(name=name,
shape=repr(self.data_sources[name].shape))
for name in self._data_sources_names
]
    # A trait pointing to the object, to play well with TraitsUI
_self = Instance(DataSourceWizard)
_suitable_traits_view = Property(depends_on="data_type_")
def _get__suitable_traits_view(self):
return "_%s_data_view" % self.data_type_
ui = Any(False)
_preview_button = Button(label='Preview structure')
def __preview_button_fired(self):
if self.ui:
self.build_data_source()
self.preview()
_ok_button = Button(label='OK')
def __ok_button_fired(self):
if self.ui:
self.ui.dispose()
self.build_data_source()
_cancel_button = Button(label='Cancel')
def __cancel_button_fired(self):
if self.ui:
self.ui.dispose()
_is_ok = Bool
_is_not_ok = Bool
def _anytrait_changed(self):
""" Validates if the OK button is enabled.
"""
if self.ui:
self._is_ok = self.check_arrays()
self._is_not_ok = not self._is_ok
_preview_window = Instance(PreviewWindow, ())
_info_image = Instance(ImageResource,
ImageLibrary.image_resource('@std:alert16',))
#----------------------------------------------------------------------
# TraitsUI views
#----------------------------------------------------------------------
_coordinates_group = \
HGroup(
Item('position_x', label='x',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok')),
Item('position_y', label='y',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok')),
Item('position_z', label='z',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok')),
)
_position_group = \
Group(
Item('position_type'),
Group(
Item('_position_text', style='readonly',
resizable=False,
show_label=False),
_coordinates_group,
visible_when='not position_type_=="image data"',
),
Group(
Item('grid_shape_source_',
label='Grid shape',
editor=EnumEditor(
name='_grid_shape_source_labels',
invalid='_is_not_ok')),
HGroup(
spring,
Item('grid_shape', style='custom',
editor=ArrayEditor(width=-60),
show_label=False),
enabled_when='grid_shape_source==""',
),
visible_when='position_type_=="image data"',
),
label='Position of the data points',
show_border=True,
show_labels=False,
),
_connectivity_group = \
Group(
HGroup(
Item('_connectivity_text', style='readonly',
resizable=False),
spring,
Item('connectivity_triangles',
editor=EnumEditor(name='_data_sources_names'),
show_label=False,
),
show_labels=False,
),
label='Connectivity information',
show_border=True,
show_labels=False,
enabled_when='position_type_=="explicit"',
),
_scalar_data_group = \
Group(
Item('_scalar_data_text', style='readonly',
resizable=False,
show_label=False),
HGroup(
spring,
Item('scalar_data',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok')),
show_labels=False,
),
label='Scalar value',
show_border=True,
show_labels=False,
)
_optional_scalar_data_group = \
Group(
HGroup(
'has_scalar_data',
Item('_optional_scalar_data_text',
resizable=False,
style='readonly'),
show_labels=False,
),
Item('_scalar_data_text', style='readonly',
resizable=False,
enabled_when='has_scalar_data',
show_label=False),
HGroup(
spring,
Item('scalar_data',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok'),
enabled_when='has_scalar_data'),
show_labels=False,
),
label='Scalar data',
show_border=True,
show_labels=False,
),
_vector_data_group = \
VGroup(
HGroup(
Item('vector_u', label='u',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok')),
Item('vector_v', label='v',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok')),
Item('vector_w', label='w',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok')),
),
label='Vector data',
show_border=True,
),
_optional_vector_data_group = \
VGroup(
HGroup(
Item('has_vector_data', show_label=False),
Item('_vector_data_text', style='readonly',
resizable=False,
show_label=False),
),
HGroup(
Item('vector_u', label='u',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok')),
Item('vector_v', label='v',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok')),
Item('vector_w', label='w',
editor=EnumEditor(name='_data_sources_names',
invalid='_is_not_ok')),
enabled_when='has_vector_data',
),
label='Vector data',
show_border=True,
),
_array_view = \
View(
Item('_array_label', editor=TitleEditor(),
show_label=False),
Group(
Item('_data_sources_wrappers',
editor=TabularEditor(
adapter=ArrayColumnAdapter(),
),
),
show_border=True,
show_labels=False
))
_questions_view = View(
Item('_top_label', editor=TitleEditor(),
show_label=False),
HGroup(
Item('_data_type_text', style='readonly',
resizable=False),
spring,
'data_type',
spring,
show_border=True,
show_labels=False,
),
HGroup(
Item('_self', style='custom',
editor=InstanceEditor(
view_name='_suitable_traits_view'),
),
Group(
# FIXME: Giving up on context sensitive help
# because of lack of time.
#Group(
# Item('_shown_help_text', editor=HTMLEditor(),
# width=300,
# label='Help',
# ),
# show_labels=False,
# label='Help',
#),
#Group(
Item('_preview_button',
enabled_when='_is_ok'),
Item('_preview_window', style='custom',
label='Preview structure'),
show_labels=False,
#label='Preview structure',
#),
#layout='tabbed',
#dock='tab',
),
show_labels=False,
show_border=True,
),
)
_point_data_view = \
View(Group(
Group(_coordinates_group,
label='Position of the data points',
show_border=True,
),
HGroup(
'lines',
Item('_lines_text', style='readonly',
resizable=False),
label='Lines',
show_labels=False,
show_border=True,
),
_optional_scalar_data_group,
_optional_vector_data_group,
# XXX: hack to have more vertical space
Label('\n'),
Label('\n'),
Label('\n'),
))
_surface_data_view = \
View(Group(
_position_group,
_connectivity_group,
_optional_scalar_data_group,
_optional_vector_data_group,
))
_vector_data_view = \
View(Group(
_vector_data_group,
_position_group,
_optional_scalar_data_group,
))
_volumetric_data_view = \
View(Group(
_scalar_data_group,
_position_group,
_optional_vector_data_group,
))
_wizard_view = View(
Group(
HGroup(
Item('_self', style='custom', show_label=False,
editor=InstanceEditor(view='_array_view'),
width=0.17,
),
'_',
Item('_self', style='custom', show_label=False,
editor=InstanceEditor(view='_questions_view'),
),
),
HGroup(
Item('_info_image', editor=ImageEditor(),
visible_when="_is_not_ok"),
Item('_info_text', style='readonly', resizable=False,
visible_when="_is_not_ok"),
spring,
'_cancel_button',
Item('_ok_button', enabled_when='_is_ok'),
show_labels=False,
),
),
title='Import arrays',
resizable=True,
)
#----------------------------------------------------------------------
# Public interface
#----------------------------------------------------------------------
def __init__(self, **traits):
DataSourceFactory.__init__(self, **traits)
self._self = self
def view_wizard(self):
""" Pops up the view of the wizard, and keeps the reference it to
be able to close it.
"""
# FIXME: Workaround for traits bug in enabled_when
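        # Accessing these properties once here forces Traits to initialize
        # them before edit_traits() builds the UI, so that the enabled_when /
        # visible_when expressions that reference them have defined values.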
self.position_type_
self.data_type_
self._suitable_traits_view
self.grid_shape_source
self._is_ok
self.ui = self.edit_traits(view='_wizard_view')
def preview(self):
""" Display a preview of the data structure in the preview
window.
"""
self._preview_window.clear()
self._preview_window.add_source(self.data_source)
data = lambda name: self.data_sources[name]
g = Glyph()
g.glyph.glyph_source.glyph_source = \
g.glyph.glyph_source.glyph_list[0]
g.glyph.scale_mode = 'data_scaling_off'
if not (self.has_vector_data or self.data_type_ == 'vector'):
g.glyph.glyph_source.glyph_source.glyph_type = 'cross'
g.actor.property.representation = 'points'
g.actor.property.point_size = 3.
self._preview_window.add_module(g)
if not self.data_type_ in ('point', 'vector') or self.lines:
s = Surface()
s.actor.property.opacity = 0.3
self._preview_window.add_module(s)
if not self.data_type_ == 'point':
self._preview_window.add_filter(ExtractEdges())
s = Surface()
s.actor.property.opacity = 0.2
self._preview_window.add_module(s)
if __name__ == '__main__':
    from numpy import mgrid, linspace
x, y, z = mgrid[-5:5, -5:5, -5:5]
r = x ** 2 + y ** 2 + z ** 2
X = linspace(0, 8)
data_sources = {
'x': X,
'y': y,
'z': z,
'r': r
}
wizard = DataSourceWizardView(data_sources=data_sources)
wizard.init_arrays()
wizard.guess_arrays()
wizard.view_wizard()
|
|
#!/usr/bin/env python
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module which provides compatibility with older Python versions."""
__all__ = ["PY3", "int", "long", "xrange", "exec_", "callable",
"namedtuple", "property", "defaultdict"]
import sys
# --- python 2/3 compatibility layer
PY3 = sys.version_info >= (3,)
try:
import __builtin__
except ImportError:
import builtins as __builtin__ # py3
if PY3:
int = int
long = int
xrange = range
exec_ = getattr(__builtin__, "exec")
print_ = getattr(__builtin__, "print")
else:
int = int
long = long
xrange = xrange
def exec_(code, globs=None, locs=None):
if globs is None:
            frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
def print_(s):
sys.stdout.write(s + '\n')
sys.stdout.flush()
# removed in 3.0, reintroduced in 3.2
try:
callable = callable
except Exception:
def callable(obj):
for klass in type(obj).__mro__:
if "__call__" in klass.__dict__:
return True
return False
# --- stdlib additions
try:
from collections import namedtuple
except ImportError:
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False, rename=False):
"""A collections.namedtuple implementation written in Python
to support Python versions < 2.6.
Taken from: http://code.activestate.com/recipes/500261/
"""
# Parse and validate the field names. Validation serves two
# purposes, generating informative error messages and preventing
# template injection attacks.
if isinstance(field_names, basestring):
# names separated by whitespace and/or commas
field_names = field_names.replace(',', ' ').split()
field_names = tuple(map(str, field_names))
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
if (not min(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
or not name or name[0].isdigit() or name.startswith('_')
or name in seen):
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not min(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain ' \
'alphanumeric characters and underscores: %r'
% name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' \
% name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a ' \
'number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r'
% name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
# tuple repr without parens or quotes
argtxt = repr(field_names).replace("'", "")[1:-1]
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new dict which maps field names to their values'
return dict(zip(self._fields, self)) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
if verbose:
sys.stdout.write(template + '\n')
sys.stdout.flush()
# Execute the template string in a temporary namespace
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec_(template, namespace)
except SyntaxError:
e = sys.exc_info()[1]
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set
# to the frame where the named tuple is created. Bypass this
        # step in environments where sys._getframe is not defined (Jython
# for example) or sys._getframe is not defined for arguments
# greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
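    # Usage sketch of this fallback (it mirrors collections.namedtuple):
    #   Point = namedtuple('Point', 'x y')
    #   Point(1, 2)._asdict()  ->  {'x': 1, 'y': 2}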
# hack to support property.setter/deleter on python < 2.6
# http://docs.python.org/library/functions.html?highlight=property#property
if hasattr(property, 'setter'):
property = property
else:
class property(__builtin__.property):
__metaclass__ = type
def __init__(self, fget, *args, **kwargs):
super(property, self).__init__(fget, *args, **kwargs)
self.__doc__ = fget.__doc__
def getter(self, method):
return property(method, self.fset, self.fdel)
def setter(self, method):
return property(self.fget, method, self.fdel)
def deleter(self, method):
return property(self.fget, self.fset, method)
# py 2.5 collections.defaultdict
# Taken from:
# http://code.activestate.com/recipes/523034-emulate-collectionsdefaultdict/
# credits: Jason Kirtland
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
# py 2.5 functools.wraps
try:
from functools import wraps
except ImportError:
def wraps(original):
def inner(fn):
# see functools.WRAPPER_ASSIGNMENTS
for attribute in ['__module__',
'__name__',
'__doc__'
]:
setattr(fn, attribute, getattr(original, attribute))
# see functools.WRAPPER_UPDATES
for attribute in ['__dict__',
]:
if hasattr(fn, attribute):
getattr(fn, attribute).update(getattr(original, attribute))
else:
setattr(fn, attribute,
getattr(original, attribute).copy())
return fn
return inner
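# Minimal self-test sketch exercising the shims defined above; it only uses
# names defined in this module and runs on both Python 2 and Python 3.
if __name__ == "__main__":
    Point = namedtuple("Point", "x y")
    assert Point(1, 2)._asdict() == {"x": 1, "y": 2}
    counts = defaultdict(int)
    for word in ["a", "b", "a"]:
        counts[word] += 1
    assert counts["a"] == 2 and counts["c"] == 0
    namespace = {}
    exec_("x = 1 + 1", namespace)
    assert namespace["x"] == 2
    assert list(xrange(3)) == [0, 1, 2]
    print_("compat self-test passed")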
|
|
"""Module that reads binary Plink files."""
# This file is part of pyplink.
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Louis-Philippe Lemieux Perreault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import logging
from itertools import repeat
from collections import Counter
from io import UnsupportedOperation
try:
from itertools import zip_longest as zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import numpy as np
import pandas as pd
from six.moves import range
__author__ = "Louis-Philippe Lemieux Perreault"
__copyright__ = "Copyright 2014 Louis-Philippe Lemieux Perreault"
__license__ = "MIT"
__all__ = ["PyPlink"]
# The logger
logger = logging.getLogger(__name__)
# The recoding values
_geno_recode = {1: -1, # Unknown genotype
2: 1, # Heterozygous genotype
0: 2, # Homozygous A1
3: 0} # Homozygous A2
_byte_recode = dict(value[::-1] for value in _geno_recode.items())
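# Worked example of the 2-bit BED encoding: each byte packs up to four
# samples, least-significant bit pair first.  For the byte 0b00011011 (0x1b)
# the successive 2-bit values are 3, 2, 1, 0, which _geno_recode maps to the
# additive genotypes [0, 1, -1, 2] (hom. A2, het., missing, hom. A1).
# _byte_recode is the reverse mapping ({-1: 1, 1: 2, 2: 0, 0: 3}) used when
# writing BED files.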
class PyPlink(object):
"""Reads and store a set of binary Plink files.
Args:
prefix (str): The prefix of the binary Plink files.
mode (str): The open mode for the binary Plink file.
bed_format (str): The type of bed (SNP-major or INDIVIDUAL-major).
    Reads or writes binary Plink files (BED, BIM and FAM).
.. code-block:: python
from pyplink import PyPlink
# Reading BED files
with PyPlink("plink_file_prefix") as bed:
pass
# Writing BED files
with PyPlink("plink_file_prefix", "w") as bed:
pass
"""
# The genotypes values
_geno_values = np.array(
[
[_geno_recode[(i >> j) & 3] for j in range(0, 7, 2)]
for i in range(256)
],
dtype=np.int8,
)
def __init__(self, prefix, mode="r", bed_format="SNP-major"):
"""Initializes a new PyPlink instance."""
# The mode
self._mode = mode
# The bed format
if bed_format not in {"SNP-major", "INDIVIDUAL-major"}:
raise ValueError("invalid bed format: {}".format(bed_format))
self._bed_format = bed_format
# These are the name of the files
self.bed_filename = "{}.bed".format(prefix)
self.bim_filename = "{}.bim".format(prefix)
self.fam_filename = "{}.fam".format(prefix)
if self._mode == "r":
if self._bed_format != "SNP-major":
raise ValueError("only SNP-major format is supported "
"with mode 'r'")
# Checking that all the files exists (otherwise, error...)
for filename in (self.bed_filename, self.bim_filename,
self.fam_filename):
if not os.path.isfile(filename):
raise IOError("No such file: '{}'".format(filename))
# Setting BIM and FAM to None
self._bim = None
self._fam = None
# Reading the input files
self._read_bim()
self._read_fam()
self._read_bed()
# Where we're at
self._n = 0
elif self._mode == "w":
# The dummy number of samples and bytes
self._nb_values = None
# Opening the output BED file
self._bed = open(self.bed_filename, "wb")
self._write_bed_header()
else:
raise ValueError("invalid mode: '{}'".format(self._mode))
def __repr__(self):
"""The representation of the PyPlink object."""
if self._mode == "r":
return "PyPlink({:,d} samples; {:,d} markers)".format(
self.get_nb_samples(),
self.get_nb_markers(),
)
return 'PyPlink(mode="w")'
def __iter__(self):
"""The __iter__ function."""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
return self
def __next__(self):
"""The __next__ function."""
return self.next()
def __enter__(self):
"""Entering the context manager."""
return self
def __exit__(self, *args):
"""Exiting the context manager."""
self.close()
def close(self):
"""Closes the BED file."""
# Closing the BED file
self._bed.close()
def next(self):
"""Returns the next marker.
Returns:
tuple: The marker name as a string and its genotypes as a
:py:class:`numpy.ndarray`.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
self._n += 1
if self._n > self._nb_markers:
raise StopIteration()
return self._bim.index[self._n - 1], self._read_current_marker()
def _read_current_marker(self):
"""Reads the current marker and returns its genotypes."""
return self._geno_values[
np.fromstring(self._bed.read(self._nb_bytes), dtype=np.uint8)
].flatten(order="C")[:self._nb_samples]
def seek(self, n):
"""Gets to a certain marker position in the BED file.
Args:
n (int): The index of the marker to seek to.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
if 0 <= n < self._nb_markers:
self._n = n
self._bed.seek(self._get_seek_position(n))
else:
# Invalid seek value
raise ValueError("invalid position in BED: {}".format(n))
def _get_seek_position(self, n):
"""Gets the seek position in the file (including special bytes).
Args:
n (int): The index of the marker to seek to.
"""
return 3 + self._nb_bytes * n
def _read_bim(self):
"""Reads the BIM file."""
# Reading the BIM file and setting the values
bim = pd.read_csv(self.bim_filename, delim_whitespace=True,
names=["chrom", "snp", "cm", "pos", "a1", "a2"],
dtype=dict(snp=str, a1=str, a2=str))
# Saving the index as integer
bim["i"] = bim.index
# Checking for duplicated markers
try:
bim = bim.set_index("snp", verify_integrity=True)
self._has_duplicated = False
except ValueError as e:
# Setting this flag to true
self._has_duplicated = True
# Finding the duplicated markers
duplicated = bim.snp.duplicated(keep=False)
duplicated_markers = bim.loc[duplicated, "snp"]
duplicated_marker_counts = duplicated_markers.value_counts()
# The dictionary that will contain information about the duplicated
# markers
self._dup_markers = {
m: [] for m in duplicated_marker_counts.index
}
# Logging a warning
logger.warning("Duplicated markers found")
for marker, count in duplicated_marker_counts.iteritems():
logger.warning(" - {}: {:,d} times".format(marker, count))
logger.warning("Appending ':dupX' to the duplicated markers "
"according to their location in the BIM file")
# Renaming the markers
counter = Counter()
for i, marker in duplicated_markers.iteritems():
counter[marker] += 1
new_name = "{}:dup{}".format(marker, counter[marker])
bim.loc[i, "snp"] = new_name
# Updating the dictionary containing the duplicated markers
self._dup_markers[marker].append(new_name)
# Resetting the index
bim = bim.set_index("snp", verify_integrity=True)
# Encoding the allele
# - The original 0 is the actual 2 (a1/a1)
# - The original 2 is the actual 1 (a1/a2)
# - The original 3 is the actual 0 (a2/a2)
# - The original 1 is the actual -1 (no call)
allele_encoding = np.array(
[bim.a2 * 2, bim.a1 + bim.a2, bim.a1 * 2,
list(repeat("00", bim.shape[0]))],
dtype="U2",
)
self._allele_encoding = allele_encoding.T
# Saving the data in the object
self._bim = bim[["chrom", "pos", "cm", "a1", "a2", "i"]]
self._nb_markers = self._bim.shape[0]
def get_bim(self):
"""Returns the BIM file.
Returns:
pandas.DataFrame: The BIM file.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
return self._bim.drop("i", axis=1)
def get_nb_markers(self):
"""Returns the number of markers.
Returns:
int: The number of markers in the dataset.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
return self._nb_markers
def get_duplicated_markers(self):
"""Returns the duplicated markers, if any.
        Returns:
            dict: The duplicated markers and their new names (might be empty).
"""
if self._has_duplicated:
return self._dup_markers
else:
return {}
def _read_fam(self):
"""Reads the FAM file."""
# Reading the FAM file and setting the values
fam = pd.read_csv(self.fam_filename, delim_whitespace=True,
names=["fid", "iid", "father", "mother", "gender",
"status"],
dtype=dict(fid=str, iid=str, father=str, mother=str))
# Getting the byte and bit location of each samples
fam["byte"] = [
            int(np.ceil((i + 1) / 4.0)) - 1 for i in range(len(fam))
]
fam["bit"] = [(i % 4) * 2 for i in range(len(fam))]
# Saving the data in the object
self._fam = fam
self._nb_samples = self._fam.shape[0]
def get_fam(self):
"""Returns the FAM file.
Returns:
pandas.DataFrame: The FAM file.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
return self._fam.drop(["byte", "bit"], axis=1)
def get_nb_samples(self):
"""Returns the number of samples.
Returns:
int: The number of samples in the dataset.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
return self._nb_samples
def _read_bed(self):
"""Reads the BED file."""
        # Checking if BIM and FAM files were both read
if (self._bim is None) or (self._fam is None):
raise RuntimeError("no BIM or FAM file were read")
# The number of bytes per marker
self._nb_bytes = int(np.ceil(self._nb_samples / 4.0))
# Checking the file is valid by looking at the first 3 bytes and the
# last entry (correct size)
with open(self.bed_filename, "rb") as bed_file:
# Checking that the first two bytes are OK
if (ord(bed_file.read(1)) != 108) or (ord(bed_file.read(1)) != 27):
raise ValueError("not a valid BED file: "
"{}".format(self.bed_filename))
# Checking that the format is SNP-major
if ord(bed_file.read(1)) != 1:
raise ValueError("not in SNP-major format (please recode): "
"{}".format(self.bed_filename))
# Checking the last entry (for BED corruption)
seek_index = self._get_seek_position(self._bim.iloc[-1, :].i)
bed_file.seek(seek_index)
geno = self._geno_values[
np.fromstring(bed_file.read(self._nb_bytes), dtype=np.uint8)
].flatten(order="C")[:self._nb_samples]
if geno.shape[0] != self._nb_samples:
raise ValueError("invalid number of entries: corrupted BED?")
# Opening the file for the rest of the operations (reading 3 bytes)
self._bed = open(self.bed_filename, "rb")
self._bed.read(3)
def _write_bed_header(self):
"""Writes the BED first 3 bytes."""
# Writing the first three bytes
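        # 108 (0x6c) and 27 (0x1b) are the Plink BED magic numbers; the third
        # byte is 1 for SNP-major files and 0 for INDIVIDUAL-major files.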
final_byte = 1 if self._bed_format == "SNP-major" else 0
self._bed.write(bytearray((108, 27, final_byte)))
def iter_geno(self):
"""Iterates over genotypes from the beginning of the BED file.
Returns:
tuple: The name of the marker as a string, and its genotypes as a
:py:class:`numpy.ndarray` (additive format).
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
# Seeking back at the beginning of the file
self.seek(0)
# Return itself (the generator)
return self
def iter_acgt_geno(self):
"""Iterates over genotypes (ACGT format).
Returns:
tuple: The name of the marker as a string, and its genotypes as a
:py:class:`numpy.ndarray` (ACGT format).
"""
# Need to iterate over itself, and modify the actual genotypes
for i, (marker, geno) in enumerate(self.iter_geno()):
yield marker, self._allele_encoding[i][geno]
def iter_geno_marker(self, markers, return_index=False):
"""Iterates over genotypes for a list of markers.
Args:
markers (list): The list of markers to iterate onto.
            return_index (bool): Whether to return the marker's index or not.
Returns:
tuple: The name of the marker as a string, and its genotypes as a
:py:class:`numpy.ndarray` (additive format).
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
# If string, we change to list
if isinstance(markers, str):
markers = [markers]
# Iterating over all markers
if return_index:
for marker in markers:
geno, seek = self.get_geno_marker(marker, return_index=True)
yield marker, geno, seek
else:
for marker in markers:
yield marker, self.get_geno_marker(marker)
def iter_acgt_geno_marker(self, markers):
"""Iterates over genotypes for a list of markers (ACGT format).
Args:
markers (list): The list of markers to iterate onto.
Returns:
tuple: The name of the marker as a string, and its genotypes as a
:py:class:`numpy.ndarray` (ACGT format).
"""
# We iterate over the markers
for snp, geno, s in self.iter_geno_marker(markers, return_index=True):
# Getting the SNP position and converting to ACGT
yield snp, self._allele_encoding[s][geno]
def get_geno_marker(self, marker, return_index=False):
"""Gets the genotypes for a given marker.
Args:
marker (str): The name of the marker.
            return_index (bool): Whether to return the marker's index or not.
Returns:
numpy.ndarray: The genotypes of the marker (additive format).
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
# Check if the marker exists
if marker not in self._bim.index:
raise ValueError("{}: marker not in BIM".format(marker))
# Seeking to the correct position
seek_index = self._bim.loc[marker, "i"]
self.seek(seek_index)
if return_index:
return self._read_current_marker(), seek_index
return self._read_current_marker()
def get_acgt_geno_marker(self, marker):
"""Gets the genotypes for a given marker (ACGT format).
Args:
marker (str): The name of the marker.
Returns:
numpy.ndarray: The genotypes of the marker (ACGT format).
"""
# Getting the marker's genotypes
geno, snp_position = self.get_geno_marker(marker, return_index=True)
# Returning the ACGT's format of the genotypes
return self._allele_encoding[snp_position][geno]
def write_genotypes(self, genotypes):
"""Write genotypes to binary file.
Args:
genotypes (numpy.ndarray): The genotypes to write in the BED file.
"""
if self._mode != "w":
raise UnsupportedOperation("not available in 'r' mode")
# Initializing the number of samples if required
if self._nb_values is None:
self._nb_values = len(genotypes)
# Checking the expected number of samples
if self._nb_values != len(genotypes):
raise ValueError("{:,d} samples expected, got {:,d}".format(
self._nb_values,
len(genotypes),
))
# Writing to file
byte_array = [
g[0] | (g[1] << 2) | (g[2] << 4) | (g[3] << 6) for g in
self._grouper((_byte_recode[geno] for geno in genotypes), 4)
]
self._bed.write(bytearray(byte_array))
@staticmethod
def _grouper(iterable, n, fillvalue=0):
"""Collect data into fixed-length chunks or blocks.
        Args:
            iterable (iterable): The iterable to collect into chunks.
            n (int): The size of the chunk.
            fillvalue (int): The fill value.
Returns:
iterator: An iterator over the chunks.
"""
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
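# Usage sketch for PyPlink; "some_study" and "new_study" are hypothetical
# file prefixes (some_study.bed/.bim/.fam must already exist, and the
# matching .bim/.fam files for "new_study" would still have to be written
# separately).
if __name__ == "__main__":
    # Reading: iterate over markers in additive format (-1, 0, 1 or 2).
    with PyPlink("some_study") as bed:
        print("{:,d} samples, {:,d} markers".format(bed.get_nb_samples(),
                                                    bed.get_nb_markers()))
        for marker, genotypes in bed.iter_geno():
            print("{}: {}".format(marker, genotypes[:10]))
            break
    # Writing: one write_genotypes() call per marker, additive format.
    with PyPlink("new_study", "w") as bed:
        bed.write_genotypes([0, 1, 2, -1])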
|
|
from sklearn import metrics, preprocessing, pipeline, \
feature_extraction, decomposition, model_selection
import sklearn
import pandas as pd
import numpy as np
from time import gmtime, strftime
import numpy.random as rng
# from multiprocessing.dummy import Pool
# import concurrent.futures
import tensorflow as tf
# import multiprocessing as mp
import os
from sklearn.cross_validation import KFold
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Flatten, Reshape
from keras.layers.normalization import BatchNormalization
from keras.layers.embeddings import Embedding
from keras.layers import Input, concatenate, merge, LSTM, Lambda, Add, Activation, Subtract
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.optimizers import SGD, RMSprop, Adam, Nadam
from keras.callbacks import EarlyStopping, Callback
from keras.utils import np_utils
from keras import backend as K
from keras.regularizers import l1, l2
from sklearn.metrics import log_loss
from keras import __version__ as keras_version
RANK_SCALE = 1
DROPOUT_RATE = 0.5 #0.35
EPSILON = 1e-7
L2_NORM = 0
R_RANK_GAMMA = 0.1
R_RANK_P = 1
HIDDEN_UNITS = [200, 150, 100]
DNN_EPOCHS = 40
BATCH_SIZE = 64
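# NOTE: `category_nunique`, `one_hot`, `continus_binary_indice` and `min_pred`
# are referenced below but are not defined in this excerpt; they are assumed
# to be provided elsewhere in the original script.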
class RocAucEvaluation(Callback):
def __init__(self, validation_data=(), interval=1):
super(Callback, self).__init__()
self.interval = interval
self.X_val, self.y_val = validation_data
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)
score = metrics.roc_auc_score(self.y_val, y_pred)
print("\n ROC-AUC - epoch: %d - score: %.6f \n" % (epoch+1, score))
def dense_bn_layer(input_tensor, hn_num, name = None, dropout = True, bn = True):
"""
"""
hn_num = int(hn_num)
x = Dense(hn_num, kernel_regularizer = l2(L2_NORM))(input_tensor)
if bn:
x = BatchNormalization(name = name)(x)
if dropout:
x = Dropout(DROPOUT_RATE)(x)
return x
def dense_bn_act_layer(input_tensor, hn_num, name = None, act = 'relu', dropout = True, bn = True):
"""
"""
hn_num = int(hn_num)
x = Dense(hn_num, kernel_regularizer = l2(L2_NORM))(input_tensor)
if bn:
x = BatchNormalization()(x)
if dropout:
x = Dropout(DROPOUT_RATE)(x)
x = Activation(act, name = name)(x)
return x
def identity_block(input_tensor, hn_num, name = None, dropout = True):
"""
"""
adjust_layer = dense_bn_layer(input_tensor, hn_num, dropout = dropout)
x = Activation('relu')(adjust_layer)
# x = dense_bn_act_layer(x, hn_num * 3 / 2, dropout = dropout)
x = dense_bn_act_layer(x, hn_num, dropout = dropout)
x = dense_bn_layer(x, hn_num, dropout = dropout)
x = Add()([x, adjust_layer])
x = Activation('relu', name = name)(x)
return x
def boosting_identity_block(input_tensor, hn_num, name = None):
"""
"""
boost_input = Input(shape=(1,))
adjust_layer = dense_bn_layer(input_tensor, hn_num)
x = Activation('relu')(adjust_layer)
x = dense_bn_act_layer(x, hn_num * 3 / 2)
x = dense_bn_layer(x, hn_num)
x = Add()([x, adjust_layer])
x = Activation('relu', name = name)(x)
return x
def res_net(input_shape, hns = [8, 6, 4, 4], classes = 2):
"""
"""
inputs = Input(shape=input_shape)
x = BatchNormalization()(inputs)
x = identity_block(x, hns[0], name = 'block0', dropout = False)
x = identity_block(x, hns[1], name = 'block1', dropout = False)
x = identity_block(x, hns[2], name = 'block2', dropout = False)
#x = identity_block(x, hns[3], name = 'block3', dropout = True)
x = Dense(1, name = 'pre_sigmoid')(x)
x = BatchNormalization()(x)
proba = Activation('sigmoid')(x)
    model = Model(inputs, proba)
model.compile(optimizer=Nadam(lr = 0.001), loss='binary_crossentropy')
return model
def boosting_dnn(input_shape, hns = [8, 6, 4, 7], classes = 2):
"""
"""
inputs = Input(input_shape)
boost_input = Lambda(lambda x: x[:, -1])(inputs)
# dnn_input = Lambda(lambda x: x[:, :-1])(inputs)
dnn_input = inputs
#dnn_module
# dnn_model = create_dnn((input_shape[0] - 1,), hns)
dnn_model = create_dnn((input_shape[0],), hns)
dnn_pre_sigmoid = Model(dnn_model.input, dnn_model.get_layer('pre_sigmoid').output)(dnn_input)
# boost
pre_sigmoid = Add(name = 'pre_sigmoid')([dnn_pre_sigmoid, boost_input])
proba = Activation('sigmoid')(pre_sigmoid)
model = Model(inputs, proba)
model.compile(optimizer=Nadam(lr = 0.001), loss='binary_crossentropy')
return model
def boosting_res_net(input_shape, hns = [128, 64, 16, 4], classes = 2, out_layer_name = None):
"""
"""
inputs = Input(input_shape)
boost_input = Lambda(lambda x: x[:, -1])(inputs)
# res_module
res_inputs = Lambda(lambda x: x[:, :-1])(inputs)
res_model = res_net((input_shape[0] - 1, ), hns)
#res_inputs = inputs
#res_model = res_net(input_shape, hns)
res_pre_sigmoid = Model(res_model.input, res_model.get_layer('pre_sigmoid').output)(res_inputs)
# boost
pre_sigmoid = Add(name = 'pre_sigmoid')([res_pre_sigmoid, boost_input])
proba = Activation('sigmoid', name = out_layer_name)(pre_sigmoid)
model = Model(inputs, proba)
model.compile(optimizer=Nadam(lr = 0.001), loss='binary_crossentropy')
return model
def rank_net(input_shape, hns = [6, 4, 4, 4], classes = 2):
"""
"""
res_model = res_net((input_shape[1],), hns)
res_model = Model(res_model.input, res_model.get_layer('pre_sigmoid').output)
inputs = Input(input_shape)
minor_inputs = Lambda(lambda x: x[:, 0], name = 'minor_input')(inputs)
pred_minor = res_model(minor_inputs)
minor_out_proba = Lambda(lambda x: x, name = 'minor_out_proba')(pred_minor)
major_inputs = Lambda(lambda x: x[:, 1], name = 'major_input')(inputs)
pred_major = res_model(major_inputs)
major_out_proba = Lambda(lambda x: x, name = 'major_out_proba')(pred_major)
sub = Subtract()([major_out_proba, minor_out_proba])
sub = Lambda(lambda x: x * RANK_SCALE, name = 'rank_scale_layer')(sub)
proba = Activation('sigmoid')(sub)
model = Model(inputs, proba)
# model.compile(optimizer=Nadam(lr = 0.0005), loss=min_pred)
model.compile(optimizer=Nadam(lr = 0.001), loss='binary_crossentropy')
return model
def ll_rank_net(input_shape, hns = [128, 64, 4, 4], classes = 2):
"""
"""
# res_model = boosting_dnn((input_shape[1],), hns)
res_model = create_dnn((input_shape[1],), hns)
# res_model = boosting_res_net((input_shape[1],), hns)
# res_model = create_dnn((input_shape[1],), hns)
# res_model = res_net((input_shape[1],), hns)
res_model = Model(res_model.input, res_model.get_layer('pre_sigmoid').output)
inputs = Input(input_shape)
minor_inputs = Lambda(lambda x: x[:, 0], name = 'minor_input')(inputs)
pred_minor = res_model(minor_inputs)
minor_pre_sigmoid = Lambda(lambda x: x, name = 'minor_pre_sigmoid')(pred_minor)
minor_proba = Activation('sigmoid', name = 'minor_pred')(minor_pre_sigmoid)
# minor_pred_loss = Lambda(lambda x: -1 * K.log(K.clip(x, EPSILON, 1)), name = 'minor_loss')(minor_proba)
minor_pred_loss = Lambda(lambda x: 1 * (1 - x), name = 'minor_loss')(minor_proba)
major_inputs = Lambda(lambda x: x[:, 1], name = 'major_input')(inputs)
pred_major = res_model(major_inputs)
major_pre_sigmoid = Lambda(lambda x: x, name = 'major_pre_sigmoid')(pred_major)
major_proba = Activation('sigmoid', name = 'major_pred')(major_pre_sigmoid)
# major_pred_loss = Lambda(lambda x: -1 * K.log(K.clip(1 - x, EPSILON, 1)), name = 'major_loss')(major_proba)
major_pred_loss = Lambda(lambda x: 1 * x, name = 'major_loss')(major_proba)
# sub = Subtract()([minor_pre_sigmoid, major_pre_sigmoid])
sub = Subtract()([minor_proba, major_proba])
sub = Lambda(lambda x: x * RANK_SCALE, name = 'rank_scale_layer')(sub)
sub = Lambda(lambda x: -1 * (x - R_RANK_GAMMA), name = 'r_rank_gamma_layer')(sub)
# rank_proba = Activation('sigmoid')(sub)
rank_proba = Activation('relu')(sub)
rank_loss = Lambda(lambda x: x ** R_RANK_P, name = 'rank_loss')(rank_proba)
# rank_loss = Activation('tanh')(rank_proba)
# rank_loss = Lambda(lambda x: -1 * K.log(K.clip(x, EPSILON, 1)), name = 'rank_loss')(rank_proba)
loss = Add()([minor_pred_loss, rank_loss, major_pred_loss])
model = Model(inputs, rank_loss)
model.compile(optimizer=Nadam(), loss=min_pred)
# model.compile(optimizer=Nadam(lr = 0.001), loss='binary_crossentropy')
return model
def boosting_rank_net(input_shape, hns = [8, 6, 4, 4], classes = 2):
"""
"""
res_model = boosting_res_net((input_shape[1],), hns, out_layer_name = 'proba')
res_model = Model(res_model.input, res_model.get_layer('pre_sigmoid').output)
inputs = Input(input_shape)
minor_inputs = Lambda(lambda x: x[:, 0], name = 'minor_input')(inputs)
pred_minor = res_model(minor_inputs)
minor_out_proba = Lambda(lambda x: x, name = 'minor_out_proba')(pred_minor)
major_inputs = Lambda(lambda x: x[:, 1], name = 'major_input')(inputs)
pred_major = res_model(major_inputs)
major_out_proba = Lambda(lambda x: x, name = 'major_out_proba')(pred_major)
sub = Subtract()([major_out_proba, minor_out_proba])
sub = Lambda(lambda x: x * RANK_SCALE, name = 'rank_scale_layer')(sub)
proba = Activation('sigmoid')(sub)
model = Model(inputs, proba)
model.compile(optimizer=Nadam(lr = 0.001), loss=min_pred)
return model
def boosting_parallel_res_net(input_shape, hns = [8, 6, 4, 7], classes = 2):
"""
"""
boost_input = Input(shape=(1,))
# res_module
res_shape = (input_shape[0] - 1,)
boost_res_net_model = boosting_res_net(input_shape)
res_inputs = Input(shape = res_shape)
boost_res_net_out_list = [boost_res_net_model([res_inputs, boost_input]) for i in range(8)]
boost_res_net_out = concatenate(boost_res_net_out_list, axis = 1)
x = Dense(4, activation = 'sigmoid')(boost_res_net_out)
proba = Dense(1, activation = 'sigmoid')(x)
model = Model([res_inputs, boost_input], proba)
model.compile(optimizer=Nadam(lr = 0.001), loss='binary_crossentropy')
return model
def create_embedding_layer():
input_list = []
embedding_list = []
for nunique in category_nunique:
input_ = Input(shape=(1, ), dtype='int32')
# x_ohe = Lambda(K.one_hot, arguments={'num_classes': nunique})(input_)
x_ohe = Lambda(one_hot, arguments={'num_classes': nunique})(input_)
# x_ohe = K.one_hot(input_, nunique)
input_list.append(input_)
embedding_list.append(x_ohe)
return input_list, concatenate(embedding_list, axis = 2)
def create_dnn(input_shape, HIDDEN_UNITS = [16, 8, 4], DNN_BN = False, DROPOUT_RATE = 0):
inputs = Input(input_shape)
x = BatchNormalization()(inputs)
x = dense_bn_act_layer(x, HIDDEN_UNITS[0], name = 'hn0', dropout = True)
x = dense_bn_act_layer(x, HIDDEN_UNITS[1], name = 'hn1', dropout = True)
x = dense_bn_act_layer(x, HIDDEN_UNITS[2], name = 'hn2', dropout = True)
x = Dense(6, name = 'pre_sigmoid')(x)
proba = Activation('sigmoid')(x)
model = Model(inputs, proba)
model.compile(optimizer=Adam(), loss='binary_crossentropy')
return model
def create_embedding_model():
"""
"""
dense_input = Input(shape=(len(continus_binary_indice),))
input_list, embedding_layer = create_embedding_layer()
embedding_layer = Flatten()(embedding_layer)
merge_input = concatenate([dense_input, embedding_layer], axis = 1)
merge_len = len(continus_binary_indice) + sum(category_nunique)
    output = create_dnn((merge_len,))(merge_input)
model = Model([dense_input] + input_list, output)
# optimizer = RMSprop(lr=1e-3, rho = 0.9, epsilon = 1e-8)
model.compile(optimizer=Adam(), loss='binary_crossentropy')
return model
def keras_train(train_part, train_part_label, valide_part, valide_part_label, fold_seed):
"""
Keras Training
"""
print("-----Keras training-----")
model = create_dnn((train_part.shape[1],), HIDDEN_UNITS)
# model = res_net((train_part.shape[1],), HIDDEN_UNITS)
callbacks = [
EarlyStopping(monitor='val_loss', patience=5, verbose=0),
RocAucEvaluation(validation_data=(valide_part, valide_part_label), interval=1)
]
model.fit(train_part, train_part_label, batch_size=BATCH_SIZE, epochs=DNN_EPOCHS,
shuffle=True, verbose=2,
validation_data=(valide_part, valide_part_label)
, callbacks=callbacks)
return model
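# Driver sketch for keras_train(); `train_data` and `train_label` are
# hypothetical numpy arrays assumed to be loaded elsewhere in the script.
def keras_cv_train(train_data, train_label, fold_seed=2017, n_folds=5):
    """Runs keras_train() on each KFold split and returns the fold models."""
    models = []
    kf = KFold(len(train_label), n_folds=n_folds, shuffle=True,
               random_state=fold_seed)
    for train_index, valide_index in kf:
        model = keras_train(train_data[train_index], train_label[train_index],
                            train_data[valide_index], train_label[valide_index],
                            fold_seed)
        models.append(model)
    return models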
|
|
"""Hierarchical Agglomerative Clustering
These routines perform some hierarchical agglomerative clustering of some
input data.
Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort,
Gael Varoquaux
License: BSD 3 clause
"""
from heapq import heapify, heappop, heappush, heappushpop
import warnings
import sys
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..externals.joblib import Memory
from ..externals import six
from ..metrics.pairwise import paired_distances, pairwise_distances
from ..utils import check_array
from ..utils.sparsetools import connected_components
from . import _hierarchical
from ._feature_agglomeration import AgglomerationTransform
from ..utils.fast_dict import IntFloatDict
if sys.version_info[0] > 2:
xrange = range
###############################################################################
# For non fully-connected graphs
def _fix_connectivity(X, connectivity, n_components=None,
affinity="euclidean"):
"""
Fixes the connectivity matrix
- copies it
- makes it symmetric
- converts it to LIL if necessary
- completes it if necessary
"""
n_samples = X.shape[0]
if (connectivity.shape[0] != n_samples or
connectivity.shape[1] != n_samples):
raise ValueError('Wrong shape for connectivity matrix: %s '
'when X is %s' % (connectivity.shape, X.shape))
# Make the connectivity matrix symmetric:
connectivity = connectivity + connectivity.T
# Convert connectivity matrix to LIL
if not sparse.isspmatrix_lil(connectivity):
if not sparse.isspmatrix(connectivity):
connectivity = sparse.lil_matrix(connectivity)
else:
connectivity = connectivity.tolil()
# Compute the number of nodes
n_components, labels = connected_components(connectivity)
if n_components > 1:
warnings.warn("the number of connected components of the "
"connectivity matrix is %d > 1. Completing it to avoid "
"stopping the tree early." % n_components,
stacklevel=2)
# XXX: Can we do without completing the matrix?
for i in xrange(n_components):
idx_i = np.where(labels == i)[0]
Xi = X[idx_i]
for j in xrange(i):
idx_j = np.where(labels == j)[0]
Xj = X[idx_j]
D = pairwise_distances(Xi, Xj, metric=affinity)
ii, jj = np.where(D == np.min(D))
ii = ii[0]
jj = jj[0]
connectivity[idx_i[ii], idx_j[jj]] = True
connectivity[idx_j[jj], idx_i[ii]] = True
n_components = 1
return connectivity
###############################################################################
# Hierarchical tree building functions
def ward_tree(X, connectivity=None, n_components=None, n_clusters=None,
return_distance=False):
"""Ward clustering based on a Feature matrix.
Recursively merges the pair of clusters that minimally increases
within-cluster variance.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Parameters
----------
X : array, shape (n_samples, n_features)
feature matrix representing n_samples samples to be clustered
connectivity : sparse matrix (optional).
connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
        Default is None, i.e., the Ward algorithm is unstructured.
n_components : int (optional)
Number of connected components. If None the number of connected
components is estimated from the connectivity matrix.
n_clusters : int (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
return_distance: bool (optional)
If True, return the distance between the clusters.
Returns
-------
children : 2D array, shape (n_nodes, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree
parents : 1D array, shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
        is specified; otherwise 'None' is returned.
distances : 1D array, shape (n_nodes, )
Only returned if return_distance is set to True (for compatibility).
The distances between the centers of the nodes. `distances[i]`
corresponds to a weighted euclidean distance between
        the nodes `children[i, 0]` and `children[i, 1]`. If the nodes refer to
leaves of the tree, then `distances[i]` is their unweighted euclidean
distance. Distances are updated in the following way
(from scipy.hierarchy.linkage):
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented '
'only for structured clustering (i.e. with '
'explicit connectivity). The algorithm '
'will build the full tree and only '
'retain the lower branches required '
'for the specified number of clusters',
stacklevel=2)
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.intp)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
else:
return children_, 1, n_samples, None
connectivity = _fix_connectivity(X, connectivity,
n_components=n_components)
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
if n_clusters > n_samples:
raise ValueError('Cannot provide more clusters than samples. '
'%i n_clusters was asked, and there are %i samples.'
% (n_clusters, n_samples))
n_nodes = 2 * n_samples - n_clusters
# create inertia matrix
coord_row = []
coord_col = []
A = []
for ind, row in enumerate(connectivity.rows):
A.append(row)
# We keep only the upper triangular for the moments
# Generator expressions are faster than arrays on the following
row = [i for i in row if i < ind]
coord_row.extend(len(row) * [ind, ])
coord_col.extend(row)
coord_row = np.array(coord_row, dtype=np.intp, order='C')
coord_col = np.array(coord_col, dtype=np.intp, order='C')
# build moments as a list
moments_1 = np.zeros(n_nodes, order='C')
moments_1[:n_samples] = 1
moments_2 = np.zeros((n_nodes, n_features), order='C')
moments_2[:n_samples] = X
inertia = np.empty(len(coord_row), dtype=np.float, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col,
inertia)
inertia = list(six.moves.zip(inertia, coord_row, coord_col))
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=bool)
children = []
if return_distance:
distances = np.empty(n_nodes - n_samples)
not_visited = np.empty(n_nodes, dtype=np.int8, order='C')
# recursive merge loop
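    # The heap can contain stale edges whose endpoints were already merged;
    # they are discarded lazily when popped (the `while True` loop below).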
for k in range(n_samples, n_nodes):
# identify the merge
while True:
inert, i, j = heappop(inertia)
if used_node[i] and used_node[j]:
break
parent[i], parent[j] = k, k
children.append((i, j))
used_node[i] = used_node[j] = False
if return_distance: # store inertia value
distances[k - n_samples] = inert
# update the moments
moments_1[k] = moments_1[i] + moments_1[j]
moments_2[k] = moments_2[i] + moments_2[j]
# update the structure matrix A and the inertia matrix
coord_col = []
not_visited.fill(1)
not_visited[k] = 0
_hierarchical._get_parents(A[i], coord_col, parent, not_visited)
_hierarchical._get_parents(A[j], coord_col, parent, not_visited)
# List comprehension is faster than a for loop
[A[l].append(k) for l in coord_col]
A.append(coord_col)
coord_col = np.array(coord_col, dtype=np.intp, order='C')
coord_row = np.empty(coord_col.shape, dtype=np.intp, order='C')
coord_row.fill(k)
n_additions = len(coord_row)
ini = np.empty(n_additions, dtype=np.float, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2,
coord_row, coord_col, ini)
# List comprehension is faster than a for loop
[heappush(inertia, (ini[idx], k, coord_col[idx]))
for idx in range(n_additions)]
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# sort children to get consistent output with unstructured version
children = [c[::-1] for c in children]
children = np.array(children) # return numpy array for efficient caching
if return_distance:
# 2 is scaling factor to compare w/ unstructured version
distances = np.sqrt(2. * distances)
return children, n_components, n_leaves, parent, distances
else:
return children, n_components, n_leaves, parent
# average and complete linkage
def linkage_tree(X, connectivity=None, n_components=None,
n_clusters=None, linkage='complete', affinity="euclidean",
return_distance=False):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Parameters
----------
X : array, shape (n_samples, n_features)
feature matrix representing n_samples samples to be clustered
connectivity : sparse matrix (optional).
connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
        Default is None, i.e., the linkage algorithm is unstructured.
n_components : int (optional)
Number of connected components. If None the number of connected
components is estimated from the connectivity matrix.
n_clusters : int (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete"}, optional, default: "complete"
        Which linkage criteria to use. The linkage criterion determines which
distance to use between sets of observation.
- average uses the average of the distances of each observation of
the two sets
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
affinity : string or callable, optional, default: "euclidean".
which metric to use. Can be "euclidean", "manhattan", or any
        distance known to paired distances (see metrics.pairwise)
return_distance : bool, default False
whether or not to return the distances between the clusters.
Returns
-------
children : 2D array, shape (n_nodes, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : 1D array, shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
        is specified; otherwise 'None' is returned.
distances : ndarray, shape (n_nodes,)
Returned when return_distance is set to True.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See also
--------
ward_tree : hierarchical clustering with ward linkage
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
linkage_choices = {'complete': _hierarchical.max_merge,
'average': _hierarchical.average_merge,
}
try:
join_func = linkage_choices[linkage]
except KeyError:
raise ValueError(
'Unknown linkage option, linkage should be one '
'of %s, but %s was given' % (linkage_choices.keys(), linkage))
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented '
'only for structured clustering (i.e. with '
'explicit connectivity). The algorithm '
'will build the full tree and only '
'retain the lower branches required '
'for the specified number of clusters',
stacklevel=2)
if affinity == 'precomputed':
# for the linkage function of hierarchy to work on precomputed
# data, provide as first argument an ndarray of the shape returned
# by pdist: it is a flat array containing the upper triangular of
# the distance matrix.
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
elif affinity == 'l2':
# Translate to something understood by scipy
affinity = 'euclidean'
elif affinity in ('l1', 'manhattan'):
affinity = 'cityblock'
elif callable(affinity):
X = affinity(X)
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
out = hierarchy.linkage(X, method=linkage, metric=affinity)
children_ = out[:, :2].astype(np.int)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
return children_, 1, n_samples, None
connectivity = _fix_connectivity(X, connectivity,
n_components=n_components)
connectivity = connectivity.tocoo()
# Put the diagonal to zero
diag_mask = (connectivity.row != connectivity.col)
connectivity.row = connectivity.row[diag_mask]
connectivity.col = connectivity.col[diag_mask]
connectivity.data = connectivity.data[diag_mask]
del diag_mask
# FIXME We compute all the distances, while we could have only computed
# the "interesting" distances
distances = paired_distances(X[connectivity.row],
X[connectivity.col],
metric=affinity)
connectivity.data = distances
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
assert n_clusters <= n_samples
n_nodes = 2 * n_samples - n_clusters
if return_distance:
distances = np.empty(n_nodes - n_samples)
# create inertia heap and connection matrix
A = np.empty(n_nodes, dtype=object)
inertia = list()
    # LIL seems to be the best format to access the rows quickly,
# without the numpy overhead of slicing CSR indices and data.
connectivity = connectivity.tolil()
# We are storing the graph in a list of IntFloatDict
for ind, (data, row) in enumerate(zip(connectivity.data,
connectivity.rows)):
A[ind] = IntFloatDict(np.asarray(row, dtype=np.intp),
np.asarray(data, dtype=np.float64))
# We keep only the upper triangular for the heap
# Generator expressions are faster than arrays on the following
inertia.extend(_hierarchical.WeightedEdge(d, ind, r)
for r, d in zip(row, data) if r < ind)
del connectivity
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=np.intp)
children = []
# recursive merge loop
for k in xrange(n_samples, n_nodes):
# identify the merge
while True:
edge = heappop(inertia)
if used_node[edge.a] and used_node[edge.b]:
break
i = edge.a
j = edge.b
if return_distance:
# store distances
distances[k - n_samples] = edge.weight
parent[i] = parent[j] = k
children.append((i, j))
# Keep track of the number of elements per cluster
n_i = used_node[i]
n_j = used_node[j]
used_node[k] = n_i + n_j
used_node[i] = used_node[j] = False
# update the structure matrix A and the inertia matrix
# a clever 'min', or 'max' operation between A[i] and A[j]
coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
for l, d in coord_col:
A[l].append(k, d)
# Here we use the information from coord_col (containing the
# distances) to update the heap
heappush(inertia, _hierarchical.WeightedEdge(d, k, l))
A[k] = coord_col
# Clear A[i] and A[j] to save memory
A[i] = A[j] = 0
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# # return numpy array for efficient caching
children = np.array(children)[:, ::-1]
if return_distance:
return children, n_components, n_leaves, parent, distances
return children, n_components, n_leaves, parent
# Matching names to tree-building strategies
def _complete_linkage(*args, **kwargs):
kwargs['linkage'] = 'complete'
return linkage_tree(*args, **kwargs)
def _average_linkage(*args, **kwargs):
kwargs['linkage'] = 'average'
return linkage_tree(*args, **kwargs)
_TREE_BUILDERS = dict(
ward=ward_tree,
complete=_complete_linkage,
average=_average_linkage,
)
###############################################################################
# Functions for cutting hierarchical clustering tree
def _hc_cut(n_clusters, children, n_leaves):
"""Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : list of pairs. Length of n_nodes
The children of each non-leaf node. Values less than `n_samples` refer
to leaves of the tree. A greater value `i` indicates a node with
children `children[i - n_samples]`.
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
cluster labels for each point
"""
if n_clusters > n_leaves:
raise ValueError('Cannot extract more clusters than samples: '
                         '%s clusters were given for a tree with %s leaves.'
% (n_clusters, n_leaves))
# In this function, we store nodes as a heap to avoid recomputing
# the max of the nodes: the first element is always the smallest
# We use negated indices as heaps work on smallest elements, and we
# are interested in largest elements
# children[-1] is the root of the tree
nodes = [-(max(children[-1]) + 1)]
for i in xrange(n_clusters - 1):
# As we have a heap, nodes[0] is the smallest element
these_children = children[-nodes[0] - n_leaves]
# Insert the 2 children and remove the largest node
heappush(nodes, -these_children[0])
heappushpop(nodes, -these_children[1])
label = np.zeros(n_leaves, dtype=np.intp)
for i, node in enumerate(nodes):
label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
return label
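# Worked example: with n_leaves=4 and children = [[0, 1], [2, 3], [4, 5]]
# (node 4 merges samples 0 and 1, node 5 merges samples 2 and 3, node 6 is
# the root), _hc_cut(2, children, 4) keeps nodes 4 and 5 and returns the
# labels [1, 1, 0, 0].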
###############################################################################
class AgglomerativeClustering(BaseEstimator, ClusterMixin):
"""
Agglomerative Clustering
Recursively merges the pair of clusters that minimally increases
a given linkage distance.
Parameters
----------
n_clusters : int, default=2
The number of clusters to find.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
        kneighbors_graph. Default is None, i.e., the
hierarchical clustering algorithm is unstructured.
affinity : string or callable, default: "euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or 'precomputed'.
If linkage is "ward", only "euclidean" is accepted.
memory : Instance of joblib.Memory or string (optional)
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
n_components : int (optional)
The number of connected components in the graph defined by the
connectivity matrix. If not set, it is estimated.
compute_full_tree : bool or 'auto' (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
linkage : {"ward", "complete", "average"}, optional, default: "ward"
Which linkage criterion to use. The linkage criterion determines which
        distance to use between sets of observations. The algorithm will merge
        the pairs of clusters that minimize this criterion.
- ward minimizes the variance of the clusters being merged.
- average uses the average of the distances of each observation of
the two sets.
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
pooling_func : callable, default=np.mean
This combines the values of agglomerated features into a single
value, and should accept an array of shape [M, N] and the keyword
argument ``axis=1``, and reduce it to an array of size [M].
Attributes
----------
labels_ : array [n_samples]
cluster labels for each point
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_nodes, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
"""
def __init__(self, n_clusters=2, affinity="euclidean",
memory=Memory(cachedir=None, verbose=0),
connectivity=None, n_components=None,
compute_full_tree='auto', linkage='ward',
pooling_func=np.mean):
self.n_clusters = n_clusters
self.memory = memory
self.n_components = n_components
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.affinity = affinity
self.pooling_func = pooling_func
def fit(self, X):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The samples a.k.a. observations.
Returns
-------
self
"""
X = check_array(X)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
if self.linkage == "ward" and self.affinity != "euclidean":
raise ValueError("%s was provided as affinity. Ward can only "
"work with euclidean distances." %
(self.affinity, ))
if self.linkage not in _TREE_BUILDERS:
raise ValueError("Unknown linkage type %s."
"Valid options are %s" % (self.linkage,
_TREE_BUILDERS.keys()))
tree_builder = _TREE_BUILDERS[self.linkage]
connectivity = self.connectivity
if self.connectivity is not None:
if callable(self.connectivity):
connectivity = self.connectivity(X)
connectivity = check_array(
connectivity, accept_sparse=['csr', 'coo', 'lil'])
n_samples = len(X)
compute_full_tree = self.compute_full_tree
if self.connectivity is None:
compute_full_tree = True
if compute_full_tree == 'auto':
# Early stopping is likely to give a speed up only for
# a large number of clusters. The actual threshold
# implemented here is heuristic
compute_full_tree = self.n_clusters < max(100, .02 * n_samples)
n_clusters = self.n_clusters
if compute_full_tree:
n_clusters = None
# Construct the tree
kwargs = {}
if self.linkage != 'ward':
kwargs['linkage'] = self.linkage
kwargs['affinity'] = self.affinity
self.children_, self.n_components_, self.n_leaves_, parents = \
memory.cache(tree_builder)(X, connectivity,
n_components=self.n_components,
n_clusters=n_clusters,
**kwargs)
# Cut the tree
if compute_full_tree:
self.labels_ = _hc_cut(self.n_clusters, self.children_,
self.n_leaves_)
else:
labels = _hierarchical.hc_get_heads(parents, copy=False)
# copy to avoid holding a reference on the original array
labels = np.copy(labels[:n_samples])
            # Reassign cluster numbers
self.labels_ = np.searchsorted(np.unique(labels), labels)
return self
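# Illustrative sketch (not part of the original module): minimal usage of
# AgglomerativeClustering on a toy 2-D dataset. Assumes numpy is imported
# as np at the top of this module.
def _example_agglomerative_clustering():
    X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
    model = AgglomerativeClustering(n_clusters=2, linkage='average',
                                    affinity='euclidean')
    model.fit(X)
    # The two points near the origin share one label and the two points
    # near (5, 5) share the other, e.g. array([1, 1, 0, 0]).
    return model.labels_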
class FeatureAgglomeration(AgglomerativeClustering, AgglomerationTransform):
"""Agglomerate features.
Similar to AgglomerativeClustering, but recursively merges features
instead of samples.
Parameters
----------
n_clusters : int, default 2
The number of clusters to find.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each feature the neighboring
features following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
        kneighbors_graph. Default is None, i.e., the
hierarchical clustering algorithm is unstructured.
affinity : string or callable, default "euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or 'precomputed'.
If linkage is "ward", only "euclidean" is accepted.
memory : Instance of joblib.Memory or string, optional
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
n_components : int, optional
The number of connected components in the graph defined by the
connectivity matrix. If not set, it is estimated.
compute_full_tree : bool or 'auto', optional, default "auto"
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of features. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
linkage : {"ward", "complete", "average"}, optional, default "ward"
Which linkage criterion to use. The linkage criterion determines which
        distance to use between sets of features. The algorithm will merge
        the pairs of clusters that minimize this criterion.
- ward minimizes the variance of the clusters being merged.
- average uses the average of the distances of each feature of
the two sets.
- complete or maximum linkage uses the maximum distances between
all features of the two sets.
pooling_func : callable, default np.mean
This combines the values of agglomerated features into a single
value, and should accept an array of shape [M, N] and the keyword
argument `axis=1`, and reduce it to an array of size [M].
Attributes
----------
labels_ : array-like, (n_features,)
cluster labels for each feature.
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_nodes, 2)
The children of each non-leaf node. Values less than `n_features`
        correspond to leaves of the tree which are the original features.
A node `i` greater than or equal to `n_features` is a non-leaf
node and has children `children_[i - n_features]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_features + i`
"""
def fit(self, X, y=None, **params):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The data
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if not (len(X.shape) == 2 and X.shape[0] > 0):
raise ValueError('At least one sample is required to fit the '
'model. A data matrix of shape %s was given.'
% (X.shape, ))
return AgglomerativeClustering.fit(self, X.T, **params)
@property
def fit_predict(self):
raise AttributeError
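# Illustrative sketch (not part of the original module): FeatureAgglomeration
# clusters columns instead of rows; transform() (inherited from
# AgglomerationTransform) then pools each group of columns with pooling_func.
def _example_feature_agglomeration():
    X = np.array([[0., 0., 1., 1.],
                  [0., 0., 1., 1.],
                  [1., 1., 0., 0.]])
    agglo = FeatureAgglomeration(n_clusters=2, linkage='average')
    agglo.fit(X)
    # labels_ has one entry per feature, e.g. array([0, 0, 1, 1]);
    # X_reduced has shape (3, 2), one pooled column per feature cluster.
    X_reduced = agglo.transform(X)
    return agglo.labels_, X_reduced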
###############################################################################
# Backward compatibility: class for Ward hierarchical clustering
class Ward(AgglomerativeClustering):
"""Ward hierarchical clustering: constructs a tree and cuts it.
Recursively merges the pair of clusters that minimally increases
within-cluster variance.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to find.
connectivity : sparse matrix (optional)
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
        Default is None, i.e., the hierarchical clustering algorithm is
unstructured.
memory : Instance of joblib.Memory or string (optional)
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
n_components : int (optional)
The number of connected components in the graph defined by the
connectivity matrix. If not set, it is estimated.
compute_full_tree : bool or 'auto' (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
Attributes
----------
    labels_ : array [n_samples]
        cluster labels for each point
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_nodes, 2)
The children of each non-leaf node. Values less than `n_samples`
refer to leaves of the tree. A greater value `i` indicates a node with
children `children_[i - n_samples]`.
See also
--------
AgglomerativeClustering : agglomerative hierarchical clustering
"""
linkage = 'ward'
def __init__(self, n_clusters=2, memory=Memory(cachedir=None, verbose=0),
connectivity=None, n_components=None,
compute_full_tree='auto', pooling_func=np.mean):
warnings.warn("The Ward class is deprecated since 0.14 and will be "
"removed in 0.17. Use the AgglomerativeClustering "
"instead.", DeprecationWarning)
self.n_clusters = n_clusters
self.memory = memory
self.n_components = n_components
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.affinity = "euclidean"
self.pooling_func = pooling_func
class WardAgglomeration(AgglomerationTransform, Ward):
"""Feature agglomeration based on Ward hierarchical clustering
Parameters
----------
n_clusters : int or ndarray
The number of clusters.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
        kneighbors_graph. Default is None, i.e., the
hierarchical clustering algorithm is unstructured.
memory : Instance of joblib.Memory or string, optional
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
n_components : int (optional)
The number of connected components in the graph defined by the
connectivity matrix. If not set, it is estimated.
compute_full_tree : bool or 'auto' (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. This option is
useful only when specifying a connectivity matrix. Note also that
        when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
pooling_func : callable, default=np.mean
This combines the values of agglomerated features into a single
value, and should accept an array of shape [M, N] and the keyword
argument `axis=1`, and reduce it to an array of size [M].
Attributes
----------
children_ : array-like, shape (n_nodes, 2)
The children of each non-leaf node. Values less than `n_features`
        correspond to leaves of the tree which are the original features.
A node `i` greater than or equal to `n_features` is a non-leaf
node and has children `children_[i - n_features]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_features + i`
labels_ : array [n_features]
cluster labels for each feature
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
"""
def fit(self, X, y=None, **params):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The data
Returns
-------
self
"""
X = check_array(X)
return Ward.fit(self, X.T, **params)
@property
def fit_predict(self):
raise AttributeError
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Manila base exception handling.
Includes decorator for re-raising Manila-type exceptions.
SHOULD include dedicated exception logging.
"""
import re
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
import six
import webob.exc
from manila.i18n import _
LOG = log.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Whether to make exception message format errors fatal.'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
ProcessExecutionError = processutils.ProcessExecutionError
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=400, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
class Error(Exception):
pass
class ManilaException(Exception):
"""Base Manila Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, detail_data={}, **kwargs):
self.kwargs = kwargs
self.detail_data = detail_data
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
for k, v in self.kwargs.items():
if isinstance(v, Exception):
self.kwargs[k] = six.text_type(v)
if not message:
try:
message = self.message % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception('Exception in string format operation.')
for name, value in kwargs.items():
LOG.error("%(name)s: %(value)s", {
'name': name, 'value': value})
if CONF.fatal_exception_format_errors:
raise
else:
# at least get the core message out if something happened
message = self.message
elif isinstance(message, Exception):
message = six.text_type(message)
        if re.match(r'.*[^\.]\.\.$', message):
message = message[:-1]
self.msg = message
super(ManilaException, self).__init__(message)
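# Illustrative sketch (not part of Manila): how ManilaException is meant to
# be used. Subclass it, define `message` with printf-style placeholders, and
# pass the values as keyword arguments when raising.
class _ExampleWidgetNotFound(ManilaException):
    message = _("Widget %(widget_id)s could not be found.")
    code = 404
def _example_raise_widget_not_found():
    try:
        raise _ExampleWidgetNotFound(widget_id='abc-123')
    except ManilaException as exc:
        # exc.msg == "Widget abc-123 could not be found."
        # exc.kwargs['code'] == 404 (filled in from the class attribute)
        return exc.msg, exc.kwargs['code']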
class NetworkException(ManilaException):
message = _("Exception due to network failure.")
class NetworkBindException(ManilaException):
message = _("Exception due to failed port status in binding.")
class NetworkBadConfigurationException(NetworkException):
message = _("Bad network configuration: %(reason)s.")
class BadConfigurationException(ManilaException):
message = _("Bad configuration: %(reason)s.")
class NotAuthorized(ManilaException):
message = _("Not authorized.")
code = 403
class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges.")
class PolicyNotAuthorized(NotAuthorized):
message = _("Policy doesn't allow %(action)s to be performed.")
class Conflict(ManilaException):
message = _("%(err)s")
code = 409
class Invalid(ManilaException):
message = _("Unacceptable parameters.")
code = 400
class InvalidRequest(Invalid):
message = _("The request is invalid.")
class InvalidResults(Invalid):
message = _("The results are invalid.")
class InvalidInput(Invalid):
message = _("Invalid input received: %(reason)s.")
class InvalidContentType(Invalid):
message = _("Invalid content type %(content_type)s.")
class InvalidHost(Invalid):
message = _("Invalid host: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
message = _("%(err)s")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class InvalidDriverMode(Invalid):
message = _("Invalid driver mode: %(driver_mode)s.")
class InvalidAPIVersionString(Invalid):
message = _("API Version String %(version)s is of invalid format. Must "
"be of format MajorNum.MinorNum.")
class VersionNotFoundForAPIMethod(Invalid):
message = _("API version %(version)s is not supported on this method.")
class InvalidGlobalAPIVersion(Invalid):
message = _("Version %(req_ver)s is not supported by the API. Minimum "
"is %(min_ver)s and maximum is %(max_ver)s.")
class InvalidCapacity(Invalid):
message = _("Invalid capacity: %(name)s = %(value)s.")
class NotFound(ManilaException):
message = _("Resource could not be found.")
code = 404
safe = True
class MessageNotFound(NotFound):
message = _("Message %(message_id)s could not be found.")
class Found(ManilaException):
message = _("Resource was found.")
code = 302
safe = True
class InUse(ManilaException):
message = _("Resource is in use.")
class AvailabilityZoneNotFound(NotFound):
message = _("Availability zone %(id)s could not be found.")
class ShareNetworkNotFound(NotFound):
message = _("Share network %(share_network_id)s could not be found.")
class ShareServerNotFound(NotFound):
message = _("Share server %(share_server_id)s could not be found.")
class ShareServerNotFoundByFilters(ShareServerNotFound):
message = _("Share server could not be found by "
"filters: %(filters_description)s.")
class ShareServerInUse(InUse):
message = _("Share server %(share_server_id)s is in use.")
class InvalidShareServer(Invalid):
message = _("Share server %(share_server_id)s is not valid.")
class ShareMigrationError(ManilaException):
message = _("Error in share migration: %(reason)s")
class ShareMigrationFailed(ManilaException):
message = _("Share migration failed: %(reason)s")
class ShareDataCopyFailed(ManilaException):
message = _("Share Data copy failed: %(reason)s")
class ShareDataCopyCancelled(ManilaException):
message = _("Copy of contents from share instance %(src_instance)s "
"to share instance %(dest_instance)s was cancelled.")
class ServiceIPNotFound(ManilaException):
message = _("Service IP for instance not found: %(reason)s")
class AdminIPNotFound(ManilaException):
message = _("Admin port IP for service instance not found: %(reason)s")
class ShareServerNotCreated(ManilaException):
message = _("Share server %(share_server_id)s failed on creation.")
class ShareServerNotReady(ManilaException):
message = _("Share server %(share_server_id)s failed to reach '%(state)s' "
"within %(time)s seconds.")
class ServiceNotFound(NotFound):
message = _("Service %(service_id)s could not be found.")
class ServiceIsDown(Invalid):
message = _("Service %(service)s is down.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler host filter %(filter_name)s could not be found.")
class SchedulerHostWeigherNotFound(NotFound):
message = _("Scheduler host weigher %(weigher_name)s could not be found.")
class HostBinaryNotFound(NotFound):
message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
message = _("Change would make usage less than 0 for the following "
"resources: %(unders)s.")
class QuotaNotFound(NotFound):
message = _("Quota could not be found.")
class QuotaExists(ManilaException):
message = _("Quota exists for project %(project_id)s, "
"resource %(resource)s.")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class ProjectUserQuotaNotFound(QuotaNotFound):
message = _("Quota for user %(user_id)s in project %(project_id)s "
"could not be found.")
class ProjectShareTypeQuotaNotFound(QuotaNotFound):
message = _("Quota for share_type %(share_type)s in "
"project %(project_id)s could not be found.")
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(ManilaException):
message = _("Quota exceeded for resources: %(overs)s.")
class MigrationNotFound(NotFound):
message = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
message = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class MigrationError(ManilaException):
message = _("Migration error: %(reason)s.")
class MalformedRequestBody(ManilaException):
message = _("Malformed message body: %(reason)s.")
class ConfigNotFound(NotFound):
message = _("Could not find config at %(path)s.")
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s.")
class NoValidHost(ManilaException):
message = _("No valid host was found. %(reason)s.")
class WillNotSchedule(ManilaException):
message = _("Host %(host)s is not up or doesn't exist.")
class QuotaError(ManilaException):
message = _("Quota exceeded: code=%(code)s.")
code = 413
headers = {'Retry-After': '0'}
safe = True
class ShareSizeExceedsAvailableQuota(QuotaError):
message = _(
"Requested share exceeds allowed project/user or share type "
"gigabytes quota.")
class SnapshotSizeExceedsAvailableQuota(QuotaError):
message = _(
"Requested snapshot exceeds allowed project/user or share type "
"gigabytes quota.")
class ShareLimitExceeded(QuotaError):
message = _(
"Maximum number of shares allowed (%(allowed)d) either per "
"project/user or share type quota is exceeded.")
class SnapshotLimitExceeded(QuotaError):
message = _(
"Maximum number of snapshots allowed (%(allowed)d) either per "
"project/user or share type quota is exceeded.")
class ShareNetworksLimitExceeded(QuotaError):
message = _("Maximum number of share-networks "
"allowed (%(allowed)d) exceeded.")
class ShareGroupsLimitExceeded(QuotaError):
message = _(
"Maximum number of allowed share-groups is exceeded.")
class ShareGroupSnapshotsLimitExceeded(QuotaError):
message = _(
"Maximum number of allowed share-group-snapshots is exceeded.")
class GlusterfsException(ManilaException):
message = _("Unknown Gluster exception.")
class InvalidShare(Invalid):
message = _("Invalid share: %(reason)s.")
class ShareBusyException(Invalid):
message = _("Share is busy with an active task: %(reason)s.")
class InvalidShareInstance(Invalid):
message = _("Invalid share instance: %(reason)s.")
class ManageInvalidShare(InvalidShare):
message = _("Manage existing share failed due to "
"invalid share: %(reason)s")
class UnmanageInvalidShare(InvalidShare):
message = _("Unmanage existing share failed due to "
"invalid share: %(reason)s")
class PortLimitExceeded(QuotaError):
message = _("Maximum number of ports exceeded.")
class ShareAccessExists(ManilaException):
message = _("Share access %(access_type)s:%(access)s exists.")
class ShareSnapshotAccessExists(InvalidInput):
message = _("Share snapshot access %(access_type)s:%(access)s exists.")
class InvalidSnapshotAccess(Invalid):
message = _("Invalid access rule: %(reason)s")
class InvalidShareAccess(Invalid):
message = _("Invalid access rule: %(reason)s")
class InvalidShareAccessLevel(Invalid):
message = _("Invalid or unsupported share access level: %(level)s.")
class ShareBackendException(ManilaException):
message = _("Share backend error: %(msg)s.")
class ExportLocationNotFound(NotFound):
message = _("Export location %(uuid)s could not be found.")
class ShareNotFound(NotFound):
message = _("Share %(share_id)s could not be found.")
class ShareSnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ShareSnapshotInstanceNotFound(NotFound):
message = _("Snapshot instance %(instance_id)s could not be found.")
class ShareSnapshotNotSupported(ManilaException):
message = _("Share %(share_name)s does not support snapshots.")
class ShareGroupSnapshotNotSupported(ManilaException):
message = _("Share group %(share_group)s does not support snapshots.")
class ShareSnapshotIsBusy(ManilaException):
message = _("Deleting snapshot %(snapshot_name)s that has "
"dependent shares.")
class InvalidShareSnapshot(Invalid):
message = _("Invalid share snapshot: %(reason)s.")
class InvalidShareSnapshotInstance(Invalid):
message = _("Invalid share snapshot instance: %(reason)s.")
class ManageInvalidShareSnapshot(InvalidShareSnapshot):
message = _("Manage existing share snapshot failed due to "
"invalid share snapshot: %(reason)s.")
class UnmanageInvalidShareSnapshot(InvalidShareSnapshot):
message = _("Unmanage existing share snapshot failed due to "
"invalid share snapshot: %(reason)s.")
class ShareMetadataNotFound(NotFound):
message = _("Metadata item is not found.")
class InvalidShareMetadata(Invalid):
message = _("Invalid metadata.")
class InvalidShareMetadataSize(Invalid):
message = _("Invalid metadata size.")
class SecurityServiceNotFound(NotFound):
message = _("Security service %(security_service_id)s could not be found.")
class ShareNetworkSecurityServiceAssociationError(ManilaException):
message = _("Failed to associate share network %(share_network_id)s"
" and security service %(security_service_id)s: %(reason)s.")
class ShareNetworkSecurityServiceDissociationError(ManilaException):
message = _("Failed to dissociate share network %(share_network_id)s"
" and security service %(security_service_id)s: %(reason)s.")
class InvalidVolume(Invalid):
message = _("Invalid volume.")
class InvalidShareType(Invalid):
message = _("Invalid share type: %(reason)s.")
class InvalidShareGroupType(Invalid):
message = _("Invalid share group type: %(reason)s.")
class InvalidExtraSpec(Invalid):
message = _("Invalid extra_spec: %(reason)s.")
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class VolumeSnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ShareTypeNotFound(NotFound):
message = _("Share type %(share_type_id)s could not be found.")
class ShareGroupTypeNotFound(NotFound):
message = _("Share group type %(type_id)s could not be found.")
class ShareTypeAccessNotFound(NotFound):
message = _("Share type access not found for %(share_type_id)s / "
"%(project_id)s combination.")
class ShareGroupTypeAccessNotFound(NotFound):
message = _("Share group type access not found for %(type_id)s / "
"%(project_id)s combination.")
class ShareTypeNotFoundByName(ShareTypeNotFound):
message = _("Share type with name %(share_type_name)s "
"could not be found.")
class ShareGroupTypeNotFoundByName(ShareTypeNotFound):
message = _("Share group type with name %(type_name)s "
"could not be found.")
class ShareTypeExtraSpecsNotFound(NotFound):
message = _("Share Type %(share_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class ShareGroupTypeSpecsNotFound(NotFound):
message = _("Share group type %(type_id)s has no group specs with "
"key %(specs_key)s.")
class ShareTypeInUse(ManilaException):
message = _("Share Type %(share_type_id)s deletion is not allowed with "
"shares present with the type.")
class IPAddressInUse(InUse):
message = _("IP address %(ip)s is already used.")
class ShareGroupTypeInUse(ManilaException):
message = _("Share group Type %(type_id)s deletion is not allowed "
"with groups present with the type.")
class ShareTypeExists(ManilaException):
message = _("Share Type %(id)s already exists.")
class ShareTypeDoesNotExist(NotFound):
message = _("Share Type %(share_type)s does not exist.")
class DefaultShareTypeNotConfigured(NotFound):
message = _("No default share type is configured. Either configure a "
"default share type or explicitly specify a share type.")
class ShareGroupTypeExists(ManilaException):
message = _("Share group type %(type_id)s already exists.")
class ShareTypeAccessExists(ManilaException):
message = _("Share type access for %(share_type_id)s / "
"%(project_id)s combination already exists.")
class ShareGroupTypeAccessExists(ManilaException):
message = _("Share group type access for %(type_id)s / "
"%(project_id)s combination already exists.")
class ShareTypeCreateFailed(ManilaException):
message = _("Cannot create share_type with "
"name %(name)s and specs %(extra_specs)s.")
class ShareGroupTypeCreateFailed(ManilaException):
message = _("Cannot create share group type with "
"name %(name)s and specs %(group_specs)s.")
class ManageExistingShareTypeMismatch(ManilaException):
message = _("Manage existing share failed due to share type mismatch: "
"%(reason)s")
class ShareExtendingError(ManilaException):
message = _("Share %(share_id)s could not be extended due to error "
"in the driver: %(reason)s")
class ShareShrinkingError(ManilaException):
message = _("Share %(share_id)s could not be shrunk due to error "
"in the driver: %(reason)s")
class ShareShrinkingPossibleDataLoss(ManilaException):
message = _("Share %(share_id)s could not be shrunk due to "
"possible data loss")
class InstanceNotFound(NotFound):
message = _("Instance %(instance_id)s could not be found.")
class BridgeDoesNotExist(ManilaException):
message = _("Bridge %(bridge)s does not exist.")
class ServiceInstanceException(ManilaException):
message = _("Exception in service instance manager occurred.")
class ServiceInstanceUnavailable(ServiceInstanceException):
message = _("Service instance is not available.")
class StorageResourceException(ManilaException):
message = _("Storage resource exception.")
class StorageResourceNotFound(StorageResourceException):
message = _("Storage resource %(name)s not found.")
code = 404
class SnapshotResourceNotFound(StorageResourceNotFound):
message = _("Snapshot %(name)s not found.")
class SnapshotUnavailable(StorageResourceException):
message = _("Snapshot %(name)s info not available.")
class NetAppException(ManilaException):
message = _("Exception due to NetApp failure.")
class VserverNotFound(NetAppException):
message = _("Vserver %(vserver)s not found.")
class VserverNotSpecified(NetAppException):
message = _("Vserver not specified.")
class EMCVmaxXMLAPIError(Invalid):
message = _("%(err)s")
class EMCVmaxLockRequiredException(ManilaException):
message = _("Unable to acquire lock(s).")
class EMCVmaxInvalidMoverID(ManilaException):
message = _("Invalid mover or vdm %(id)s.")
class EMCVnxXMLAPIError(Invalid):
message = _("%(err)s")
class EMCVnxLockRequiredException(ManilaException):
message = _("Unable to acquire lock(s).")
class EMCVnxInvalidMoverID(ManilaException):
message = _("Invalid mover or vdm %(id)s.")
class EMCUnityError(ShareBackendException):
message = _("%(err)s")
class HPE3ParInvalidClient(Invalid):
message = _("%(err)s")
class HPE3ParInvalid(Invalid):
message = _("%(err)s")
class HPE3ParUnexpectedError(ManilaException):
message = _("%(err)s")
class GPFSException(ManilaException):
message = _("GPFS exception occurred.")
class GPFSGaneshaException(ManilaException):
message = _("GPFS Ganesha exception occurred.")
class GaneshaCommandFailure(ProcessExecutionError):
_description = _("Ganesha management command failed.")
def __init__(self, **kw):
if 'description' not in kw:
kw['description'] = self._description
super(GaneshaCommandFailure, self).__init__(**kw)
class InvalidSqliteDB(Invalid):
message = _("Invalid Sqlite database.")
class SSHException(ManilaException):
message = _("Exception in SSH protocol negotiation or logic.")
class HDFSException(ManilaException):
message = _("HDFS exception occurred!")
class MapRFSException(ManilaException):
message = _("MapRFS exception occurred: %(msg)s")
class ZFSonLinuxException(ManilaException):
message = _("ZFSonLinux exception occurred: %(msg)s")
class QBException(ManilaException):
message = _("Quobyte exception occurred: %(msg)s")
class QBRpcException(ManilaException):
"""Quobyte backend specific exception."""
message = _("Quobyte JsonRpc call to backend raised "
"an exception: %(result)s, Quobyte error"
" code %(qbcode)s")
class SSHInjectionThreat(ManilaException):
message = _("SSH command injection detected: %(command)s")
class HNASBackendException(ManilaException):
message = _("HNAS Backend Exception: %(msg)s")
class HNASConnException(ManilaException):
message = _("HNAS Connection Exception: %(msg)s")
class HNASSSCIsBusy(ManilaException):
message = _("HNAS SSC is busy and cannot execute the command: %(msg)s")
class HNASSSCContextChange(ManilaException):
message = _("HNAS SSC Context has been changed unexpectedly: %(msg)s")
class HNASDirectoryNotEmpty(ManilaException):
message = _("HNAS Directory is not empty: %(msg)s")
class HNASItemNotFoundException(StorageResourceNotFound):
message = _("HNAS Item Not Found Exception: %(msg)s")
class HNASNothingToCloneException(ManilaException):
message = _("HNAS Nothing To Clone Exception: %(msg)s")
# ShareGroup
class ShareGroupNotFound(NotFound):
message = _("Share group %(share_group_id)s could not be found.")
class ShareGroupSnapshotNotFound(NotFound):
message = _(
"Share group snapshot %(share_group_snapshot_id)s could not be found.")
class ShareGroupSnapshotMemberNotFound(NotFound):
message = _("Share group snapshot member %(member_id)s could not be "
"found.")
class InvalidShareGroup(Invalid):
message = _("Invalid share group: %(reason)s")
class InvalidShareGroupSnapshot(Invalid):
message = _("Invalid share group snapshot: %(reason)s")
class DriverNotInitialized(ManilaException):
message = _("Share driver '%(driver)s' not initialized.")
class ShareResourceNotFound(StorageResourceNotFound):
message = _("Share id %(share_id)s could not be found "
"in storage backend.")
class ShareUmountException(ManilaException):
message = _("Failed to unmount share: %(reason)s")
class ShareMountException(ManilaException):
message = _("Failed to mount share: %(reason)s")
class ShareCopyDataException(ManilaException):
message = _("Failed to copy data: %(reason)s")
# Replication
class ReplicationException(ManilaException):
message = _("Unable to perform a replication action: %(reason)s.")
class ShareReplicaNotFound(NotFound):
message = _("Share Replica %(replica_id)s could not be found.")
# Tegile Storage drivers
class TegileAPIException(ShareBackendException):
message = _("Unexpected response from Tegile IntelliFlash API: "
"%(response)s")
class StorageCommunicationException(ShareBackendException):
message = _("Could not communicate with storage array.")
class EvaluatorParseException(ManilaException):
message = _("Error during evaluator parsing: %(reason)s")
# Hitachi Scaleout Platform driver
class HSPBackendException(ShareBackendException):
message = _("HSP Backend Exception: %(msg)s")
class HSPTimeoutException(ShareBackendException):
message = _("HSP Timeout Exception: %(msg)s")
class HSPItemNotFoundException(ShareBackendException):
message = _("HSP Item Not Found Exception: %(msg)s")
class NexentaException(ShareBackendException):
message = _("Exception due to Nexenta failure. %(reason)s")
# Tooz locking
class LockCreationFailed(ManilaException):
message = _('Unable to create lock. Coordination backend not started.')
class LockingFailed(ManilaException):
message = _('Lock acquisition failed.')
# Ganesha library
class GaneshaException(ManilaException):
message = _("Unknown NFS-Ganesha library exception.")
|
|
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handling of block device information and mapping.
This module contains helper methods for interpreting the block
device information and determining the suitable mapping to
guest devices and libvirt XML.
Throughout these methods there are a number of standard
variables / types used
 * 'mapping': a dict containing the storage device mapping.
For the default disk types it will contain the following
keys & values:
'disk' -> disk_info
'disk.rescue' -> disk_info
'disk.local' -> disk_info
'disk.swap' -> disk_info
'disk.config' -> disk_info
If any of the default disks are overridden by the block
device info mappings, the hash value will be None
For any ephemeral device there will also be a dict entry
'disk.eph$NUM' -> disk_info
For any volume device there will also be a dict entry:
$path -> disk_info
Finally a special key will refer to the root device:
'root' -> disk_info
 * 'disk_info': a dict describing a single disk.
   It contains the following 3 required fields:
   'bus', 'dev', 'type'
   and possibly these optional fields: 'format', 'boot_index'
 * 'disk_bus': the guest bus type ('ide', 'virtio', 'scsi', etc.)
 * 'disk_dev': the device name 'vda', 'hdc', 'sdf', 'xvde', etc.
 * 'device_type': type of device, e.g. 'disk', 'cdrom', 'floppy'
* 'format': Which format to apply to the device if applicable
* 'boot_index': Number designating the boot order of the device
"""
import itertools
import operator
from oslo.config import cfg
from nova import block_device
from nova.compute import arch
from nova import exception
from nova.i18n import _
from nova.objects import base as obj_base
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
SUPPORTED_DEVICE_TYPES = ('disk', 'cdrom', 'floppy', 'lun')
BOOT_DEV_FOR_TYPE = {'disk': 'hd', 'cdrom': 'cdrom', 'floppy': 'fd'}
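# Illustrative sketch (not part of Nova): a typical 'mapping' dict in the
# shape described in the module docstring above, for a KVM guest with a
# virtio root disk, one ephemeral disk and a config drive on an IDE CDROM.
_EXAMPLE_MAPPING = {
    'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
             'boot_index': '1'},
    'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
             'boot_index': '1'},
    'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
    'disk.config': {'bus': 'ide', 'dev': 'hdd', 'type': 'cdrom'},
}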
def has_disk_dev(mapping, disk_dev):
"""Determine if a disk device name has already been used.
Looks at all the keys in mapping to see if any
corresponding disk_info tuple has a device name
matching disk_dev
Returns True if the disk_dev is in use.
"""
for disk in mapping:
info = mapping[disk]
if info['dev'] == disk_dev:
return True
return False
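# Illustrative sketch (not part of Nova): has_disk_dev only compares the
# 'dev' entries of the disk_info values already present in the mapping.
def _example_has_disk_dev():
    mapping = {'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}}
    return has_disk_dev(mapping, 'vda'), has_disk_dev(mapping, 'vdb')  # (True, False)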
def get_dev_prefix_for_disk_bus(disk_bus):
"""Determine the dev prefix for a disk bus.
    Determine the dev prefix to be combined
    with a disk number to form a disk_dev,
    e.g. 'hd' for the 'ide' bus can be used to
    form the disk dev 'hda'.
Returns the dev prefix or raises an
exception if the disk bus is unknown.
"""
if CONF.libvirt.disk_prefix:
return CONF.libvirt.disk_prefix
if disk_bus == "ide":
return "hd"
elif disk_bus == "virtio":
return "vd"
elif disk_bus == "xen":
# Two possible mappings for Xen, xvda or sda
# which are interchangeable, so we pick sda
return "sd"
elif disk_bus == "scsi":
return "sd"
elif disk_bus == "usb":
return "sd"
elif disk_bus == "fdc":
return "fd"
elif disk_bus == "uml":
return "ubd"
elif disk_bus == "lxc":
return None
else:
raise exception.NovaException(
_("Unable to determine disk prefix for %s") %
disk_bus)
def get_dev_count_for_disk_bus(disk_bus):
"""Determine the number disks supported.
Determine how many disks can be supported in
a single VM for a particular disk bus.
Returns the number of disks supported.
"""
if disk_bus == "ide":
return 4
else:
return 26
def find_disk_dev_for_disk_bus(mapping, bus, last_device=False):
"""Identify a free disk dev name for a bus.
Determines the possible disk dev names for
the bus, and then checks them in order until
it identifies one that is not yet used in the
disk mapping. If 'last_device' is set, it will
only consider the last available disk dev name.
Returns the chosen disk_dev name, or raises an
exception if none is available.
"""
dev_prefix = get_dev_prefix_for_disk_bus(bus)
if dev_prefix is None:
return None
max_dev = get_dev_count_for_disk_bus(bus)
if last_device:
devs = [max_dev - 1]
else:
devs = range(max_dev)
for idx in devs:
disk_dev = dev_prefix + chr(ord('a') + idx)
if not has_disk_dev(mapping, disk_dev):
return disk_dev
    raise exception.NovaException(
        _("No free disk device names for prefix '%s'") %
        dev_prefix)
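# Illustrative sketch (not part of Nova): picking the next free device name
# on the virtio bus when 'vda' is already taken. Assumes the libvirt options
# are registered and disk_prefix is left unset (the default), so the 'vd'
# prefix is derived from the bus name.
def _example_find_disk_dev():
    mapping = {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}}
    # 'vda' is in use, so the first free candidate is 'vdb'.
    return find_disk_dev_for_disk_bus(mapping, 'virtio')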
def is_disk_bus_valid_for_virt(virt_type, disk_bus):
valid_bus = {
'qemu': ['virtio', 'scsi', 'ide', 'usb', 'fdc'],
'kvm': ['virtio', 'scsi', 'ide', 'usb', 'fdc'],
'xen': ['xen', 'ide'],
'uml': ['uml'],
'lxc': ['lxc'],
}
if virt_type not in valid_bus:
raise exception.UnsupportedVirtType(virt=virt_type)
return disk_bus in valid_bus[virt_type]
def get_disk_bus_for_device_type(virt_type,
image_meta=None,
device_type="disk"):
"""Determine the best disk bus to use for a device type.
Considering the currently configured virtualization
type, return the optimal disk_bus to use for a given
device type. For example, for a disk on KVM it will
return 'virtio', while for a CDROM it will return 'ide'
on x86_64 and 'scsi' on ppc64.
Returns the disk_bus, or returns None if the device
type is not supported for this virtualization
"""
# Prefer a disk bus set against the image first of all
if image_meta:
key = "hw_" + device_type + "_bus"
disk_bus = image_meta.get('properties', {}).get(key)
if disk_bus is not None:
if not is_disk_bus_valid_for_virt(virt_type, disk_bus):
raise exception.UnsupportedHardware(model=disk_bus,
virt=virt_type)
return disk_bus
# Otherwise pick a hypervisor default disk bus
if virt_type == "uml":
if device_type == "disk":
return "uml"
elif virt_type == "lxc":
return "lxc"
elif virt_type == "xen":
if device_type == "cdrom":
return "ide"
elif device_type == "disk":
return "xen"
elif virt_type in ("qemu", "kvm"):
if device_type == "cdrom":
guestarch = libvirt_utils.get_arch(image_meta)
if guestarch in (arch.PPC, arch.PPC64):
return "scsi"
else:
return "ide"
elif device_type == "disk":
return "virtio"
elif device_type == "floppy":
return "fdc"
return None
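# Illustrative sketch (not part of Nova): default bus selection for KVM when
# the image metadata sets no hw_*_bus property; on x86 hosts disks get
# 'virtio' and CDROMs get 'ide' (ppc/ppc64 guests would get 'scsi' CDROMs).
def _example_default_buses():
    image_meta = {'properties': {}}
    disk_bus = get_disk_bus_for_device_type('kvm', image_meta, 'disk')
    cdrom_bus = get_disk_bus_for_device_type('kvm', image_meta, 'cdrom')
    return disk_bus, cdrom_bus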
def get_disk_bus_for_disk_dev(virt_type, disk_dev):
"""Determine the disk bus for a disk device.
Given a disk device like 'hda', 'sdf', 'xvdb', etc
guess what the most appropriate disk bus is for
the currently configured virtualization technology
Returns the disk bus, or raises an Exception if
the disk device prefix is unknown.
"""
if disk_dev[:2] == 'hd':
return "ide"
elif disk_dev[:2] == 'sd':
# Reverse mapping 'sd' is not reliable
# there are many possible mappings. So
# this picks the most likely mappings
if virt_type == "xen":
return "xen"
else:
return "scsi"
elif disk_dev[:2] == 'vd':
return "virtio"
elif disk_dev[:2] == 'fd':
return "fdc"
elif disk_dev[:3] == 'xvd':
return "xen"
elif disk_dev[:3] == 'ubd':
return "uml"
else:
raise exception.NovaException(
_("Unable to determine disk bus for '%s'") %
disk_dev[:1])
def get_next_disk_info(mapping, disk_bus,
device_type='disk',
last_device=False,
boot_index=None):
"""Determine the disk info for the next device on disk_bus.
Considering the disks already listed in the disk mapping,
determine the next available disk dev that can be assigned
for the disk bus.
Returns the disk_info for the next available disk.
"""
disk_dev = find_disk_dev_for_disk_bus(mapping,
disk_bus,
last_device)
info = {'bus': disk_bus,
'dev': disk_dev,
'type': device_type}
if boot_index is not None and boot_index >= 0:
info['boot_index'] = str(boot_index)
return info
def get_eph_disk(index):
return 'disk.eph' + str(index)
def get_config_drive_type():
"""Determine the type of config drive.
If config_drive_format is set to iso9660 then the config drive will
be 'cdrom', otherwise 'disk'.
Returns a string indicating the config drive type.
"""
if CONF.config_drive_format == 'iso9660':
config_drive_type = 'cdrom'
elif CONF.config_drive_format == 'vfat':
config_drive_type = 'disk'
else:
raise exception.ConfigDriveUnknownFormat(
format=CONF.config_drive_format)
return config_drive_type
def get_info_from_bdm(virt_type, image_meta, bdm,
mapping=None, disk_bus=None,
dev_type=None, allowed_types=None,
assigned_devices=None):
mapping = mapping or {}
allowed_types = allowed_types or SUPPORTED_DEVICE_TYPES
device_name = block_device.strip_dev(get_device_name(bdm))
bdm_type = bdm.get('device_type') or dev_type
if bdm_type not in allowed_types:
bdm_type = 'disk'
bdm_bus = bdm.get('disk_bus') or disk_bus
if not is_disk_bus_valid_for_virt(virt_type, bdm_bus):
if device_name:
bdm_bus = get_disk_bus_for_disk_dev(virt_type, device_name)
else:
bdm_bus = get_disk_bus_for_device_type(virt_type, image_meta,
bdm_type)
if not device_name:
if assigned_devices:
padded_mapping = {dev: {'dev': dev} for dev in assigned_devices}
padded_mapping.update(mapping)
else:
padded_mapping = mapping
device_name = find_disk_dev_for_disk_bus(padded_mapping, bdm_bus)
bdm_info = {'bus': bdm_bus,
'dev': device_name,
'type': bdm_type}
bdm_format = bdm.get('guest_format')
if bdm_format:
bdm_info.update({'format': bdm_format})
boot_index = bdm.get('boot_index')
if boot_index is not None and boot_index >= 0:
# NOTE(ndipanov): libvirt starts ordering from 1, not 0
bdm_info['boot_index'] = str(boot_index + 1)
return bdm_info
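# Illustrative sketch (not part of Nova): turning a minimal, dict-style BDM
# into disk_info for a KVM guest. No disk_bus is given, so the bus is
# derived from the 'vd' device name prefix.
def _example_info_from_bdm():
    bdm = {'device_name': '/dev/vdb', 'device_type': 'disk',
           'guest_format': 'ext4', 'boot_index': None}
    return get_info_from_bdm('kvm', {'properties': {}}, bdm)
    # -> {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk', 'format': 'ext4'}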
def get_device_name(bdm):
"""Get the device name if present regardless of the bdm format."""
if isinstance(bdm, obj_base.NovaObject):
return bdm.device_name
else:
return bdm.get('device_name') or bdm.get('mount_device')
def get_root_info(virt_type, image_meta, root_bdm, disk_bus, cdrom_bus,
root_device_name=None):
# NOTE (ndipanov): This is a hack to avoid considering an image
# BDM with local target, as we don't support them
# yet. Only applies when passed non-driver format
no_root_bdm = (not root_bdm or (
root_bdm.get('source_type') == 'image' and
root_bdm.get('destination_type') == 'local'))
if no_root_bdm:
if (image_meta and image_meta.get('disk_format') == 'iso'):
root_device_bus = cdrom_bus
root_device_type = 'cdrom'
else:
root_device_bus = disk_bus
root_device_type = 'disk'
if root_device_name:
root_device_bus = get_disk_bus_for_disk_dev(virt_type,
root_device_name)
else:
root_device_name = find_disk_dev_for_disk_bus({}, root_device_bus)
return {'bus': root_device_bus,
'type': root_device_type,
'dev': block_device.strip_dev(root_device_name),
'boot_index': '1'}
else:
if not get_device_name(root_bdm) and root_device_name:
root_bdm = root_bdm.copy()
root_bdm['device_name'] = root_device_name
return get_info_from_bdm(virt_type, image_meta,
root_bdm, {}, disk_bus)
def default_device_names(virt_type, context, instance, root_device_name,
ephemerals, swap, block_device_mapping,
image_meta):
block_device_info = {
'root_device_name': root_device_name,
'swap': driver_block_device.get_swap(
driver_block_device.convert_swap(swap)),
'ephemerals': driver_block_device.convert_ephemerals(ephemerals),
'block_device_mapping': (
driver_block_device.convert_volumes(
block_device_mapping) +
driver_block_device.convert_snapshots(
block_device_mapping) +
driver_block_device.convert_blanks(
block_device_mapping))
}
get_disk_info(virt_type, instance, image_meta, block_device_info)
for driver_bdm in itertools.chain(block_device_info['ephemerals'],
[block_device_info['swap']] if
block_device_info['swap'] else [],
block_device_info['block_device_mapping']):
driver_bdm.save(context)
def has_default_ephemeral(instance, disk_bus, block_device_info, mapping):
ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
if instance['ephemeral_gb'] <= 0 or ephemerals:
return None
else:
info = get_next_disk_info(mapping, disk_bus)
if block_device.volume_in_mapping(info['dev'], block_device_info):
return None
return info
def update_bdm(bdm, info):
device_name_field = ('device_name'
if 'device_name' in bdm
else 'mount_device')
# Do not update the device name if it was already present
bdm.update(dict(zip((device_name_field,
'disk_bus', 'device_type'),
((bdm.get(device_name_field) or
block_device.prepend_dev(info['dev'])),
info['bus'], info['type']))))
def get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
image_meta,
block_device_info=None,
rescue=False):
"""Determine how to map default disks to the virtual machine.
This is about figuring out whether the default 'disk',
'disk.local', 'disk.swap' and 'disk.config' images have
been overridden by the block device mapping.
Returns the guest disk mapping for the devices.
"""
inst_type = instance.get_flavor()
mapping = {}
pre_assigned_device_names = \
[block_device.strip_dev(get_device_name(bdm)) for bdm in itertools.chain(
driver.block_device_info_get_ephemerals(block_device_info),
[driver.block_device_info_get_swap(block_device_info)],
driver.block_device_info_get_mapping(block_device_info))
if get_device_name(bdm)]
if rescue:
rescue_info = get_next_disk_info(mapping,
disk_bus, boot_index=1)
mapping['disk.rescue'] = rescue_info
mapping['root'] = rescue_info
os_info = get_next_disk_info(mapping,
disk_bus)
mapping['disk'] = os_info
return mapping
# NOTE (ndipanov): root_bdm can be None when we boot from image
    # as there is no driver representation of local targeted images
# and they will not be in block_device_info list.
root_bdm = block_device.get_root_bdm(
driver.block_device_info_get_mapping(block_device_info))
root_device_name = block_device.strip_dev(
driver.block_device_info_get_root(block_device_info))
root_info = get_root_info(virt_type, image_meta, root_bdm,
disk_bus, cdrom_bus, root_device_name)
mapping['root'] = root_info
# NOTE (ndipanov): This implicitly relies on image->local BDMs not
# being considered in the driver layer - so missing
# bdm with boot_index 0 means - use image, unless it was
# overridden. This can happen when using legacy syntax and
# no root_device_name is set on the instance.
if not root_bdm and not block_device.volume_in_mapping(root_info['dev'],
block_device_info):
mapping['disk'] = root_info
default_eph = has_default_ephemeral(instance, disk_bus, block_device_info,
mapping)
if default_eph:
mapping['disk.local'] = default_eph
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
eph_info = get_info_from_bdm(
virt_type, image_meta, eph, mapping, disk_bus,
assigned_devices=pre_assigned_device_names)
mapping[get_eph_disk(idx)] = eph_info
update_bdm(eph, eph_info)
swap = driver.block_device_info_get_swap(block_device_info)
if swap and swap.get('swap_size', 0) > 0:
swap_info = get_info_from_bdm(virt_type, image_meta,
swap, mapping, disk_bus)
mapping['disk.swap'] = swap_info
update_bdm(swap, swap_info)
elif inst_type['swap'] > 0:
swap_info = get_next_disk_info(mapping,
disk_bus)
if not block_device.volume_in_mapping(swap_info['dev'],
block_device_info):
mapping['disk.swap'] = swap_info
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
vol_info = get_info_from_bdm(
virt_type, image_meta, vol, mapping,
assigned_devices=pre_assigned_device_names)
mapping[block_device.prepend_dev(vol_info['dev'])] = vol_info
update_bdm(vol, vol_info)
if configdrive.required_by(instance):
device_type = get_config_drive_type()
disk_bus = get_disk_bus_for_device_type(virt_type,
image_meta,
device_type)
config_info = get_next_disk_info(mapping,
disk_bus,
device_type,
last_device=True)
mapping['disk.config'] = config_info
return mapping
def get_disk_info(virt_type, instance, image_meta,
block_device_info=None, rescue=False):
"""Determine guest disk mapping info.
This is a wrapper around get_disk_mapping, which
also returns the chosen disk_bus and cdrom_bus.
The returned data is in a dict
- disk_bus: the bus for harddisks
- cdrom_bus: the bus for CDROMs
- mapping: the disk mapping
    Returns the disk mapping dict.
"""
disk_bus = get_disk_bus_for_device_type(virt_type, image_meta, "disk")
cdrom_bus = get_disk_bus_for_device_type(virt_type, image_meta, "cdrom")
mapping = get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
image_meta,
block_device_info,
rescue)
return {'disk_bus': disk_bus,
'cdrom_bus': cdrom_bus,
'mapping': mapping}
def get_boot_order(disk_info):
boot_mapping = (info for name, info in disk_info['mapping'].iteritems()
if name != 'root' and info.get('boot_index') is not None)
boot_devs_dup = (BOOT_DEV_FOR_TYPE[dev['type']] for dev in
sorted(boot_mapping,
key=operator.itemgetter('boot_index')))
def uniq(lst):
s = set()
return [el for el in lst if el not in s and not s.add(el)]
return uniq(boot_devs_dup)
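# Illustrative sketch (not part of Nova): boot order is taken from the
# per-device boot_index values (skipping the 'root' alias) and then
# de-duplicated by device class via BOOT_DEV_FOR_TYPE.
def _example_boot_order():
    disk_info = {'mapping': {
        'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
                 'boot_index': '1'},
        'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
                 'boot_index': '1'},
        'disk.config': {'bus': 'ide', 'dev': 'hdd', 'type': 'cdrom',
                        'boot_index': '2'},
    }}
    return get_boot_order(disk_info)  # -> ['hd', 'cdrom']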
|
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lr-model-meta.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='lr-model-meta.proto',
package='com.webank.ai.fate.core.mlmodel.buffer',
syntax='proto3',
serialized_options=_b('B\020LRModelMetaProto'),
serialized_pb=_b('\n\x13lr-model-meta.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\" \n\x0bPredictMeta\x12\x11\n\tthreshold\x18\x01 \x01(\x01\"\xe6\x02\n\x0bLRModelMeta\x12\x0f\n\x07penalty\x18\x01 \x01(\t\x12\x0b\n\x03tol\x18\x02 \x01(\x01\x12\r\n\x05\x61lpha\x18\x03 \x01(\x01\x12\x11\n\toptimizer\x18\x04 \x01(\t\x12\x14\n\x0cparty_weight\x18\x05 \x01(\x01\x12\x12\n\nbatch_size\x18\x06 \x01(\x03\x12\x15\n\rlearning_rate\x18\x07 \x01(\x01\x12\x10\n\x08max_iter\x18\x08 \x01(\x03\x12\x12\n\nearly_stop\x18\t \x01(\t\x12\x1a\n\x12re_encrypt_batches\x18\n \x01(\x03\x12\x15\n\rfit_intercept\x18\x0b \x01(\x08\x12\x18\n\x10need_one_vs_rest\x18\x0c \x01(\x08\x12J\n\rpredict_param\x18\r \x01(\x0b\x32\x33.com.webank.ai.fate.core.mlmodel.buffer.PredictMeta\x12\x17\n\x0freveal_strategy\x18\x0e \x01(\tB\x12\x42\x10LRModelMetaProtob\x06proto3')
)
_PREDICTMETA = _descriptor.Descriptor(
name='PredictMeta',
full_name='com.webank.ai.fate.core.mlmodel.buffer.PredictMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='threshold', full_name='com.webank.ai.fate.core.mlmodel.buffer.PredictMeta.threshold', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=95,
)
_LRMODELMETA = _descriptor.Descriptor(
name='LRModelMeta',
full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='penalty', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.penalty', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tol', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.tol', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alpha', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.alpha', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optimizer', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.optimizer', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='party_weight', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.party_weight', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.batch_size', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='learning_rate', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.learning_rate', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_iter', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.max_iter', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='early_stop', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.early_stop', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='re_encrypt_batches', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.re_encrypt_batches', index=9,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fit_intercept', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.fit_intercept', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='need_one_vs_rest', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.need_one_vs_rest', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='predict_param', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.predict_param', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reveal_strategy', full_name='com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta.reveal_strategy', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=456,
)
_LRMODELMETA.fields_by_name['predict_param'].message_type = _PREDICTMETA
DESCRIPTOR.message_types_by_name['PredictMeta'] = _PREDICTMETA
DESCRIPTOR.message_types_by_name['LRModelMeta'] = _LRMODELMETA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PredictMeta = _reflection.GeneratedProtocolMessageType('PredictMeta', (_message.Message,), {
'DESCRIPTOR' : _PREDICTMETA,
'__module__' : 'lr_model_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.PredictMeta)
})
_sym_db.RegisterMessage(PredictMeta)
LRModelMeta = _reflection.GeneratedProtocolMessageType('LRModelMeta', (_message.Message,), {
'DESCRIPTOR' : _LRMODELMETA,
'__module__' : 'lr_model_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.LRModelMeta)
})
_sym_db.RegisterMessage(LRModelMeta)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
|
import sys
import re;
import time;
import string;
import random;
import math;
import numpy;
import scipy;
import scipy.special;
import scipy.io;
import scipy.sparse;
#import nchar;
import nltk;
import nltk.corpus;
"""
Implements online variational Bayesian inference for LDA.
"""
class Hybrid:
"""
"""
def __init__(self,
minimum_word_length=3,
maximum_word_length=20,
dict_list=None,
N=3,
word_model_smooth=1e6,
char_list=string.lowercase,
#char_list=string.lowercase + string.digits
ranking_statistics_scale=1e30
):
from nltk.stem.porter import PorterStemmer
self._stemmer = PorterStemmer();
self._minimum_word_length = minimum_word_length;
self._maximum_word_length = maximum_word_length;
self._word_model_smooth = word_model_smooth;
self._char_list = char_list;
self._n_char_model = N;
'''
if dict_list != None:
tokens = [];
for line in open(dict_list, 'r'):
line = line.strip();
if len(line) <= 0:
continue;
#tokens.append(line);
tokens.append(self._stemmer.stem(line));
#tokens = set(tokens);
#self._word_model = nchar.NcharModel(self._n_char_model, tokens, self._word_model_smooth, self._maximum_word_length, self._minimum_word_length, self._char_list);
else:
self._word_model = None;
'''
self._word_model = None;
self._ranking_statistics_scale = ranking_statistics_scale;
self._setting_title = "settings-";
self._param_title = "param-";
self._exp_beta_title = "exp_beta-";
#self._new_word_title = "new_word-";
self._gamma_title = "gamma-";
self._nu_1_title = "nu_1-";
self._nu_2_title = "nu_2-";
self._index_title = "index-";
self._nupos_title = "nupos-";
self._ranking_statistics_title = "ranking-";
self._trace_title = "trace-";
"""
"""
def _initialize(self,
vocab,
number_of_topics,
number_of_documents,
batch_size,
expected_truncation_size,
alpha_theta=1e-2,
alpha_beta=1e6,
tau=1.,
kappa=0.5,
refine_vocab_interval=10,
save_word_trace=False,
ranking_smooth_factor=1e-12,
#gamma_converge_threshold=1e-3,
#number_of_samples=50,
number_of_samples=10,
#burn_in_sweeps=2
burn_in_sweeps=5
):
self._number_of_topics = number_of_topics;
self._number_of_documents = number_of_documents;
self._batch_size = batch_size;
self._word_to_index = {};
self._index_to_word = {};
for word in set(vocab):
self._index_to_word[len(self._index_to_word)] = word;
self._word_to_index[word] = len(self._word_to_index);
vocab = self._index_to_word.keys();
self._new_words = [len(self._index_to_word)];
self._word_trace=None;
if save_word_trace:
self._word_trace = [];
for index in vocab:
self._word_trace.append(numpy.zeros((self._number_of_topics, self._number_of_documents/self._batch_size + 1), dtype='int32') + numpy.iinfo(numpy.int32).max);
self._index_to_nupos = [];
self._nupos_to_index = [];
for k in xrange(self._number_of_topics):
self._index_to_nupos.append(dict());
self._nupos_to_index.append(dict());
random.shuffle(vocab);
for index in vocab:
self._nupos_to_index[k][len(self._nupos_to_index[k])] = index;
self._index_to_nupos[k][index] = len(self._index_to_nupos[k]);
self._alpha_theta = alpha_theta;
self._alpha_beta = alpha_beta;
self._tau = tau;
self._kappa = kappa;
self._truncation_size = [];
self._truncation_size_prime = [];
self._nu_1 = {};
self._nu_2 = {};
for k in xrange(self._number_of_topics):
self._truncation_size.append(len(self._index_to_nupos[k]));
self._truncation_size_prime.append(len(self._index_to_nupos[k]));
self._nu_1[k] = numpy.ones((1, self._truncation_size[k]));
self._nu_2[k] = numpy.ones((1, self._truncation_size[k])) * self._alpha_beta;
self._expected_truncation_size = expected_truncation_size;
#self._gamma_converge_threshold = gamma_converge_threshold;
self._number_of_samples = number_of_samples;
self._burn_in_sweeps = burn_in_sweeps;
assert(self._burn_in_sweeps < self._number_of_samples);
self._ranking_smooth_factor = ranking_smooth_factor;
self._reorder_vocab_interval = refine_vocab_interval;
self._counter = 0;
self._ranking_statistics = [];
for k in xrange(self._number_of_topics):
self._ranking_statistics.append(nltk.probability.FreqDist());
for index in self._index_to_nupos[k]:
#self._ranking_statistics[k].inc(index, self._ranking_smooth_factor);
self._ranking_statistics[k][index] += self._ranking_smooth_factor;
'''
if self._word_model != None:
self._ranking_statistics[k].inc(index, self._word_model.probability(self._index_to_word[index]) * self._ranking_statistics_scale);
else:
self._ranking_statistics[k].inc(index, self._ranking_smooth_factor);
'''
self._document_topic_distribution = None;
if self._word_trace!=None:
self.update_word_trace();
def update_word_trace(self):
if self._counter>self._number_of_documents/self._batch_size:
return;
for topic_index in xrange(self._number_of_topics):
temp_keys = self._ranking_statistics[topic_index].keys();
for word_rank in xrange(len(temp_keys)):
self._word_trace[temp_keys[word_rank]][topic_index, self._counter:] = word_rank+1;
def parse_doc_list(self, docs):
if (type(docs).__name__ == 'str'):
temp = list()
temp.append(docs)
docs = temp
assert self._batch_size == len(docs);
batch_documents = [];
for d in xrange(self._batch_size):
'''
docs[d] = docs[d].lower();
docs[d] = re.sub(r'-', ' ', docs[d]);
docs[d] = re.sub(r'[^a-z ]', '', docs[d]);
docs[d] = re.sub(r'[^a-z0-9 ]', '', docs[d]);
docs[d] = re.sub(r' +', ' ', docs[d]);
words = [];
for word in docs[d].split():
if word in nltk.corpus.stopwords.words('english'):
continue;
word = self._stemmer.stem(word);
if word in nltk.corpus.stopwords.words('english'):
continue;
if len(word)>=self.maximum_word_length or len(word)<=self._minimum_word_length
continue;
words.append(word);
'''
words = [word for word in docs[d].split() if len(word)<=self._maximum_word_length and len(word)>=self._minimum_word_length];
document_topics = numpy.zeros((self._number_of_topics, len(words)));
for word_index in xrange(len(words)):
word = words[word_index];
# valid only if limiting the ranking statistics
if word not in self._word_to_index:
#if this word never appeared before
index = len(self._word_to_index);
self._index_to_word[len(self._index_to_word)] = word;
self._word_to_index[word] = len(self._word_to_index);
if self._word_trace!=None:
self._word_trace.append(numpy.zeros((self._number_of_topics, self._number_of_documents/self._batch_size + 1), dtype='int32') + numpy.iinfo(numpy.int32).max);
for topic in xrange(self._number_of_topics):
#self._ranking_statistics[topic].inc(index, self._ranking_smooth_factor);
self._ranking_statistics[topic][index] += self._ranking_smooth_factor;
else:
index = self._word_to_index[word];
for topic in xrange(self._number_of_topics):
if index not in self._index_to_nupos[topic]:
# if this word is not in current vocabulary
self._nupos_to_index[topic][len(self._nupos_to_index[topic])] = index;
self._index_to_nupos[topic][index] = len(self._index_to_nupos[topic]);
self._truncation_size_prime[topic] += 1;
document_topics[topic, word_index]=self._index_to_nupos[topic][index];
batch_documents.append(document_topics);
if self._word_trace!=None:
self.update_word_trace();
self._new_words.append(len(self._word_to_index));
return batch_documents;
"""
Compute the aggregate digamma values, for phi update.
"""
def compute_exp_weights(self):
exp_weights = {};
exp_oov_weights = {};
for k in xrange(self._number_of_topics):
psi_nu_1_k = scipy.special.psi(self._nu_1[k]);
psi_nu_2_k = scipy.special.psi(self._nu_2[k]);
psi_nu_all_k = scipy.special.psi(self._nu_1[k] + self._nu_2[k]);
aggregate_psi_nu_2_minus_psi_nu_all_k = numpy.cumsum(psi_nu_2_k - psi_nu_all_k, axis=1);
exp_oov_weights[k] = numpy.exp(aggregate_psi_nu_2_minus_psi_nu_all_k[0, -1]);
aggregate_psi_nu_2_minus_psi_nu_all_k = numpy.hstack((numpy.zeros((1, 1)), aggregate_psi_nu_2_minus_psi_nu_all_k[:, :-1]));
assert(aggregate_psi_nu_2_minus_psi_nu_all_k.shape==psi_nu_1_k.shape);
exp_weights[k] = numpy.exp(psi_nu_1_k - psi_nu_all_k + aggregate_psi_nu_2_minus_psi_nu_all_k);
return exp_weights, exp_oov_weights;
"""
"""
def e_step(self, wordids, directory=None):
batch_size = len(wordids);
sufficient_statistics = {};
for k in xrange(self._number_of_topics):
sufficient_statistics[k] = numpy.zeros((1, self._truncation_size_prime[k]));
batch_document_topic_distribution = numpy.zeros((batch_size, self._number_of_topics));
#batch_document_topic_distribution = scipy.sparse.dok_matrix((batch_size, self._number_of_topics), dtype='int16');
#log_likelihood = 0;
exp_weights, exp_oov_weights = self.compute_exp_weights();
        # Now, for each document in the mini-batch, update that document's phi_d for every word
for document_index in xrange(batch_size):
phi = numpy.random.random(wordids[document_index].shape);
phi = phi / numpy.sum(phi, axis=0)[numpy.newaxis, :];
phi_sum = numpy.sum(phi, axis=1)[:, numpy.newaxis];
#assert(phi_sum.shape == (self.number_of_topics, 1));
for sample_index in xrange(self._number_of_samples):
for word_index in xrange(wordids[document_index].shape[1]):
phi_sum -= phi[:, word_index][:, numpy.newaxis];
                    # clamp small negative values left by the subtraction above (floating-point error); ideally, phi will become all integers after a few iterations
phi_sum *= phi_sum > 0;
#assert(numpy.all(phi_sum >= 0));
temp_phi = phi_sum + self._alpha_theta;
#assert(temp_phi.shape == (self.number_of_topics, 1));
for k in xrange(self._number_of_topics):
id = wordids[document_index][k, word_index];
if id >= self._truncation_size[k]:
# if this word is an out-of-vocabulary term
temp_phi[k, 0] *= exp_oov_weights[k];
else:
# if this word is inside current vocabulary
temp_phi[k, 0] *= exp_weights[k][0, id];
temp_phi /= numpy.sum(temp_phi);
#assert(temp_phi.shape == (self.number_of_topics, 1));
# sample a topic for this word
temp_phi = temp_phi.T[0];
temp_phi = numpy.random.multinomial(1, temp_phi)[:, numpy.newaxis];
#assert(temp_phi.shape == (self.number_of_topics, 1));
phi[:, word_index][:, numpy.newaxis] = temp_phi;
phi_sum += temp_phi;
#assert(numpy.all(phi_sum >= 0));
# discard the first few burn-in sweeps
if sample_index >= self._burn_in_sweeps:
for k in xrange(self._number_of_topics):
id = wordids[document_index][k, word_index];
sufficient_statistics[k][0, id] += temp_phi[k, 0];
batch_document_topic_distribution[document_index, :] = self._alpha_theta + phi_sum.T[0, :];
for k in xrange(self._number_of_topics):
sufficient_statistics[k] /= (self._number_of_samples - self._burn_in_sweeps);
return sufficient_statistics, batch_document_topic_distribution;
"""
"""
def m_step(self, batch_size, sufficient_statistics, close_form_updates=False):
#sufficient_statistics = self.sort_sufficient_statistics(sufficient_statistics);
reverse_cumulated_phi = {};
for k in xrange(self._number_of_topics):
reverse_cumulated_phi[k] = self.reverse_cumulative_sum_matrix_over_axis(sufficient_statistics[k], 1);
        if close_form_updates:
            # Closed-form update: set the per-topic nu parameters directly from the sufficient statistics.
            for k in xrange(self._number_of_topics):
                self._nu_1[k] = 1 + sufficient_statistics[k];
                self._nu_2[k] = self._alpha_beta + reverse_cumulated_phi[k];
else:
# Epsilon will be between 0 and 1, and says how much to weight the information we got from this mini-batch.
self._epsilon = pow(self._tau + self._counter, -self._kappa);
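            # Illustrative numbers (with the default tau=1.0, kappa=0.5): the 3rd mini-batch
            # gets epsilon = (1 + 3) ** -0.5 = 0.5 and the 99th gets epsilon = 0.1, so later
            # mini-batches perturb the global parameters less and less.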
self.update_accumulate_sufficient_statistics(sufficient_statistics);
for k in xrange(self._number_of_topics):
if self._truncation_size[k] < self._truncation_size_prime[k]:
self._nu_1[k] = numpy.append(self._nu_1[k], numpy.ones((1, self._truncation_size_prime[k] - self._truncation_size[k])), 1);
self._nu_2[k] = numpy.append(self._nu_2[k], numpy.ones((1, self._truncation_size_prime[k] - self._truncation_size[k])), 1);
self._truncation_size[k] = self._truncation_size_prime[k];
self._nu_1[k] += self._epsilon * (self._number_of_documents / batch_size * sufficient_statistics[k] + 1 - self._nu_1[k]);
self._nu_2[k] += self._epsilon * (self._alpha_beta + self._number_of_documents / batch_size * reverse_cumulated_phi[k] - self._nu_2[k]);
"""
"""
def update_accumulate_sufficient_statistics(self, sufficient_statistics):
for k in xrange(self._number_of_topics):
for index in self._index_to_word:
#self._ranking_statistics[k].inc(index, -self._epsilon*self._ranking_statistics[k][index]);
self._ranking_statistics[k][index] += -self._epsilon*self._ranking_statistics[k][index];
for index in self._index_to_nupos[k]:
if self._word_model != None:
adjustment = self._word_model.probability(self._index_to_word[index]) * self._ranking_statistics_scale;
else:
adjustment = 1.;
#self._ranking_statistics[k].inc(index, self._epsilon*adjustment*sufficient_statistics[k][0, self._index_to_nupos[k][index]]);
self._ranking_statistics[k][index] += self._epsilon*adjustment*sufficient_statistics[k][0, self._index_to_nupos[k][index]];
"""
"""
def prune_vocabulary(self):
# Re-order the nu values
new_index_to_nupos = [];
new_nupos_to_index = [];
new_nu_1 = {};
new_nu_2 = {};
for k in xrange(self._number_of_topics):
if len(self._index_to_nupos[k]) < self._expected_truncation_size:
new_nu_1[k] = numpy.zeros((1, len(self._index_to_nupos[k])));
new_nu_2[k] = numpy.zeros((1, len(self._index_to_nupos[k])));
else:
new_nu_1[k] = numpy.zeros((1, self._expected_truncation_size));
new_nu_2[k] = numpy.zeros((1, self._expected_truncation_size));
new_index_to_nupos.append(dict());
new_nupos_to_index.append(dict());
for index in self._ranking_statistics[k].keys():
                if len(new_index_to_nupos[k]) >= min(len(self._index_to_nupos[k]), self._expected_truncation_size):
break;
#if index in words_to_keep and index in self._index_to_nupos[k].keys():
new_nupos_to_index[k][len(new_index_to_nupos[k])] = index;
new_index_to_nupos[k][index] = len(new_index_to_nupos[k]);
# TODO: verify with jordan
if index not in self._index_to_nupos[k]:
# TODO: this statement is never reached.
new_nu_1[k][0, new_index_to_nupos[k][index]] = 1;
new_nu_2[k][0, new_index_to_nupos[k][index]] = 1;
else:
new_nu_1[k][0, new_index_to_nupos[k][index]] = self._nu_1[k][0, self._index_to_nupos[k][index]];
new_nu_2[k][0, new_index_to_nupos[k][index]] = self._nu_2[k][0, self._index_to_nupos[k][index]];
self._truncation_size[k] = len(new_index_to_nupos[k]);
self._truncation_size_prime[k] = self._truncation_size[k];
self._index_to_nupos = new_index_to_nupos;
self._nupos_to_index = new_nupos_to_index;
self._nu_1 = new_nu_1;
self._nu_2 = new_nu_2;
"""
"""
def learning(self, batch):
self._counter += 1;
# This is to handle the case where someone just hands us a single document, not in a list.
        if (type(batch).__name__ == 'str'):
temp = list();
temp.append(batch);
batch = temp;
batch_size = len(batch);
# Parse the document mini-batch
clock = time.time();
wordids = self.parse_doc_list(batch);
clock_p_step = time.time() - clock;
# E-step: hybrid approach, sample empirical topic assignment
clock = time.time();
sufficient_statistics, batch_document_topic_distribution = self.e_step(wordids);
clock_e_step = time.time() - clock;
# M-step: online variational inference
clock = time.time();
self.m_step(batch_size, sufficient_statistics);
if self._counter % self._reorder_vocab_interval==0:
self.prune_vocabulary();
clock_m_step = time.time() - clock;
print 'P-step, E-step and M-step take %d, %d, %d seconds respectively...' % (clock_p_step, clock_e_step, clock_m_step);
return batch_document_topic_distribution;
"""
"""
def reverse_cumulative_sum_matrix_over_axis(self, matrix, axis):
cumulative_sum = numpy.zeros(matrix.shape);
(k, n) = matrix.shape;
if axis == 1:
for j in xrange(n - 2, -1, -1):
cumulative_sum[:, j] = cumulative_sum[:, j + 1] + matrix[:, j + 1];
elif axis == 0:
for i in xrange(k - 2, -1, -1):
cumulative_sum[i, :] = cumulative_sum[i + 1, :] + matrix[i + 1, :];
return cumulative_sum;
def export_beta(self, exp_beta_path, top_display=-1):
exp_weights, exp_oov_weights = self.compute_exp_weights();
output = open(exp_beta_path, 'w');
for k in xrange(self._number_of_topics):
output.write("==========\t%d\t==========\n" % (k));
freqdist = nltk.probability.FreqDist();
freqdist.clear();
for index in self._index_to_nupos[k]:
#freqdist.inc(index, exp_weights[k][0, self._index_to_nupos[k][index]]);
freqdist[index]+=exp_weights[k][0, self._index_to_nupos[k][index]]
i = 0;
for key in freqdist.keys():
i += 1;
output.write(self._index_to_word[key] + "\t" + str(freqdist[key]) + "\n");
if top_display>0 and i>=top_display:
break
output.close();
def export_model_checkpoint(self, directory='../output/tmp/'):
if not directory.endswith('/'):
directory += "/";
directory += self._setting_title;
param_path = directory + self._param_title + str(self._counter);
self.export_parameters(param_path);
index_path = directory + self._index_title + str(self._counter);
self.export_word_index(index_path);
nupos_path = directory + self._nupos_title + str(self._counter);
self.export_index_nupos(nupos_path);
nu_1_path = directory + self._nu_1_title + str(self._counter);
nu_2_path = directory + self._nu_2_title + str(self._counter);
self.export_nu(nu_1_path, nu_2_path);
ranking_path = directory + self._ranking_statistics_title + str(self._counter);
self.export_ranking_statistics(ranking_path);
if self._word_trace!=None:
trace_path = directory + self._trace_title + str(self._counter);
self.export_word_trace(trace_path);
    def import_model_checkpoint(self, directory='../output/tmp/', counter=0):
        if not directory.endswith('/'):
            directory += "/";
        directory += self._setting_title;
        # Rebuild the same per-counter file paths that export_model_checkpoint writes out.
        self.import_parameters(directory + self._param_title + str(counter));
        self.import_word_index(directory + self._index_title + str(counter));
        self.import_ranking_statistics(directory + self._ranking_statistics_title + str(counter));
        self.import_nu(directory + self._nu_1_title + str(counter), directory + self._nu_2_title + str(counter));
        self.import_index_nupos(directory + self._nupos_title + str(counter));
        self.import_word_trace(directory + self._trace_title + str(counter));
def export_parameters(self, settings_path):
settings_output = open(settings_path, 'w');
settings_output.write("alpha_theta=" + str(self._alpha_theta) + "\n");
settings_output.write("alpha_beta=" + str(self._alpha_beta) + "\n");
settings_output.write("tau0=" + str(self._tau) + "\n");
settings_output.write("kappa=" + str(self._kappa) + "\n");
settings_output.write("number_of_documents=" + str(self._number_of_documents) + "\n");
settings_output.write("number_of_topics=" + str(self._number_of_topics) + "\n");
settings_output.write("desired_truncation_level=" + str(self._expected_truncation_size) + "\n");
settings_output.write("vocab_prune_interval=" + str(self._reorder_vocab_interval) + "\n");
settings_output.write("batch_size=" + str(self._batch_size) + "\n");
settings_output.write("number_of_samples=" + str(self._number_of_samples) + "\n");
settings_output.write("burn_in_sweeps=" + str(self._burn_in_sweeps) + "\n");
settings_output.write("ranking_smooth_factor=" + str(self._ranking_smooth_factor) + "\n");
settings_output.write("ranking_statistics_scale=" + str(self._ranking_statistics_scale) + "\n");
settings_output.write("counter=" + str(self._counter) + "\n");
settings_output.write("truncation_level=");
settings_output.write(" ".join([str(truncation_level) for truncation_level in self._truncation_size]) + "\n");
settings_output.write("truncation_level_prime=");
settings_output.write(" ".join([str(truncation_level) for truncation_level in self._truncation_size_prime]) + "\n");
settings_output.close();
def import_parameters(self, settings_path):
settings_input = open(settings_path, 'r');
self._alpha_theta = float(settings_input.readline().split('=')[1])
self._alpha_beta = float(settings_input.readline().split('=')[1])
self._tau = float(settings_input.readline().split('=')[1])
self._kappa = float(settings_input.readline().split('=')[1])
self._number_of_documents = int(settings_input.readline().split('=')[1])
self._number_of_topics = int(settings_input.readline().split('=')[1])
self._expected_truncation_size = int(settings_input.readline().split('=')[1])
self._reorder_vocab_interval = int(settings_input.readline().split('=')[1]);
self._batch_size = int(settings_input.readline().split('=')[1]);
#self._gamma_converge_threshold = float(settings_input.readline().split('=')[1]);
self._number_of_samples = int(settings_input.readline().split('=')[1]);
self._burn_in_sweeps = int(settings_input.readline().split('=')[1]);
self._ranking_smooth_factor = float(settings_input.readline().split('=')[1]);
self._ranking_statistics_scale = float(settings_input.readline().split('=')[1]);
self._counter = int(settings_input.readline().split('=')[1]);
self._epsilon = pow(self._tau + self._counter, -self._kappa);
self._truncation_size = [];
#assert settings_input.readline().strip()=="truncation_level=";
truncation = settings_input.readline().strip().split('=')[1];
truncation = truncation.split();
assert len(truncation)==self._number_of_topics
for value in truncation:
self._truncation_size.append(int(value));
#self._truncation_size[k] = int(truncation[k]);
assert len(self._truncation_size)==self._number_of_topics;
self._truncation_size_prime = [];
#assert settings_input.readline().strip()=="truncation_level_prime=";
truncation_prime = settings_input.readline().strip().split('=')[1];
truncation_prime = truncation_prime.split();
assert len(truncation_prime)==self._number_of_topics;
for value in truncation_prime:
self._truncation_size_prime.append(int(value));
#self._truncation_size_prime[k] = int(truncation_prime[k]);
assert len(self._truncation_size_prime)==self._number_of_topics;
def export_word_index(self, word_index_path):
settings_output = open(word_index_path, 'w');
settings_output.write(" ".join([str(value) for value in self._new_words]) + "\n");
for index in xrange(len(self._index_to_word)):
settings_output.write("%s\n" % self._index_to_word[index]);
settings_output.close();
def import_word_index(self, word_index_path):
settings_input = open(word_index_path, 'r');
self._new_words = [int(value) for value in settings_input.readline().strip().split()];
self._index_to_word = {};
self._word_to_index = {};
for line in settings_input:
line = line.strip();
self._index_to_word[len(self._index_to_word)] = line;
self._word_to_index[line] = len(self._word_to_index);
def export_index_nupos(self, index_nupos_path):
settings_output = open(index_nupos_path, 'w');
for k in xrange(self._number_of_topics):
#settings_output.write(str(k) + "\t");
for nupos in self._nupos_to_index[k]:
settings_output.write(" %d=%d" % (nupos, self._nupos_to_index[k][nupos]));
settings_output.write("\n");
settings_output.close();
def import_index_nupos(self, index_nupos_path):
settings_input = open(index_nupos_path, 'r');
self._index_to_nupos = [];
self._nupos_to_index = [];
for k in xrange(self._number_of_topics):
self._index_to_nupos.append(dict());
self._nupos_to_index.append(dict());
nuposes = settings_input.readline().split();
#assert nuposes[0] == str(k);
for token in nuposes:
tokens = token.split('=');
self._nupos_to_index[k][int(tokens[0])] = int(tokens[1]);
self._index_to_nupos[k][int(tokens[1])] = int(tokens[0]);
def export_nu(self, nu_1_path, nu_2_path):
settings_output = open(nu_1_path, 'w');
for k in xrange(self._number_of_topics):
#settings_output.write("nu_1 %d\n" % (k));
for row in self._nu_1[k]:
settings_output.write(" ".join([str(value) for value in row]) + "\n");
settings_output.close();
settings_output = open(nu_2_path, 'w');
for k in xrange(self._number_of_topics):
#settings_output.write("nu_2 %d\n" % (k));
for row in self._nu_2[k]:
settings_output.write(" ".join([str(value) for value in row]) + "\n");
settings_output.close();
def import_nu(self, nu_1_path, nu_2_path):
settings_input = open(nu_1_path, 'r');
self._nu_1 = {};
for k in xrange(self._number_of_topics):
nu_1_tokens = settings_input.readline().split();
self._nu_1[k] = numpy.zeros((1, self._truncation_size[k]));
count = 0;
for value in nu_1_tokens:
self._nu_1[k][0, count] = float(value);
count += 1;
settings_input = open(nu_2_path, 'r');
self._nu_2 = {};
for k in xrange(self._number_of_topics):
nu_2_tokens = settings_input.readline().split();
self._nu_2[k] = numpy.zeros((1, self._truncation_size[k]));
count = 0;
for value in nu_2_tokens:
self._nu_2[k][0, count] = float(value);
count += 1;
def export_word_trace(self, word_trace_path):
settings_output = open(word_trace_path, 'w');
settings_output.write("%d\t%d\t%d\n" % (len(self._word_trace), self._number_of_topics, self._number_of_documents/self._batch_size + 1));
for word_trace in self._word_trace:
for row in word_trace:
settings_output.write(" ".join([str(value) for value in row]) + "\n");
settings_output.close();
def import_word_trace(self, word_trace_path):
settings_input = open(word_trace_path, 'r');
self._word_trace = [];
dimensions = settings_input.readline().strip().split();
words_in_total = int(dimensions[0]);
rows = int(dimensions[1]);
cols = int(dimensions[2]);
for index in xrange(words_in_total):
#index = int(settings_input.readline().strip());
#assert index == index;
self._word_trace.append(numpy.zeros((rows, cols), dtype='int32'));
for row_index in xrange(rows):
count=0;
for value in settings_input.readline().strip().split():
self._word_trace[index][row_index, count] = int(value);
count += 1;
def export_ranking_statistics(self, ranking_statistics_path):
settings_output = open(ranking_statistics_path, 'w');
for k in xrange(self._number_of_topics):
#settings_output.write(str(k) + "\t");
for index in self._ranking_statistics[k].keys():
settings_output.write(" %d=%f" % (index, self._ranking_statistics[k][index]));
settings_output.write("\n");
settings_output.close();
def import_ranking_statistics(self, ranking_statistics_path):
settings_input = open(ranking_statistics_path, 'r');
self._ranking_statistics = {};
for k in xrange(self._number_of_topics):
self._ranking_statistics[k] = nltk.probability.FreqDist();
ranking_statistics = settings_input.readline().split();
#assert ranking_statistics[0]==str(k);
for token in ranking_statistics:
tokens = token.split('=');
#self._ranking_statistics[k].inc(int(tokens[0]), float(tokens[1]) + self._ranking_smooth_factor);
self._ranking_statistics[k][int(tokens[0])] += float(tokens[1]) + self._ranking_smooth_factor;
"""
"""
def export_intermediate_gamma(self, directory='../output/tmp/'):
if not directory.endswith('/'):
directory += "/";
if self._counter!=0:
gamma_path = directory + self._gamma_title + str(self._counter) + ".txt";
numpy.savetxt(gamma_path, self._document_topic_distribution);
#scipy.io.mmwrite(gamma_path, self._document_topic_distribution);
self._document_topic_distribution = None;
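if __name__ == '__main__':
    # A minimal usage sketch (illustrative only): the toy corpus, vocabulary and sizes
    # below are made up, and a real run would stream many mini-batches of documents
    # through learning() and periodically export the model.
    toy_vocab = ["cow", "horse", "sheep", "snake", "lizard", "eats", "grass"];
    toy_batch = ["cow eats grass", "snake eats lizard"];
    lda = Hybrid();
    lda._initialize(vocab=toy_vocab,
                    number_of_topics=2,
                    number_of_documents=len(toy_batch),
                    batch_size=len(toy_batch),
                    expected_truncation_size=100);
    document_topics = lda.learning(toy_batch);
    print(document_topics);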
|
|
"""Test the Kuler Sky lights."""
from unittest.mock import MagicMock, patch
import pykulersky
import pytest
from pytest import approx
from homeassistant import setup
from homeassistant.components.kulersky.const import (
DATA_ADDRESSES,
DATA_DISCOVERY_SUBSCRIPTION,
DOMAIN,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_MODE,
ATTR_HS_COLOR,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_SUPPORTED_COLOR_MODES,
ATTR_XY_COLOR,
COLOR_MODE_RGBW,
SCAN_INTERVAL,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.util.dt as dt_util
from tests.common import MockConfigEntry, async_fire_time_changed
@pytest.fixture
async def mock_entry(hass):
"""Create a mock light entity."""
return MockConfigEntry(domain=DOMAIN)
@pytest.fixture
async def mock_light(hass, mock_entry):
"""Create a mock light entity."""
await setup.async_setup_component(hass, "persistent_notification", {})
light = MagicMock(spec=pykulersky.Light)
light.address = "AA:BB:CC:11:22:33"
light.name = "Bedroom"
light.connect.return_value = True
light.get_color.return_value = (0, 0, 0, 0)
with patch(
"homeassistant.components.kulersky.light.pykulersky.discover",
return_value=[light],
):
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
assert light.connect.called
yield light
async def test_init(hass, mock_light):
"""Test platform setup."""
state = hass.states.get("light.bedroom")
assert state.state == STATE_OFF
assert dict(state.attributes) == {
ATTR_FRIENDLY_NAME: "Bedroom",
ATTR_SUPPORTED_COLOR_MODES: [COLOR_MODE_RGBW],
ATTR_SUPPORTED_FEATURES: 0,
}
with patch.object(hass.loop, "stop"):
await hass.async_stop()
await hass.async_block_till_done()
assert mock_light.disconnect.called
async def test_remove_entry(hass, mock_light, mock_entry):
"""Test platform setup."""
assert hass.data[DOMAIN][DATA_ADDRESSES] == {"AA:BB:CC:11:22:33"}
assert DATA_DISCOVERY_SUBSCRIPTION in hass.data[DOMAIN]
await hass.config_entries.async_remove(mock_entry.entry_id)
assert mock_light.disconnect.called
assert DOMAIN not in hass.data
async def test_remove_entry_exceptions_caught(hass, mock_light, mock_entry):
"""Assert that disconnect exceptions are caught."""
mock_light.disconnect.side_effect = pykulersky.PykulerskyException("Mock error")
await hass.config_entries.async_remove(mock_entry.entry_id)
assert mock_light.disconnect.called
async def test_update_exception(hass, mock_light):
"""Test platform setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_light.get_color.side_effect = pykulersky.PykulerskyException
await hass.helpers.entity_component.async_update_entity("light.bedroom")
state = hass.states.get("light.bedroom")
assert state is not None
assert state.state == STATE_UNAVAILABLE
async def test_light_turn_on(hass, mock_light):
"""Test KulerSkyLight turn_on."""
mock_light.get_color.return_value = (255, 255, 255, 255)
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.bedroom"},
blocking=True,
)
await hass.async_block_till_done()
mock_light.set_color.assert_called_with(255, 255, 255, 255)
mock_light.get_color.return_value = (50, 50, 50, 50)
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.bedroom", ATTR_BRIGHTNESS: 50},
blocking=True,
)
await hass.async_block_till_done()
mock_light.set_color.assert_called_with(50, 50, 50, 50)
mock_light.get_color.return_value = (50, 25, 13, 6)
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.bedroom", ATTR_RGBW_COLOR: (255, 128, 64, 32)},
blocking=True,
)
await hass.async_block_till_done()
mock_light.set_color.assert_called_with(50, 25, 13, 6)
# RGB color is converted to RGBW by assigning the white component to the white
# channel, see color_rgb_to_rgbw
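    # Illustration (assuming color_rgb_to_rgbw takes the minimum channel as the white
    # component): RGB (64, 128, 255) becomes RGBW (0, 64, 191, 64), which the integration
    # then scales by brightness to give the expected values below.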
mock_light.get_color.return_value = (0, 17, 50, 17)
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.bedroom", ATTR_RGB_COLOR: (64, 128, 255)},
blocking=True,
)
await hass.async_block_till_done()
mock_light.set_color.assert_called_with(0, 17, 50, 17)
# HS color is converted to RGBW by assigning the white component to the white
# channel, see color_rgb_to_rgbw
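    # Illustration (approximate, assuming HA's standard hs->RGB and rgb->rgbw conversions):
    # HS (50, 50) is roughly RGB (255, 234, 128), i.e. RGBW (127, 106, 0, 128) before the
    # integration's brightness scaling produces the expected values below.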
mock_light.get_color.return_value = (50, 41, 0, 50)
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.bedroom", ATTR_HS_COLOR: (50, 50)},
blocking=True,
)
await hass.async_block_till_done()
mock_light.set_color.assert_called_with(50, 41, 0, 50)
async def test_light_turn_off(hass, mock_light):
"""Test KulerSkyLight turn_on."""
mock_light.get_color.return_value = (0, 0, 0, 0)
await hass.services.async_call(
"light",
"turn_off",
{ATTR_ENTITY_ID: "light.bedroom"},
blocking=True,
)
await hass.async_block_till_done()
mock_light.set_color.assert_called_with(0, 0, 0, 0)
async def test_light_update(hass, mock_light):
"""Test KulerSkyLight update."""
utcnow = dt_util.utcnow()
state = hass.states.get("light.bedroom")
assert state.state == STATE_OFF
assert dict(state.attributes) == {
ATTR_FRIENDLY_NAME: "Bedroom",
ATTR_SUPPORTED_COLOR_MODES: [COLOR_MODE_RGBW],
ATTR_SUPPORTED_FEATURES: 0,
}
    # Test an exception during the polled state update
mock_light.get_color.side_effect = pykulersky.PykulerskyException("TEST")
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.bedroom")
assert state.state == STATE_UNAVAILABLE
assert dict(state.attributes) == {
ATTR_FRIENDLY_NAME: "Bedroom",
ATTR_SUPPORTED_COLOR_MODES: [COLOR_MODE_RGBW],
ATTR_SUPPORTED_FEATURES: 0,
}
mock_light.get_color.side_effect = None
mock_light.get_color.return_value = (80, 160, 255, 0)
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.bedroom")
assert state.state == STATE_ON
assert dict(state.attributes) == {
ATTR_FRIENDLY_NAME: "Bedroom",
ATTR_SUPPORTED_COLOR_MODES: [COLOR_MODE_RGBW],
ATTR_SUPPORTED_FEATURES: 0,
ATTR_COLOR_MODE: COLOR_MODE_RGBW,
ATTR_BRIGHTNESS: 255,
ATTR_HS_COLOR: (approx(212.571), approx(68.627)),
ATTR_RGB_COLOR: (80, 160, 255),
ATTR_RGBW_COLOR: (80, 160, 255, 0),
ATTR_XY_COLOR: (approx(0.17), approx(0.193)),
}
mock_light.get_color.side_effect = None
mock_light.get_color.return_value = (80, 160, 200, 255)
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.bedroom")
assert state.state == STATE_ON
assert dict(state.attributes) == {
ATTR_FRIENDLY_NAME: "Bedroom",
ATTR_SUPPORTED_COLOR_MODES: [COLOR_MODE_RGBW],
ATTR_SUPPORTED_FEATURES: 0,
ATTR_COLOR_MODE: COLOR_MODE_RGBW,
ATTR_BRIGHTNESS: 255,
ATTR_HS_COLOR: (approx(199.701), approx(26.275)),
ATTR_RGB_COLOR: (188, 233, 255),
ATTR_RGBW_COLOR: (80, 160, 200, 255),
ATTR_XY_COLOR: (approx(0.259), approx(0.306)),
}
mock_light.get_color.side_effect = None
mock_light.get_color.return_value = (80, 160, 200, 240)
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.bedroom")
assert state.state == STATE_ON
assert dict(state.attributes) == {
ATTR_FRIENDLY_NAME: "Bedroom",
ATTR_SUPPORTED_COLOR_MODES: [COLOR_MODE_RGBW],
ATTR_SUPPORTED_FEATURES: 0,
ATTR_COLOR_MODE: COLOR_MODE_RGBW,
ATTR_BRIGHTNESS: 240,
ATTR_HS_COLOR: (approx(200.0), approx(27.059)),
ATTR_RGB_COLOR: (186, 232, 255),
ATTR_RGBW_COLOR: (85, 170, 212, 255),
ATTR_XY_COLOR: (approx(0.257), approx(0.305)),
}
|
|
# pyOCD debugger
# Copyright (c) 2018-2020 Arm Limited
# Copyright (c) 2021 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep
import logging
from typing import (Callable, Collection, Dict, List, Optional, overload, Sequence, Set, TYPE_CHECKING, Tuple, Union)
from typing_extensions import Literal
from pyocd.probe.pydapaccess.dap_access_api import DAPAccessIntf
from .debug_probe import DebugProbe
from ..core import exceptions
from ..core.plugin import Plugin
from .pydapaccess import DAPAccess
from ..board.mbed_board import MbedBoard
from ..board.board_ids import BOARD_ID_TO_INFO
if TYPE_CHECKING:
from ..board.board import Board
LOG = logging.getLogger(__name__)
TRACE = LOG.getChild("trace")
TRACE.setLevel(logging.CRITICAL)
class CMSISDAPProbe(DebugProbe):
"""@brief Wraps a pydapaccess link as a DebugProbe.
Supports CMSIS-DAP v1 and v2.
"""
# Masks for CMSIS-DAP capabilities.
SWD_CAPABILITY_MASK = 1
JTAG_CAPABILITY_MASK = 2
# Map from DebugProbe protocol types to/from DAPAccess port types.
#
# Note that Protocol.DEFAULT gets mapped to PORT.SWD. We need a concrete port type because some
# non-reference CMSIS-DAP implementations do not accept the default port type.
_PROTOCOL_TO_PORT: Dict[DebugProbe.Protocol, DAPAccess.PORT] = {
DebugProbe.Protocol.DEFAULT: DAPAccess.PORT.SWD,
DebugProbe.Protocol.SWD: DAPAccess.PORT.SWD,
DebugProbe.Protocol.JTAG: DAPAccess.PORT.JTAG,
}
_PORT_TO_PROTOCOL: Dict[DAPAccess.PORT, DebugProbe.Protocol] = {
DAPAccess.PORT.DEFAULT: DebugProbe.Protocol.DEFAULT,
DAPAccess.PORT.SWD: DebugProbe.Protocol.SWD,
DAPAccess.PORT.JTAG: DebugProbe.Protocol.JTAG,
}
# APnDP constants.
DP = 0
AP = 1
# Bitmasks for AP register address fields.
A32 = 0x0000000c
# Map from AP/DP and 2-bit register address to the enums used by pydapaccess.
REG_ADDR_TO_ID_MAP: Dict[Tuple[int, int], DAPAccess.REG] = {
# APnDP A32
( 0, 0x0 ) : DAPAccess.REG.DP_0x0,
( 0, 0x4 ) : DAPAccess.REG.DP_0x4,
( 0, 0x8 ) : DAPAccess.REG.DP_0x8,
( 0, 0xC ) : DAPAccess.REG.DP_0xC,
( 1, 0x0 ) : DAPAccess.REG.AP_0x0,
( 1, 0x4 ) : DAPAccess.REG.AP_0x4,
( 1, 0x8 ) : DAPAccess.REG.AP_0x8,
( 1, 0xC ) : DAPAccess.REG.AP_0xC,
}
## USB VID and PID pair for DAPLink firmware.
DAPLINK_VIDPID = (0x0d28, 0x0204)
@classmethod
    def get_all_connected_probes(cls, unique_id: Optional[str] = None, is_explicit: bool = False) -> Sequence["DebugProbe"]:
try:
return [cls(dev) for dev in DAPAccess.get_connected_devices()]
except DAPAccess.Error as exc:
raise cls._convert_exception(exc) from exc
@classmethod
def get_probe_with_id(cls, unique_id: str, is_explicit: bool = False) -> Optional["DebugProbe"]:
try:
dap_access = DAPAccess.get_device(unique_id)
if dap_access is not None:
return cls(dap_access)
else:
return None
except DAPAccess.Error as exc:
raise cls._convert_exception(exc) from exc
def __init__(self, device: DAPAccessIntf) -> None:
super(CMSISDAPProbe, self).__init__()
self._link = device
self._supported_protocols: List[DebugProbe.Protocol] = []
self._protocol: Optional[DebugProbe.Protocol] = None
self._is_open = False
self._caps: Set[DebugProbe.Capability] = set()
@property
def board_id(self) -> Optional[str]:
"""@brief Unique identifier for the board.
Only board IDs for DAPLink firmware are supported. We can't assume other
CMSIS-DAP firmware is using the same serial number format, so we cannot reliably
extract the board ID.
@return Either a 4-character board ID string, or None if the probe doesn't have a board ID.
"""
if self._link.vidpid == self.DAPLINK_VIDPID:
return self.unique_id[0:4]
else:
return None
@property
def description(self) -> str:
try:
# self.board_id may be None.
board_info = BOARD_ID_TO_INFO[self.board_id]
except KeyError:
return self.vendor_name + " " + self.product_name
else:
return "{0} [{1}]".format(board_info.name, board_info.target)
@property
def vendor_name(self) -> str:
return self._link.vendor_name
@property
def product_name(self) -> str:
return self._link.product_name
@property
def supported_wire_protocols(self) -> Collection[DebugProbe.Protocol]:
"""@brief Only valid after opening."""
return self._supported_protocols
@property
def unique_id(self) -> str:
return self._link.get_unique_id()
@property
def wire_protocol(self) -> Optional[DebugProbe.Protocol]:
return self._protocol
@property
def is_open(self) -> bool:
return self._is_open
@property
def capabilities(self) -> Set[DebugProbe.Capability]:
return self._caps
def create_associated_board(self) -> Optional["Board"]:
assert self.session is not None
# Only support associated Mbed boards for DAPLink firmware. We can't assume other
# CMSIS-DAP firmware is using the same serial number format, so we cannot reliably
# extract the board ID.
if self.board_id is not None:
return MbedBoard(self.session, board_id=self.board_id)
else:
return None
def open(self) -> None:
assert self.session
try:
TRACE.debug("trace: open")
self._link.open()
self._is_open = True
self._link.set_deferred_transfer(self.session.options.get('cmsis_dap.deferred_transfers'))
# Read CMSIS-DAP capabilities
self._capabilities = self._link.identify(DAPAccess.ID.CAPABILITIES)
self._supported_protocols = [DebugProbe.Protocol.DEFAULT]
if self._capabilities & self.SWD_CAPABILITY_MASK:
self._supported_protocols.append(DebugProbe.Protocol.SWD)
if self._capabilities & self.JTAG_CAPABILITY_MASK:
self._supported_protocols.append(DebugProbe.Protocol.JTAG)
self._caps = {
self.Capability.SWJ_SEQUENCE,
self.Capability.BANKED_DP_REGISTERS,
self.Capability.APv2_ADDRESSES,
self.Capability.JTAG_SEQUENCE,
}
if self._link.has_swd_sequence:
self._caps.add(self.Capability.SWD_SEQUENCE)
if self._link.has_swo():
self._caps.add(self.Capability.SWO)
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
def close(self) -> None:
try:
TRACE.debug("trace: close")
self._link.close()
self._is_open = False
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
# ------------------------------------------- #
# Target control functions
# ------------------------------------------- #
def connect(self, protocol: Optional[DebugProbe.Protocol] = None) -> None:
TRACE.debug("trace: connect(%s)", protocol.name if (protocol is not None) else "None")
# Convert protocol to port enum.
#
        # We must get a non-default port, since some CMSIS-DAP implementations do not accept the default
        # port. Note that the conversion of the default port type is handled by the _PROTOCOL_TO_PORT
        # dict, so the mapping lives in one place.
port = (self._PROTOCOL_TO_PORT.get(protocol)
if protocol else self._PROTOCOL_TO_PORT[DebugProbe.Protocol.DEFAULT])
assert port is not DAPAccess.PORT.DEFAULT
try:
self._link.connect(port)
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
# Read the current mode and save it.
actualMode = self._link.get_swj_mode()
self._protocol = self._PORT_TO_PROTOCOL[actualMode]
def swj_sequence(self, length: int, bits: int) -> None:
TRACE.debug("trace: swj_sequence(length=%i, bits=%x)", length, bits)
try:
self._link.swj_sequence(length, bits)
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
def swd_sequence(self, sequences: Sequence[Union[Tuple[int], Tuple[int, int]]]) -> Tuple[int, Sequence[bytes]]:
TRACE.debug("trace: swd_sequence(sequences=%r)", sequences)
try:
return self._link.swd_sequence(sequences)
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
    def jtag_sequence(self, cycles: int, tms: int, read_tdo: bool, tdi: int) -> Optional[int]:
        TRACE.debug("trace: jtag_sequence(cycles=%i, tms=%x, read_tdo=%s, tdi=%x)", cycles, tms, read_tdo, tdi)
        try:
            # Return the TDO bits from the link when read_tdo is requested, matching the
            # Optional[int] return annotation.
            return self._link.jtag_sequence(cycles, tms, read_tdo, tdi)
        except DAPAccess.Error as exc:
            raise self._convert_exception(exc) from exc
def disconnect(self) -> None:
TRACE.debug("trace: disconnect")
try:
self._link.disconnect()
self._protocol = None
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
def set_clock(self, frequency: float) -> None:
TRACE.debug("trace: set_clock(freq=%i)", frequency)
try:
self._link.set_clock(frequency)
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
def reset(self) -> None:
assert self.session
TRACE.debug("trace: reset")
try:
self._link.assert_reset(True)
sleep(self.session.options.get('reset.hold_time'))
self._link.assert_reset(False)
sleep(self.session.options.get('reset.post_delay'))
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
def assert_reset(self, asserted: bool) -> None:
TRACE.debug("trace: assert_reset(%s)", asserted)
try:
self._link.assert_reset(asserted)
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
def is_reset_asserted(self) -> bool:
try:
result = self._link.is_reset_asserted()
TRACE.debug("trace: is_reset_asserted -> %s", result)
return result
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
def flush(self) -> None:
TRACE.debug("trace: flush")
try:
self._link.flush()
except DAPAccess.Error as exc:
TRACE.debug("trace: error from flush: %r", exc)
raise self._convert_exception(exc) from exc
# ------------------------------------------- #
# DAP Access functions
# ------------------------------------------- #
@overload
def read_dp(self, addr: int) -> int:
...
@overload
def read_dp(self, addr: int, now: Literal[True] = True) -> int:
...
@overload
def read_dp(self, addr: int, now: Literal[False]) -> Callable[[], int]:
...
@overload
def read_dp(self, addr: int, now: bool) -> Union[int, Callable[[], int]]:
...
def read_dp(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]:
reg_id = self.REG_ADDR_TO_ID_MAP[self.DP, addr]
try:
if not now:
TRACE.debug("trace: read_dp(addr=%#010x) -> ...", addr)
result = self._link.read_reg(reg_id, now=now)
except DAPAccess.Error as error:
TRACE.debug("trace: read_dp(addr=%#010x) -> error(%s)", addr, error)
raise self._convert_exception(error) from error
# Read callback returned for async reads.
def read_dp_result_callback():
try:
value = result()
TRACE.debug("trace: ... read_dp(addr=%#010x) -> %#010x", addr, value)
return value
except DAPAccess.Error as error:
TRACE.debug("trace: ... read_dp(addr=%#010x) -> error(%s)", addr, error)
raise self._convert_exception(error) from error
if now:
TRACE.debug("trace: read_dp(addr=%#010x) -> %#010x", addr, result)
return result
else:
return read_dp_result_callback
def write_dp(self, addr: int, data: int) -> None:
reg_id = self.REG_ADDR_TO_ID_MAP[self.DP, addr]
# Write the DP register.
try:
self._link.write_reg(reg_id, data)
TRACE.debug("trace: write_dp(addr=%#010x, data=%#010x)", addr, data)
except DAPAccess.Error as error:
TRACE.debug("trace: write_dp(addr=%#010x, data=%#010x) -> error(%s)", addr, data, error)
raise self._convert_exception(error) from error
@overload
def read_ap(self, addr: int) -> int:
...
@overload
def read_ap(self, addr: int, now: Literal[True] = True) -> int:
...
@overload
def read_ap(self, addr: int, now: Literal[False]) -> Callable[[], int]:
...
@overload
def read_ap(self, addr: int, now: bool) -> Union[int, Callable[[], int]]:
...
def read_ap(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]:
assert isinstance(addr, int)
ap_reg = self.REG_ADDR_TO_ID_MAP[self.AP, (addr & self.A32)]
try:
if not now:
TRACE.debug("trace: read_ap(addr=%#010x) -> ...", addr)
result = self._link.read_reg(ap_reg, now=now)
except DAPAccess.Error as error:
raise self._convert_exception(error) from error
# Read callback returned for async reads.
def read_ap_result_callback():
try:
value = result()
TRACE.debug("trace: ... read_ap(addr=%#010x) -> %#010x", addr, value)
return value
except DAPAccess.Error as error:
TRACE.debug("trace: ... read_ap(addr=%#010x) -> error(%s)", addr, error)
raise self._convert_exception(error) from error
if now:
TRACE.debug("trace: read_ap(addr=%#010x) -> %#010x", addr, result)
return result
else:
return read_ap_result_callback
def write_ap(self, addr: int, data) -> None:
assert isinstance(addr, int)
ap_reg = self.REG_ADDR_TO_ID_MAP[self.AP, (addr & self.A32)]
try:
# Perform the AP register write.
self._link.write_reg(ap_reg, data)
TRACE.debug("trace: write_ap(addr=%#010x, data=%#010x)", addr, data)
except DAPAccess.Error as error:
TRACE.debug("trace: write_ap(addr=%#010x, data=%#010x) -> error(%s)", addr, data, error)
raise self._convert_exception(error) from error
@overload
def read_ap_multiple(self, addr: int, count: int = 1) -> Sequence[int]:
...
@overload
def read_ap_multiple(self, addr: int, count: int, now: Literal[True] = True) -> Sequence[int]:
...
@overload
def read_ap_multiple(self, addr: int, count: int, now: Literal[False]) -> Callable[[], Sequence[int]]:
...
@overload
def read_ap_multiple(self, addr: int, count: int, now: bool) -> Union[Sequence[int], Callable[[], Sequence[int]]]:
...
def read_ap_multiple(self, addr: int, count: int = 1, now: bool = True) \
-> Union[Sequence[int], Callable[[], Sequence[int]]]:
assert isinstance(addr, int)
ap_reg = self.REG_ADDR_TO_ID_MAP[self.AP, (addr & self.A32)]
try:
if not now:
TRACE.debug("trace: read_ap_multi(addr=%#010x, count=%i) -> ...", addr, count)
result = self._link.reg_read_repeat(count, ap_reg, dap_index=0, now=now)
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
# Need to wrap the deferred callback to convert exceptions.
def read_ap_repeat_callback():
try:
values = result()
TRACE.debug("trace: ... read_ap_multi(addr=%#010x, count=%i) -> [%s]", addr, count,
", ".join(["%#010x" % v for v in values]))
return values
except DAPAccess.Error as exc:
TRACE.debug("trace: ... read_ap_multi(addr=%#010x, count=%i) -> error(%s)",
addr, count, exc)
raise self._convert_exception(exc) from exc
if now:
TRACE.debug("trace: read_ap_multi(addr=%#010x, count=%i) -> [%s]", addr, count,
", ".join(["%#010x" % v for v in result]))
return result
else:
return read_ap_repeat_callback
def write_ap_multiple(self, addr: int, values) -> None:
assert isinstance(addr, int)
ap_reg = self.REG_ADDR_TO_ID_MAP[self.AP, (addr & self.A32)]
try:
self._link.reg_write_repeat(len(values), ap_reg, values, dap_index=0)
TRACE.debug("trace: write_ap_multi(addr=%#010x, (%i)[%s])", addr, len(values),
", ".join(["%#010x" % v for v in values]))
except DAPAccess.Error as exc:
TRACE.debug("trace: write_ap_multi(addr=%#010x, (%i)[%s]) -> error(%s)", addr, len(values),
", ".join(["%#010x" % v for v in values]), exc)
raise self._convert_exception(exc) from exc
# ------------------------------------------- #
# SWO functions
# ------------------------------------------- #
def swo_start(self, baudrate: float) -> None:
TRACE.debug("trace: swo_start(baud=%i)", baudrate)
try:
self._link.swo_configure(True, baudrate)
self._link.swo_control(True)
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
def swo_stop(self) -> None:
TRACE.debug("trace: swo_stop")
try:
self._link.swo_configure(False, 0)
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
def swo_read(self) -> bytearray:
try:
data = self._link.swo_read()
TRACE.debug("trace: swo_read -> %i bytes", len(data))
return data
except DAPAccess.Error as exc:
raise self._convert_exception(exc) from exc
@staticmethod
def _convert_exception(exc: Exception) -> Exception:
if isinstance(exc, DAPAccess.TransferFaultError):
return exceptions.TransferFaultError(*exc.args)
elif isinstance(exc, DAPAccess.TransferTimeoutError):
return exceptions.TransferTimeoutError(*exc.args)
elif isinstance(exc, DAPAccess.TransferError):
return exceptions.TransferError(*exc.args)
elif isinstance(exc, (DAPAccess.DeviceError, DAPAccess.CommandError)):
return exceptions.ProbeError(*exc.args)
elif isinstance(exc, DAPAccess.Error):
return exceptions.Error(*exc.args)
else:
return exc
class CMSISDAPProbePlugin(Plugin):
"""@brief Plugin class for CMSISDAPProbe."""
def load(self):
return CMSISDAPProbe
@property
def name(self):
return "cmsisdap"
@property
def description(self):
return "CMSIS-DAP debug probe"
|
|
#!/usr/bin/env python
from __future__ import print_function
"""
test_collate.py
test branching dependencies
"""
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
ruffus_name = os.path.basename(parent_dir)
ruffus = __import__ (ruffus_name)
try:
attrlist = ruffus.__all__
except AttributeError:
attrlist = dir (ruffus)
for attr in attrlist:
if attr[0:2] != "__":
globals()[attr] = getattr (ruffus, attr)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import unittest
import shutil
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO
from collections import defaultdict
# json is in the standard library from python 2.6 onwards; the simplejson fallback below is kept for reference but disabled
import json
#try:
# import json
#except ImportError:
# import simplejson
# json = simplejson
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
species_list = defaultdict(list)
species_list["mammals"].append("cow" )
species_list["mammals"].append("horse" )
species_list["mammals"].append("sheep" )
species_list["reptiles"].append("snake" )
species_list["reptiles"].append("lizard" )
species_list["reptiles"].append("crocodile" )
species_list["fish" ].append("pufferfish")
tempdir = "temp_filesre_combine/"
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# task1
#
def do_write(file_name, what):
with open(file_name, "a") as oo:
oo.write(what)
test_file = tempdir + "task.done"
@follows(mkdir(tempdir, tempdir + "test"))
@posttask(lambda: do_write(tempdir + "task.done", "Task 1 Done\n"))
@split(None, tempdir + '*.animal')
def prepare_files (no_inputs, outputs):
# cleanup previous
for f in outputs:
os.unlink(f)
for grouping in species_list:
for species_name in species_list[grouping]:
filename = tempdir + "%s.%s.animal" % (species_name, grouping)
with open(filename, "w") as oo:
oo.write(species_name + "\n")
#
# task2
#
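# The regex captures the directory in \1 and the species grouping in \2, so e.g.
# temp_filesre_combine/cow.mammals.animal collates (together with the other mammals)
# into temp_filesre_combine/mammals.results.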
@collate(prepare_files, regex(r'(.*/).*\.(.*)\.animal'), r'\1\2.results')
@posttask(lambda: do_write(tempdir + "task.done", "Task 2 Done\n"))
def summarise_by_grouping(infiles, outfile):
"""
Summarise by each species group, e.g. mammals, reptiles, fish
"""
with open(tempdir + "jobs.start", "a") as oo:
oo.write('job = %s\n' % json.dumps([infiles, outfile]))
with open(outfile, "w") as oo:
for i in infiles:
with open(i) as ii:
oo.write(ii.read())
with open(tempdir + "jobs.finish", "a") as oo:
oo.write('job = %s\n' % json.dumps([infiles, outfile]))
def check_species_correct():
"""
#cow.mammals.animal
#horse.mammals.animal
#sheep.mammals.animal
# -> mammals.results
#
#snake.reptiles.animal
#lizard.reptiles.animal
#crocodile.reptiles.animal
# -> reptiles.results
#
#pufferfish.fish.animal
# -> fish.results
"""
for grouping in species_list:
with open(tempdir + grouping + ".results") as oo:
assert(oo.read() == "".join(s + "\n" for s in sorted(species_list[grouping])))
class Test_ruffus(unittest.TestCase):
def setUp(self):
try:
shutil.rmtree(tempdir)
except OSError:
pass
def tearDown(self):
try:
shutil.rmtree(tempdir)
except OSError:
pass
def test_ruffus (self):
pipeline_run(multiprocess = 10, verbose = 0, pipeline= "main")
check_species_correct()
def test_newstyle_ruffus (self):
test_pipeline = Pipeline("test")
test_pipeline.split(task_func = prepare_files,
input = None,
output = tempdir + '*.animal')\
.follows(mkdir(tempdir, tempdir + "test"))\
.posttask(lambda: do_write(tempdir + "task.done", "Task 1 Done\n"))
test_pipeline.collate(task_func = summarise_by_grouping,
input = prepare_files,
filter = regex(r'(.*/).*\.(.*)\.animal'),
output = r'\1\2.results')\
.posttask(lambda: do_write(tempdir + "task.done", "Task 2 Done\n"))
test_pipeline.run(multiprocess = 10, verbose = 0)
check_species_correct()
if __name__ == '__main__':
unittest.main()
|
|
import contextlib, itertools, os, time, unittest
from . import matrix_results
from bibliopixel.drivers.driver_base import DriverBase
from bibliopixel.layout.matrix import Matrix
from bibliopixel.project import data_maker
from bibliopixel.util import log
WHITE = (255, 255, 255)
DUMP_FILENAME = os.environ.get('BP_MATRIX_DUMP')
if DUMP_FILENAME:
with open(DUMP_FILENAME, 'w') as fp:
fp.write('# This file was automatically generated on ')
fp.write(time.strftime('%X %x %Z'))
fp.write('\n')
class BaseMatrixTest(unittest.TestCase):
def text_at(self, x, y):
"""Return text for a given pixel"""
return '*' if any(self.matrix.get(x, y)) else ' '
def line_at(self, y):
return ''.join(self.text_at(x, y) for x in range(self.matrix.width))
def to_strings(self):
return tuple(self.line_at(y) for y in range(self.matrix.height))
def name_of_test(self):
name = self.id().split('.')[-1]
if name.startswith('test_'):
name = name[len('test_'):]
return name.upper()
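# e.g. 'test_fill_screen' -> 'FILL_SCREEN', which matrix_test() below looks up as an
# attribute of the matrix_results module to get the expected pixel strings.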
def dump(self):
# Dump the result to a file, if enabled.
pass
@contextlib.contextmanager
def matrix_test(self, width=16, height=16):
d = DriverBase(num=width * height)
self.matrix = Matrix(d, width=width, height=height, maker=self.maker)
yield # Perform your operation here.
self.dump()
expected = getattr(matrix_results, self.name_of_test())
actual = self.to_strings()
if expected != actual:
log.printer('Expected:', *(repr(s) for s in expected), sep='\n')
log.printer('Actual:', *(repr(s) for s in actual), sep='\n')
self.assertTrue(False)
def test_empty(self):
with self.matrix_test(4, 4):
pass
def test_horizontal_line(self):
with self.matrix_test():
self.matrix.drawLine(0, 0, 15, 0, WHITE)
def test_vertical_line(self):
with self.matrix_test():
self.matrix.drawLine(0, 0, 0, 15, WHITE)
def test_vertical_line2(self):
with self.matrix_test():
self.matrix.drawLine(1, 0, 1, 15, WHITE)
def test_draw_circle1(self):
with self.matrix_test():
self.matrix.drawCircle(8, 8, 6, WHITE)
def test_draw_circle2(self):
with self.matrix_test(8, 8):
self.matrix.drawCircle(4, 4, 15, WHITE)
def test_draw_circle3(self):
with self.matrix_test(4, 12):
self.matrix.drawCircle(4, 6, 20, WHITE)
def test_fill_circle1(self):
with self.matrix_test():
self.matrix.fillCircle(8, 8, 6, WHITE)
def test_fill_circle2(self):
with self.matrix_test(8, 8):
self.matrix.fillCircle(4, 4, 15, WHITE)
def test_fill_circle3(self):
with self.matrix_test(4, 12):
self.matrix.fillCircle(4, 6, 20, WHITE)
def test_bresenham0(self):
with self.matrix_test(8, 8):
self.matrix.bresenham_line(0, 0, 8, 8, WHITE)
def test_bresenham1(self):
with self.matrix_test(8, 8):
self.matrix.bresenham_line(8, 8, 0, 0, WHITE)
def test_bresenham2(self):
with self.matrix_test():
self.matrix.bresenham_line(3, 5, 15, 18, WHITE)
def test_bresenham3(self):
with self.matrix_test():
self.matrix.bresenham_line(15, 18, 3, 5, WHITE)
def test_wu0(self):
with self.matrix_test(8, 8):
self.matrix.wu_line(0, 0, 8, 8, WHITE)
def test_wu1(self):
with self.matrix_test(8, 8):
self.matrix.wu_line(8, 8, 0, 0, WHITE)
def test_wu2(self):
with self.matrix_test():
self.matrix.wu_line(3, 5, 15, 18, WHITE)
def test_wu3(self):
with self.matrix_test():
self.matrix.wu_line(15, 18, 3, 5, WHITE)
def test_draw_rect(self):
with self.matrix_test():
self.matrix.drawRect(3, 5, 3, 2, WHITE)
def test_fill_rect(self):
with self.matrix_test():
self.matrix.fillRect(3, 5, 6, 4, WHITE)
def test_fill_screen(self):
with self.matrix_test():
self.matrix.fillScreen(WHITE)
def DISABLED_test_draw_round_rect(self):
with self.matrix_test():
self.matrix.drawRoundRect(3, 5, 6, 7, 7, WHITE)
def DISABLED_test_fill_round_rect(self):
with self.matrix_test():
self.matrix.fillRoundRect(3, 5, 6, 7, 7, WHITE)
def test_draw_triangle(self):
with self.matrix_test():
self.matrix.drawTriangle(0, 0, 11, 4, 5, 12, WHITE)
def DISABLED_test_fill_triangle(self):
with self.matrix_test():
self.matrix.fillTriangle(0, 0, 11, 4, 5, 12, WHITE)
def test_draw_text(self):
with self.matrix_test(32, 10):
self.matrix.drawText('abc', color=WHITE)
class MatrixTest(BaseMatrixTest):
maker = data_maker.Maker()
class SharedMatrixTest(BaseMatrixTest):
maker = data_maker.Maker(shared_memory=True, floating=True)
class SharedMatrixIntegerTest(BaseMatrixTest):
maker = data_maker.Maker(shared_memory=True, floating=False)
class FloatNumpyMatrixTest(BaseMatrixTest):
maker = data_maker.Maker(numpy_dtype='float')
class Uint8NumpyMatrixTest(BaseMatrixTest):
maker = data_maker.Maker(numpy_dtype='uint8')
class Int8NumpyMatrixTest(BaseMatrixTest):
maker = data_maker.Maker(numpy_dtype='int8')
class DumpTest(BaseMatrixTest):
maker = data_maker.Maker()
indent = ''
def dump(self):
if not DUMP_FILENAME:
return
with open(DUMP_FILENAME, 'a') as fp:
def writeln(*parts):
if parts:
fp.write(self.indent)
fp.writelines(itertools.chain(*parts))
fp.write('\n')
writeln()
writeln(self.name_of_test(), ' = (')
for row in self.to_strings():
writeln(" '", row, "',")
writeln(')')
del BaseMatrixTest # http://stackoverflow.com/a/22836015/43839
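# Deleting the abstract base class keeps unittest from discovering and running
# BaseMatrixTest itself; only the concrete Maker-specific subclasses above are collected.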
|
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from wavefront_api_client.api_client import ApiClient
class CloudIntegrationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
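# Typical client setup (a minimal sketch, assuming the standard swagger-codegen
# Configuration class bundled with wavefront_api_client and Wavefront's X-AUTH-TOKEN
# api_key header; the instance URL and token below are placeholders):
#
#   import wavefront_api_client
#   config = wavefront_api_client.Configuration()
#   config.host = 'https://YOUR_INSTANCE.wavefront.com'
#   config.api_key['X-AUTH-TOKEN'] = 'YOUR_API_TOKEN'
#   api = CloudIntegrationApi(wavefront_api_client.ApiClient(configuration=config))
#   integrations = api.get_all_cloud_integration(limit=100)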
def create_aws_external_id(self, **kwargs): # noqa: E501
"""Create an external id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_aws_external_id(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: ResponseContainerString
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_aws_external_id_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_aws_external_id_with_http_info(**kwargs) # noqa: E501
return data
def create_aws_external_id_with_http_info(self, **kwargs): # noqa: E501
"""Create an external id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_aws_external_id_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: ResponseContainerString
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_aws_external_id" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration/awsExternalId', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerString', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_cloud_integration(self, **kwargs): # noqa: E501
"""Create a cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_cloud_integration(async_req=True)
>>> result = thread.get()
:param async_req bool
:param CloudIntegration body: Example Body: <pre>{ \"name\":\"CloudWatch integration\", \"service\":\"CLOUDWATCH\", \"cloudWatch\":{ \"baseCredentials\":{ \"roleArn\":\"arn:aws:iam::<accountid>:role/<rolename>\" }, \"metricFilterRegex\":\"^aws.(sqs|ec2|ebs|elb).*$\", \"pointTagFilterRegex\":\"(region|name)\" }, \"serviceRefreshRateInMins\":5 }</pre>
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_cloud_integration_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_cloud_integration_with_http_info(**kwargs) # noqa: E501
return data
def create_cloud_integration_with_http_info(self, **kwargs): # noqa: E501
"""Create a cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_cloud_integration_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param CloudIntegration body: Example Body: <pre>{ \"name\":\"CloudWatch integration\", \"service\":\"CLOUDWATCH\", \"cloudWatch\":{ \"baseCredentials\":{ \"roleArn\":\"arn:aws:iam::<accountid>:role/<rolename>\" }, \"metricFilterRegex\":\"^aws.(sqs|ec2|ebs|elb).*$\", \"pointTagFilterRegex\":\"(region|name)\" }, \"serviceRefreshRateInMins\":5 }</pre>
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cloud_integration" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerCloudIntegration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_aws_external_id(self, id, **kwargs): # noqa: E501
"""DELETEs an external id that was created by Wavefront # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_aws_external_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerString
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_aws_external_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_aws_external_id_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_aws_external_id_with_http_info(self, id, **kwargs): # noqa: E501
"""DELETEs an external id that was created by Wavefront # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_aws_external_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerString
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_aws_external_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `delete_aws_external_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration/awsExternalId/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerString', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_cloud_integration(self, id, **kwargs): # noqa: E501
"""Delete a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cloud_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param bool skip_trash:
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_cloud_integration_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cloud_integration_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param bool skip_trash:
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'skip_trash'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_cloud_integration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `delete_cloud_integration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'skip_trash' in params:
query_params.append(('skipTrash', params['skip_trash'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerCloudIntegration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def disable_cloud_integration(self, id, **kwargs): # noqa: E501
"""Disable a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.disable_cloud_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.disable_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.disable_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
return data
def disable_cloud_integration_with_http_info(self, id, **kwargs): # noqa: E501
"""Disable a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.disable_cloud_integration_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method disable_cloud_integration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `disable_cloud_integration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration/{id}/disable', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerCloudIntegration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def enable_cloud_integration(self, id, **kwargs): # noqa: E501
"""Enable a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.enable_cloud_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.enable_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.enable_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
return data
def enable_cloud_integration_with_http_info(self, id, **kwargs): # noqa: E501
"""Enable a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.enable_cloud_integration_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method enable_cloud_integration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `enable_cloud_integration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration/{id}/enable', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerCloudIntegration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_cloud_integration(self, **kwargs): # noqa: E501
"""Get all cloud integrations for a customer # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_cloud_integration(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_cloud_integration_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_cloud_integration_with_http_info(**kwargs) # noqa: E501
return data
def get_all_cloud_integration_with_http_info(self, **kwargs): # noqa: E501
"""Get all cloud integrations for a customer # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_cloud_integration_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_cloud_integration" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedCloudIntegration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_aws_external_id(self, id, **kwargs): # noqa: E501
"""GETs (confirms) a valid external id that was created by Wavefront # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_aws_external_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerString
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_aws_external_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_aws_external_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_aws_external_id_with_http_info(self, id, **kwargs): # noqa: E501
"""GETs (confirms) a valid external id that was created by Wavefront # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_aws_external_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerString
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_aws_external_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `get_aws_external_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration/awsExternalId/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerString', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_cloud_integration(self, id, **kwargs): # noqa: E501
"""Get a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_cloud_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
return data
def get_cloud_integration_with_http_info(self, id, **kwargs): # noqa: E501
"""Get a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_cloud_integration_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cloud_integration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `get_cloud_integration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerCloudIntegration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def undelete_cloud_integration(self, id, **kwargs): # noqa: E501
"""Undelete a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.undelete_cloud_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.undelete_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.undelete_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
return data
def undelete_cloud_integration_with_http_info(self, id, **kwargs): # noqa: E501
"""Undelete a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.undelete_cloud_integration_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method undelete_cloud_integration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `undelete_cloud_integration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration/{id}/undelete', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerCloudIntegration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_cloud_integration(self, id, **kwargs): # noqa: E501
"""Update a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_cloud_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param CloudIntegration body: Example Body: <pre>{ \"name\":\"CloudWatch integration\", \"service\":\"CLOUDWATCH\", \"cloudWatch\":{ \"baseCredentials\":{ \"roleArn\":\"arn:aws:iam::<accountid>:role/<rolename>\" }, \"metricFilterRegex\":\"^aws.(sqs|ec2|ebs|elb).*$\", \"pointTagFilterRegex\":\"(region|name)\" }, \"serviceRefreshRateInMins\":5 }</pre>
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_cloud_integration_with_http_info(id, **kwargs) # noqa: E501
return data
def update_cloud_integration_with_http_info(self, id, **kwargs): # noqa: E501
"""Update a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_cloud_integration_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param CloudIntegration body: Example Body: <pre>{ \"name\":\"CloudWatch integration\", \"service\":\"CLOUDWATCH\", \"cloudWatch\":{ \"baseCredentials\":{ \"roleArn\":\"arn:aws:iam::<accountid>:role/<rolename>\" }, \"metricFilterRegex\":\"^aws.(sqs|ec2|ebs|elb).*$\", \"pointTagFilterRegex\":\"(region|name)\" }, \"serviceRefreshRateInMins\":5 }</pre>
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_cloud_integration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `update_cloud_integration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/cloudintegration/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerCloudIntegration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
|
from decimal import *
import datetime
from operator import attrgetter
from django.forms.formsets import formset_factory
from django.contrib.sites.models import Site
from models import *
from forms import *
try:
from notification import models as notification
except ImportError:
notification = None
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def unpaid_orders():
orders = Order.objects.exclude(state__contains="ubmitted")
unpaid = []
for order in orders:
if not order.is_fully_paid():
unpaid.append(order)
return unpaid
class SalesRow(object):
def __init__(self, product, customer, quantity, extended_price):
self.product = product
self.customer = customer
self.quantity = quantity
self.extended_price = extended_price
def sales_table(from_date, to_date):
items = OrderItem.objects.filter(
order__delivery_date__range=(from_date, to_date))
rows = {}
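# Aggregate one row per (product, customer) pair, keyed by 'product_id-customer_id'.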
for item in items:
key = "-".join([str(item.product.id), str(item.order.customer.id)])
if not key in rows:
rows[key] = SalesRow(item.product.long_name,
item.order.customer.long_name,
Decimal("0"), Decimal("0"))
rows[key].quantity += item.quantity
rows[key].extended_price += item.extended_price()
return sorted(rows.values(), key=attrgetter('product', 'customer'))
def weekly_production_plans(week_date):
monday = week_date - datetime.timedelta(days=datetime.date.weekday(week_date))
saturday = monday + datetime.timedelta(days=5)
plans = ProductPlan.objects.select_related(depth=1).filter(
role="producer",
from_date__lte=week_date,
to_date__gte=saturday)
for plan in plans:
plan.category = plan.product.parent_string()
plan.product_name = plan.product.short_name
plans = sorted(plans, key=attrgetter('category',
'product_name'))
return plans
def plan_columns(from_date, to_date):
columns = []
wkdate = from_date
while wkdate <= to_date:
columns.append(wkdate.strftime('%Y-%m-%d'))
wkdate = wkdate + datetime.timedelta(days=7)
return columns
def sd_columns(from_date, to_date):
columns = []
wkdate = from_date
while wkdate <= to_date:
columns.append(wkdate.strftime('%Y_%m_%d'))
wkdate = wkdate + datetime.timedelta(days=7)
return columns
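# sd_columns differs from plan_columns only in using underscores ('%Y_%m_%d'), which
# matches the per-week dictionary keys built by supply_demand_rows and json_income_rows below.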
# should plan_weeks go to the view and include headings?
# somebody needs headings!
def create_weekly_plan_forms(rows, data=None):
form_list = []
PlanCellFormSet = formset_factory(PlanCellForm, extra=0)
for row in rows:
product = row[0]
row_form = PlanRowForm(data, prefix=product.id, initial={'product_id': product.id})
row_form.product = product.long_name
cells = row[1:len(row)]
initial_data = []
for cell in cells:
plan_id = ""
if cell.plan:
plan_id = cell.plan.id
cell_initial = {
'plan_id': plan_id,
'product_id': cell.product.id,
'from_date': cell.from_date,
'to_date': cell.to_date,
'quantity': cell.quantity,
}
initial_data.append(cell_initial)
row_form.formset = PlanCellFormSet(data, prefix=product.id, initial=initial_data)
form_list.append(row_form)
return form_list
class SupplyDemandTable(object):
def __init__(self, columns, rows):
self.columns = columns
self.rows = rows
def supply_demand_table(from_date, to_date, member=None):
plans = ProductPlan.objects.all()
cps = ProducerProduct.objects.filter(
inventoried=False,
default_avail_qty__gt=0,
)
constants = {}
for cp in cps:
constants.setdefault(cp.product, Decimal("0"))
constants[cp.product] += cp.default_avail_qty
if member:
plans = plans.filter(member=member)
rows = {}
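# Each row is [product, week_1_qty, week_2_qty, ...]: non-inventoried default quantities
# seed every week, producer plans add to a week's balance, and other (consumer) plans
# subtract from it.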
for plan in plans:
wkdate = from_date
product = plan.product.supply_demand_product()
constant = Decimal('0')
cp = constants.get(product)
if cp:
constant = cp
row = []
while wkdate <= to_date:
row.append(constant)
wkdate = wkdate + datetime.timedelta(days=7)
row.insert(0, product)
rows.setdefault(product, row)
wkdate = from_date
week = 0
while wkdate <= to_date:
if plan.from_date <= wkdate and plan.to_date >= wkdate:
if plan.role == "producer":
rows[product][week + 1] += plan.quantity
else:
rows[product][week + 1] -= plan.quantity
wkdate = wkdate + datetime.timedelta(days=7)
week += 1
label = "Product/Weeks"
columns = [label]
wkdate = from_date
while wkdate <= to_date:
columns.append(wkdate)
wkdate = wkdate + datetime.timedelta(days=7)
rows = rows.values()
rows.sort(lambda x, y: cmp(x[0].short_name, y[0].short_name))
sdtable = SupplyDemandTable(columns, rows)
return sdtable
def supply_demand_rows(from_date, to_date, member=None):
plans = ProductPlan.objects.select_related(depth=1).all()
cps = ProducerProduct.objects.filter(
inventoried=False,
default_avail_qty__gt=0,
)
constants = {}
rows = {}
#import pdb; pdb.set_trace()
#todo: what if some NIPs and some inventoried for same product?
#does the code allow for that?
for cp in cps:
constants.setdefault(cp.product, Decimal("0"))
constant = cp.default_avail_qty
product = cp.product
constants[product] += constant
row = {}
row["product"] = product.long_name
row["id"] = product.id
rows.setdefault(product, row)
wkdate = from_date
while wkdate <= to_date:
row[wkdate.strftime('%Y_%m_%d')] = str(constant)
wkdate = wkdate + datetime.timedelta(days=7)
if member:
plans = plans.filter(member=member)
#todo:
# spread storage items over many weeks
# if plan.product expiration_days > 1 week:
# spread remainder over weeks until consumed or expired.
# means plannable parents could determine expiration.
# may require another pass thru storage plans...
for plan in plans:
wkdate = from_date
#this is too slow:
#product = plan.product.supply_demand_product()
product = plan.product
#constant = Decimal('0')
#constant = ""
#cp = constants.get(product)
#if cp:
# constant = str(cp)
row = {}
#while wkdate <= to_date:
# row[wkdate.strftime('%Y_%m_%d')] = str(constant)
# wkdate = wkdate + datetime.timedelta(days=7)
row["product"] = product.long_name
row["id"] = product.id
rows.setdefault(product, row)
#import pdb; pdb.set_trace()
wkdate = from_date
while wkdate <= to_date:
if plan.from_date <= wkdate and plan.to_date >= wkdate:
key = wkdate.strftime('%Y_%m_%d')
try:
value = rows[product][key]
except KeyError:
value = Decimal("0")
if value == "":
value = Decimal("0")
else:
value = Decimal(value)
if plan.role == "producer":
value += plan.quantity
else:
value -= plan.quantity
rows[product][key] = str(value)
wkdate = wkdate + datetime.timedelta(days=7)
rows = rows.values()
rows.sort(lambda x, y: cmp(x["product"], y["product"]))
return rows
def supply_demand_weekly_table(week_date):
plans = ProductPlan.objects.filter(
from_date__lte=week_date,
to_date__gte=week_date,
).order_by("-role", "member__short_name")
columns = []
rows = {}
cps = ProducerProduct.objects.filter(
inventoried=False,
default_avail_qty__gt=0,
)
for cp in cps:
if not cp.producer in columns:
columns.append(cp.producer)
for plan in plans:
if not plan.member in columns:
columns.append(plan.member)
columns.insert(0, "Product\Member")
columns.append("Balance")
for cp in cps:
if not rows.get(cp.product):
row = []
for i in range(0, len(columns)-1):
row.append(Decimal("0"))
row.insert(0, cp.product)
rows[cp.product] = row
rows[cp.product][columns.index(cp.producer)] += cp.default_avail_qty
rows[cp.product][len(columns)-1] += cp.default_avail_qty
for plan in plans:
if not rows.get(plan.product):
row = []
for i in range(0, len(columns)-1):
row.append(Decimal("0"))
row.insert(0, plan.product)
rows[plan.product] = row
if plan.role == "producer":
rows[plan.product][columns.index(plan.member)] += plan.quantity
rows[plan.product][len(columns)-1] += plan.quantity
else:
rows[plan.product][columns.index(plan.member)] -= plan.quantity
rows[plan.product][len(columns)-1] -= plan.quantity
rows = rows.values()
rows.sort(lambda x, y: cmp(x[0].short_name, y[0].short_name))
sdtable = SupplyDemandTable(columns, rows)
return sdtable
def dojo_supply_demand_weekly_table(week_date):
plans = ProductPlan.objects.filter(
from_date__lte=week_date,
to_date__gte=week_date,
).order_by("-role", "member__short_name")
# for columns: product, member.short_name(s), balance
# but only members are needed here...product and balance can be added in
# template
# for rows: dictionaries with the above keys
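# e.g. a finished row looks like {'product': <long_name>, 'id': <product id>,
# <member short_name>: <int qty>, ..., 'Balance': <net qty>} (values illustrative).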
columns = []
rows = {}
cps = ProducerProduct.objects.filter(
inventoried=False,
default_avail_qty__gt=0,
)
for cp in cps:
if not cp.producer in columns:
columns.append(cp.producer.short_name)
for plan in plans:
if not plan.member.short_name in columns:
columns.append(plan.member.short_name)
columns.append("Balance")
for cp in cps:
if not rows.get(cp.product):
row = {}
for column in columns:
row[column] = 0
row["product"] = cp.product.long_name
row["id"] = cp.product.id
row["Balance"] = 0
rows[cp.product] = row
rows[cp.product][cp.producer.short_name] += int(cp.default_avail_qty)
rows[cp.product]["Balance"] += int(cp.default_avail_qty)
for plan in plans:
if not rows.get(plan.product):
row = {}
for column in columns:
row[column] = 0
row["product"] = plan.product.long_name
row["id"] = plan.product.id
row["Balance"] = 0
rows[plan.product] = row
if plan.role == "producer":
rows[plan.product][plan.member.short_name] += int(plan.quantity)
rows[plan.product]["Balance"] += int(plan.quantity)
else:
rows[plan.product][plan.member.short_name] -= int(plan.quantity)
rows[plan.product]["Balance"] -= int(plan.quantity)
rows = rows.values()
rows.sort(lambda x, y: cmp(x["product"], y["product"]))
sdtable = SupplyDemandTable(columns, rows)
return sdtable
class SuppliableDemandCell(object):
def __init__(self, supply, demand):
self.supply = supply
self.demand = demand
def suppliable(self):
answer = Decimal("0")
if self.supply and self.demand:
if self.supply > self.demand:
answer = self.demand
else:
answer = self.supply
return answer
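# i.e. suppliable() is min(supply, demand) when both sides are non-zero, otherwise Decimal('0').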
def suppliable_demand(from_date, to_date, member=None):
#import pdb; pdb.set_trace()
plans = ProductPlan.objects.all()
if member:
plans = plans.filter(member=member)
rows = {}
for plan in plans:
wkdate = from_date
row = []
while wkdate <= to_date:
row.append(SuppliableDemandCell(Decimal("0"), Decimal("0")))
wkdate = wkdate + datetime.timedelta(days=7)
product = plan.product.supply_demand_product()
row.insert(0, product)
rows.setdefault(product, row)
wkdate = from_date
week = 0
while wkdate <= to_date:
if plan.from_date <= wkdate and plan.to_date >= wkdate:
if plan.role == "producer":
rows[product][week + 1].supply += plan.quantity
else:
rows[product][week + 1].demand += plan.quantity
wkdate = wkdate + datetime.timedelta(days=7)
week += 1
rows = rows.values()
cust_fee = customer_fee()
for row in rows:
for x in range(1, len(row)):
sd = row[x].suppliable()
if sd >= 0:
income = sd * row[0].price
row[x] = income
else:
row[x] = Decimal("0")
income_rows = []
for row in rows:
base = Decimal("0")
total = Decimal("0")
for x in range(1, len(row)):
cell = row[x]
base += cell
cell += cell * cust_fee
total += cell
row[x] = cell.quantize(Decimal('.1'), rounding=ROUND_UP)
if total:
net = base * cust_fee + (base * producer_fee())
net = net.quantize(Decimal('1.'), rounding=ROUND_UP)
total = total.quantize(Decimal('1.'), rounding=ROUND_UP)
row.append(total)
row.append(net)
income_rows.append(row)
label = "Item\Weeks"
columns = [label]
wkdate = from_date
while wkdate <= to_date:
columns.append(wkdate)
wkdate = wkdate + datetime.timedelta(days=7)
columns.append("Total")
columns.append("Net")
income_rows.sort(lambda x, y: cmp(x[0].long_name, y[0].long_name))
sdtable = SupplyDemandTable(columns, income_rows)
return sdtable
#todo: does not use contants (NIPs)
#or correct logic for storage items
def json_income_rows(from_date, to_date, member=None):
#import pdb; pdb.set_trace()
plans = ProductPlan.objects.all()
if member:
plans = plans.filter(member=member)
rows = {}
for plan in plans:
wkdate = from_date
row = {}
while wkdate <= to_date:
row[wkdate.strftime('%Y_%m_%d')] = SuppliableDemandCell(Decimal("0"), Decimal("0"))
wkdate = wkdate + datetime.timedelta(days=7)
product = plan.product.supply_demand_product()
row["product"] = product.long_name
row["id"] = product.id
row["price"] = product.price
rows.setdefault(product, row)
wkdate = from_date
while wkdate <= to_date:
key = wkdate.strftime('%Y_%m_%d')
if plan.from_date <= wkdate and plan.to_date >= wkdate:
if plan.role == "producer":
rows[product][key].supply += plan.quantity
else:
rows[product][key].demand += plan.quantity
wkdate = wkdate + datetime.timedelta(days=7)
rows = rows.values()
cust_fee = customer_fee()
#import pdb; pdb.set_trace()
for row in rows:
wkdate = from_date
while wkdate <= to_date:
key = wkdate.strftime('%Y_%m_%d')
sd = row[key].suppliable()
if sd > 0:
income = sd * row["price"]
row[key] = income
else:
row[key] = Decimal("0")
wkdate = wkdate + datetime.timedelta(days=7)
income_rows = []
for row in rows:
base = Decimal("0")
total = Decimal("0")
wkdate = from_date
while wkdate <= to_date:
key = wkdate.strftime('%Y_%m_%d')
cell = row[key]
base += cell
cell += cell * cust_fee
total += cell
row[key] = str(cell.quantize(Decimal('.1'), rounding=ROUND_UP))
wkdate = wkdate + datetime.timedelta(days=7)
if total:
net = base * cust_fee + (base * producer_fee())
net = net.quantize(Decimal('1.'), rounding=ROUND_UP)
total = total.quantize(Decimal('1.'), rounding=ROUND_UP)
row["total"] = str(total)
row["net"] = str(net)
row["price"] = str(row["price"])
income_rows.append(row)
income_rows.sort(lambda x, y: cmp(x["product"], y["product"]))
return income_rows
class PlannedWeek(object):
def __init__(self, product, from_date, to_date, quantity):
self.product = product
self.from_date = from_date
self.to_date = to_date
self.quantity = quantity
self.plan = None
def plan_weeks(member, products, from_date, to_date):
plans = ProductPlan.objects.filter(member=member)
#if member.is_customer():
# products = CustomerProduct.objects.filter(customer=member, planned=True)
#else:
# products = ProducerProduct.objects.filter(producer=member, planned=True)
#if not products:
# products = Product.objects.filter(plannable=True)
rows = {}
for pp in products:
try:
product = pp.product
except AttributeError:
# pp may be a CustomerProduct/ProducerProduct wrapper or a bare Product
product = pp
wkdate = from_date
row = [product]
while wkdate <= to_date:
enddate = wkdate + datetime.timedelta(days=6)
row.append(PlannedWeek(product, wkdate, enddate, Decimal("0")))
wkdate = enddate + datetime.timedelta(days=1)
#row.insert(0, product)
rows.setdefault(product, row)
for plan in plans:
product = plan.product
wkdate = from_date
week = 0
while wkdate <= to_date:
enddate = wkdate + datetime.timedelta(days=6)
if plan.from_date <= wkdate and plan.to_date >= wkdate:
rows[product][week + 1].quantity = plan.quantity
rows[product][week + 1].plan = plan
wkdate = wkdate + datetime.timedelta(days=7)
week += 1
label = "Product/Weeks"
columns = [label]
wkdate = from_date
while wkdate <= to_date:
columns.append(wkdate)
wkdate = wkdate + datetime.timedelta(days=7)
rows = rows.values()
rows.sort(lambda x, y: cmp(x[0].short_name, y[0].short_name))
sdtable = SupplyDemandTable(columns, rows)
return sdtable
def plans_for_dojo(member, products, from_date, to_date):
#import pdb; pdb.set_trace()
plans = ProductPlan.objects.filter(member=member)
rows = {}
for pp in products:
yearly = 0
try:
product = pp.product
yearly = pp.default_quantity
except:
product = pp
if not yearly:
try:
pp = ProducerProduct.objects.get(producer=member, product=product)
yearly = pp.default_quantity
except:
pass
wkdate = from_date
row = {}
row["product"] = product.long_name
row["yearly"] = int(yearly)
row["id"] = product.id
row["member_id"] = member.id
row["from_date"] = from_date.strftime('%Y-%m-%d')
row["to_date"] = to_date.strftime('%Y-%m-%d')
while wkdate <= to_date:
enddate = wkdate + datetime.timedelta(days=6)
row[wkdate.strftime('%Y-%m-%d')] = "0"
wkdate = enddate + datetime.timedelta(days=1)
rows.setdefault(product, row)
#import pdb; pdb.set_trace()
for plan in plans:
product = plan.product
wkdate = from_date
week = 0
while wkdate <= to_date:
enddate = wkdate + datetime.timedelta(days=6)
if plan.from_date <= wkdate and plan.to_date >= wkdate:
rows[product][wkdate.strftime('%Y-%m-%d')] = str(plan.quantity)
rows[product][":".join([wkdate.strftime('%Y-%m-%d'), "plan_id"])] = plan.id
wkdate = wkdate + datetime.timedelta(days=7)
week += 1
rows = rows.values()
rows.sort(lambda x, y: cmp(x["product"], y["product"]))
return rows
def create_all_inventory_item_forms(avail_date, plans, items, data=None):
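    # Build one AllInventoryItemForm per plan: seed it from an existing inventory
    # item for that producer/product if one exists, otherwise from the plan itself.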
item_dict = {}
for item in items:
# This means one lot per producer per product per week
item_dict["-".join([str(item.product.id), str(item.producer.id)])] = item
form_list = []
for plan in plans:
#import pdb; pdb.set_trace()
custodian_id = ""
try:
member = plan.member
except:
member = plan.producer
try:
item = item_dict["-".join([str(plan.product.id),
str(member.id)])]
if item.custodian:
custodian_id = item.custodian.id
except KeyError:
item = False
try:
plan_qty = plan.quantity
except:
plan_qty = 0
#import pdb; pdb.set_trace()
if item:
pref = "-".join(["item", str(item.id)])
the_form = AllInventoryItemForm(data, prefix=pref, initial={
'item_id': item.id,
'product_id': item.product.id,
'producer_id': item.producer.id,
'freeform_lot_id': item.freeform_lot_id,
'field_id': item.field_id,
'custodian': custodian_id,
'inventory_date': item.inventory_date,
'expiration_date': item.expiration_date,
'planned': item.planned,
'received': item.received,
'notes': item.notes})
else:
pref = "-".join(["plan", str(plan.id)])
expiration_date = avail_date + datetime.timedelta(days=plan.product.expiration_days)
the_form = AllInventoryItemForm(data, prefix=pref, initial={
'item_id': 0,
'product_id': plan.product.id,
'producer_id': member.id,
'inventory_date': avail_date,
'expiration_date': expiration_date,
'planned': 0,
'received': 0,
'notes': ''})
the_form.description = plan.product.long_name
the_form.producer = member.short_name
the_form.plan_qty = plan_qty
form_list.append(the_form)
#import pdb; pdb.set_trace()
#form_list.sort(lambda x, y: cmp(x.producer, y.producer))
form_list = sorted(form_list, key=attrgetter('producer', 'description'))
return form_list
def create_delivery_cycle_selection_forms(data=None):
dcs = DeliveryCycle.objects.all()
form_list = []
for dc in dcs:
form = DeliveryCycleSelectionForm(data, prefix=dc.id)
form.cycle = dc
form.delivery_date = dc.next_delivery_date_using_closing()
form_list.append(form)
return form_list
def create_avail_item_forms(avail_date, data=None):
fn = food_network()
items = fn.avail_items_for_customer(avail_date)
form_list = []
for item in items:
pref = "-".join(["item", str(item.id)])
the_form = AvailableItemForm(data, prefix=pref, initial={
'item_id': item.id,
'inventory_date': item.inventory_date,
'expiration_date': item.expiration_date,
'quantity': item.avail_qty(),
})
the_form.description = item.product.long_name
the_form.producer = item.producer.short_name
the_form.ordered = item.product.total_ordered_for_timespan(
item.inventory_date, item.expiration_date)
form_list.append(the_form)
form_list = sorted(form_list, key=attrgetter('description', 'producer'))
return form_list
def send_avail_emails(cycle):
fn = food_network()
food_network_name = fn.long_name
delivery_date = cycle.next_delivery_date_using_closing()
fresh_list = fn.email_availability(delivery_date)
users = []
for customer in cycle.customers.all():
users.append(customer)
for contact in customer.contacts.all():
if contact.email != customer.email:
users.append(contact)
users.append(fn)
users = list(set(users))
intro = avail_email_intro()
domain = Site.objects.get_current().domain
notification.send(users, "distribution_fresh_list", {
"intro": intro.message,
"domain": domain,
"fresh_list": fresh_list,
"delivery_date": delivery_date,
"food_network_name": food_network_name,
"cycle": cycle,
})
|
|
import io
import os
import shutil
import tempfile
from docker import errors
import six
from .base import BaseAPIIntegrationTest
from ..helpers import requires_api_version
class BuildTest(BaseAPIIntegrationTest):
def test_build_streaming(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
stream = self.client.build(fileobj=script, stream=True, decode=True)
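        # decode=True makes the client yield parsed JSON dicts instead of raw bytes.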
logs = []
for chunk in stream:
logs.append(chunk)
assert len(logs) > 0
def test_build_from_stringio(self):
if six.PY3:
return
script = io.StringIO(six.text_type('\n').join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]))
stream = self.client.build(fileobj=script, stream=True)
logs = ''
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
logs += chunk
self.assertNotEqual(logs, '')
@requires_api_version('1.8')
def test_build_with_dockerignore(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
'ADD . /test',
]))
with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
f.write("\n".join([
'ignored',
'Dockerfile',
'.dockerignore',
'!ignored/subdir/excepted-file',
'', # empty line
]))
with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
f.write("this file should not be ignored")
subdir = os.path.join(base_dir, 'ignored', 'subdir')
os.makedirs(subdir)
with open(os.path.join(subdir, 'file'), 'w') as f:
f.write("this file should be ignored")
with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
f.write("this file should not be ignored")
tag = 'docker-py-test-build-with-dockerignore'
stream = self.client.build(
path=base_dir,
tag=tag,
)
for chunk in stream:
pass
c = self.client.create_container(tag, ['find', '/test', '-type', 'f'])
self.client.start(c)
self.client.wait(c)
logs = self.client.logs(c)
if six.PY3:
logs = logs.decode('utf-8')
self.assertEqual(
sorted(list(filter(None, logs.split('\n')))),
sorted(['/test/ignored/subdir/excepted-file',
'/test/not-ignored']),
)
@requires_api_version('1.21')
def test_build_with_buildargs(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
'ARG test',
'USER $test'
]).encode('ascii'))
stream = self.client.build(
fileobj=script, tag='buildargs', buildargs={'test': 'OK'}
)
self.tmp_imgs.append('buildargs')
for chunk in stream:
pass
info = self.client.inspect_image('buildargs')
self.assertEqual(info['Config']['User'], 'OK')
@requires_api_version('1.22')
def test_build_shmsize(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
'CMD sh -c "echo \'Hello, World!\'"',
]).encode('ascii'))
tag = 'shmsize'
shmsize = 134217728
stream = self.client.build(
fileobj=script, tag=tag, shmsize=shmsize
)
self.tmp_imgs.append(tag)
for chunk in stream:
pass
# There is currently no way to get the shmsize
# that was used to build the image
@requires_api_version('1.23')
def test_build_labels(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
]).encode('ascii'))
labels = {'test': 'OK'}
stream = self.client.build(
fileobj=script, tag='labels', labels=labels
)
self.tmp_imgs.append('labels')
for chunk in stream:
pass
info = self.client.inspect_image('labels')
self.assertEqual(info['Config']['Labels'], labels)
@requires_api_version('1.25')
def test_build_with_cache_from(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'ENV FOO=bar',
'RUN touch baz',
'RUN touch bax',
]).encode('ascii'))
stream = self.client.build(fileobj=script, tag='build1')
self.tmp_imgs.append('build1')
for chunk in stream:
pass
stream = self.client.build(
fileobj=script, tag='build2', cache_from=['build1'],
decode=True
)
self.tmp_imgs.append('build2')
counter = 0
for chunk in stream:
if 'Using cache' in chunk.get('stream', ''):
counter += 1
assert counter == 3
self.client.remove_image('build2')
counter = 0
stream = self.client.build(
fileobj=script, tag='build2', cache_from=['nosuchtag'],
decode=True
)
for chunk in stream:
if 'Using cache' in chunk.get('stream', ''):
counter += 1
assert counter == 0
@requires_api_version('1.29')
def test_build_container_with_target(self):
script = io.BytesIO('\n'.join([
'FROM busybox as first',
'RUN mkdir -p /tmp/test',
'RUN touch /tmp/silence.tar.gz',
'FROM alpine:latest',
            'WORKDIR /root/',
'COPY --from=first /tmp/silence.tar.gz .',
'ONBUILD RUN echo "This should not be in the final image"'
]).encode('ascii'))
stream = self.client.build(
fileobj=script, target='first', tag='build1'
)
self.tmp_imgs.append('build1')
for chunk in stream:
pass
info = self.client.inspect_image('build1')
self.assertEqual(info['Config']['OnBuild'], [])
def test_build_stderr_data(self):
control_chars = ['\x1b[91m', '\x1b[0m']
snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
script = io.BytesIO(b'\n'.join([
b'FROM busybox',
'RUN sh -c ">&2 echo \'{0}\'"'.format(snippet).encode('utf-8')
]))
stream = self.client.build(
fileobj=script, stream=True, decode=True, nocache=True
)
lines = []
for chunk in stream:
lines.append(chunk.get('stream'))
expected = '{0}{2}\n{1}'.format(
control_chars[0], control_chars[1], snippet
)
self.assertTrue(any([line == expected for line in lines]))
def test_build_gzip_encoding(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
'ADD . /test',
]))
stream = self.client.build(
path=base_dir, stream=True, decode=True, nocache=True,
gzip=True
)
lines = []
for chunk in stream:
lines.append(chunk)
assert 'Successfully built' in lines[-1]['stream']
def test_build_gzip_custom_encoding(self):
with self.assertRaises(errors.DockerException):
self.client.build(path='.', gzip=True, encoding='text/html')
|
|
#! /usr/bin/python
# FileSR: local-file storage repository
import FileSR
import SR
import SRCommand
import cleanup
import errno
import nfs
import os
import stat
import sys
import util
import vhdutil
import xs_errors
from lock import Lock
CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_CACHING", \
"VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", \
"VDI_UPDATE", "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", \
"VDI_RESIZE_ONLINE", "VDI_RESET_ON_BOOT", "ATOMIC_PAUSE"]
CONFIGURATION = [['server', 'hostname or IP address of NFS server (required)'], \
['serverpath', 'path on remote server (required)']]
DRIVER_INFO = {
'name': 'NFS VHD',
'description': 'SR plugin which stores disks as VHD files on a remote NFS filesystem',
'vendor': 'The Apache Software Foundation',
'copyright': 'Copyright (c) 2012 The Apache Software Foundation',
'driver_version': '1.0',
'required_api_version': '1.0',
'capabilities': CAPABILITIES,
'configuration': CONFIGURATION
}
# The mountpoint for the directory when performing an sr_probe. All probes
# are serialised by xapi, so a single mountpoint is sufficient.
PROBE_MOUNTPOINT = "probe"
NFSPORT = 2049
DEFAULT_TRANSPORT = "tcp"
class NFSSR(FileSR.FileSR):
"""NFS file-based storage repository"""
def handles(type):
return type == 'nfs'
handles = staticmethod(handles)
def load(self, sr_uuid):
self.ops_exclusive = FileSR.OPS_EXCLUSIVE
self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
self.sr_vditype = SR.DEFAULT_TAP
if not self.dconf.has_key('server'):
raise xs_errors.XenError('ConfigServerMissing')
self.remoteserver = self.dconf['server']
self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
# Test for the optional 'nfsoptions' dconf attribute
self.transport = DEFAULT_TRANSPORT
if self.dconf.has_key('useUDP') and self.dconf['useUDP'] == 'true':
self.transport = "udp"
def validate_remotepath(self, scan):
if not self.dconf.has_key('serverpath'):
if scan:
try:
self.scan_exports(self.dconf['server'])
except:
pass
raise xs_errors.XenError('ConfigServerPathMissing')
if not self._isvalidpathstring(self.dconf['serverpath']):
raise xs_errors.XenError('ConfigServerPathBad', \
opterr='serverpath is %s' % self.dconf['serverpath'])
def check_server(self):
try:
nfs.check_server_tcp(self.remoteserver)
except nfs.NfsException, exc:
raise xs_errors.XenError('NFSVersion',
opterr=exc.errstr)
def mount(self, mountpoint, remotepath):
try:
nfs.soft_mount(mountpoint, self.remoteserver, remotepath, self.transport)
except nfs.NfsException, exc:
raise xs_errors.XenError('NFSMount', opterr=exc.errstr)
def attach(self, sr_uuid):
self.validate_remotepath(False)
# self.remotepath = os.path.join(self.dconf['serverpath'], sr_uuid)
self.remotepath = self.dconf['serverpath']
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
self.mount_remotepath(sr_uuid)
def mount_remotepath(self, sr_uuid):
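        # Mount only if the SR path is not already a mountpoint, then defer to
        # FileSR.attach for the rest of the attach logic.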
if not self._checkmount():
self.check_server()
self.mount(self.path, self.remotepath)
return super(NFSSR, self).attach(sr_uuid)
def probe(self):
# Verify NFS target and port
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
self.validate_remotepath(True)
self.check_server()
temppath = os.path.join(SR.MOUNT_BASE, PROBE_MOUNTPOINT)
self.mount(temppath, self.dconf['serverpath'])
try:
return nfs.scan_srlist(temppath)
finally:
try:
nfs.unmount(temppath, True)
except:
pass
def detach(self, sr_uuid):
"""Detach the SR: Unmounts and removes the mountpoint"""
if not self._checkmount():
return
util.SMlog("Aborting GC/coalesce")
cleanup.abort(self.uuid)
# Change directory to avoid unmount conflicts
os.chdir(SR.MOUNT_BASE)
try:
nfs.unmount(self.path, True)
except nfs.NfsException, exc:
raise xs_errors.XenError('NFSUnMount', opterr=exc.errstr)
return super(NFSSR, self).detach(sr_uuid)
def create(self, sr_uuid, size):
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
self.validate_remotepath(True)
if self._checkmount():
raise xs_errors.XenError('NFSAttached')
# Set the target path temporarily to the base dir
# so that we can create the target SR directory
self.remotepath = self.dconf['serverpath']
try:
self.mount_remotepath(sr_uuid)
except Exception, exn:
try:
os.rmdir(self.path)
except:
pass
raise exn
# newpath = os.path.join(self.path, sr_uuid)
# if util.ioretry(lambda: util.pathexists(newpath)):
# if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
# self.detach(sr_uuid)
# raise xs_errors.XenError('SRExists')
# else:
# try:
# util.ioretry(lambda: util.makedirs(newpath))
# except util.CommandException, inst:
# if inst.code != errno.EEXIST:
# self.detach(sr_uuid)
# raise xs_errors.XenError('NFSCreate',
# opterr='remote directory creation error is %d'
# % inst.code)
self.detach(sr_uuid)
def delete(self, sr_uuid):
# try to remove/delete non VDI contents first
super(NFSSR, self).delete(sr_uuid)
try:
if self._checkmount():
self.detach(sr_uuid)
# Set the target path temporarily to the base dir
# so that we can remove the target SR directory
self.remotepath = self.dconf['serverpath']
self.mount_remotepath(sr_uuid)
newpath = os.path.join(self.path, sr_uuid)
if util.ioretry(lambda: util.pathexists(newpath)):
util.ioretry(lambda: os.rmdir(newpath))
self.detach(sr_uuid)
except util.CommandException, inst:
self.detach(sr_uuid)
if inst.code != errno.ENOENT:
raise xs_errors.XenError('NFSDelete')
    def vdi(self, uuid, loadLocked=False):
        # loadLocked is accepted for interface compatibility; both code paths
        # returned the same VDI type, so a single return suffices.
        return NFSFileVDI(self, uuid)
def _checkmount(self):
return util.ioretry(lambda: util.pathexists(self.path)) \
and util.ioretry(lambda: util.ismount(self.path))
def scan_exports(self, target):
util.SMlog("scanning2 (target=%s)" % target)
dom = nfs.scan_exports(target)
print >> sys.stderr, dom.toprettyxml()
class NFSFileVDI(FileSR.FileVDI):
def attach(self, sr_uuid, vdi_uuid):
try:
vdi_ref = self.sr.srcmd.params['vdi_ref']
self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
"vdi-type")
self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
"storage-type")
self.session.xenapi.VDI.add_to_xenstore_data(vdi_ref, \
"storage-type", "nfs")
except:
util.logException("NFSSR:attach")
pass
return super(NFSFileVDI, self).attach(sr_uuid, vdi_uuid)
def get_mtime(self, path):
st = util.ioretry_stat(lambda: os.stat(path))
return st[stat.ST_MTIME]
def clone(self, sr_uuid, vdi_uuid):
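        # Ensure the SR directory mtime changes across the clone, so that anything
        # watching the directory timestamp can tell a clone happened.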
timestamp_before = int(self.get_mtime(self.sr.path))
ret = super(NFSFileVDI, self).clone(sr_uuid, vdi_uuid)
timestamp_after = int(self.get_mtime(self.sr.path))
if timestamp_after == timestamp_before:
util.SMlog("SR dir timestamp didn't change, updating")
timestamp_after += 1
os.utime(self.sr.path, (timestamp_after, timestamp_after))
return ret
if __name__ == '__main__':
SRCommand.run(NFSSR, DRIVER_INFO)
else:
SR.registerSR(NFSSR)
|
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import json
from flask_oauthlib.client import OAuthResponse
from mock import patch
from app import create_app
from app.database import db
from moto import mock_s3
from app.profile.models import User, Publisher, PublisherUser, UserRoleEnum
from app.package.models import Package
class AuthTokenTestCase(unittest.TestCase):
auth_token_url = '/api/auth/token'
def setUp(self):
self.app = create_app()
self.client = self.app.test_client()
with self.app.app_context():
db.drop_all()
db.create_all()
self.user = User()
self.user.user_id = 'trial_id'
self.user.email, self.user.name, self.user.secret = \
'[email protected]', 'test_user', 'super_secret'
db.session.add(self.user)
db.session.commit()
def tearDown(self):
with self.app.app_context():
db.session.remove()
db.drop_all()
db.engine.dispose()
def test_return_200_if_email_and_secret_matches(self):
rv = self.client.post(self.auth_token_url,
data=json.dumps({
'username': None,
'email': '[email protected]',
'secret': 'super_secret'
}),
content_type='application/json')
self.assertEqual(rv.status_code, 200)
    def test_throw_403_if_secret_does_not_match(self):
rv = self.client.post(self.auth_token_url,
data=json.dumps({
'secret': 'abc',
'username': '[email protected]'
}),
content_type='application/json')
data = json.loads(rv.data)
assert rv.status_code == 403
assert data['message'] == 'Invalid secret for user'
class CallbackHandlingTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app()
self.client = self.app.test_client()
@patch('flask_oauthlib.client.OAuthRemoteApp.authorized_response')
def test_throw_500_if_error_getting_user_info_from_oauth(self, authorized_response):
authorized_response.return_value = {'name': 'bond','access_token': 'token'}
response = self.client.get('/api/auth/callback?code=123')
data = json.loads(response.data)
print (data)
self.assertEqual(data['message'], 'Internal Server Error')
self.assertEqual(response.status_code, 500)
@patch('flask_oauthlib.client.OAuthRemoteApp.authorized_response')
@patch('flask_oauthlib.client.OAuthRemoteApp.get')
def test_throw_404_if_email_not_found(self, get_user,authorized_response):
authorized_response.return_value = {'access_token': 'token'}
get_user.side_effect = lambda k:{
'user': OAuthResponse(
resp=None,
content=json.dumps(dict()),
content_type='application/json'
),
'user/emails': OAuthResponse(
resp=None,
content=json.dumps([]),
content_type='application/json')
}.get(k, 'unhandled request %s'%k)
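        # The side_effect maps each requested endpoint to a canned OAuthResponse;
        # unknown endpoints fall through to a marker string.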
response = self.client.get('/api/auth/callback?code=123')
data = json.loads(response.data)
self.assertEqual(data['message'], 'Email Not Found')
self.assertEqual(response.status_code, 404)
@patch('flask_oauthlib.client.OAuthRemoteApp.authorized_response')
def test_throw_400_access_denied_if_authorized_response_is_none(self, authorized_response):
authorized_response.return_value = None
response = self.client.get('/api/auth/callback?code=123')
data = json.loads(response.data)
self.assertEqual(data['message'], 'Access Denied')
self.assertEqual(response.status_code, 400)
@patch('flask_oauthlib.client.OAuthRemoteApp.authorized_response')
@patch('flask_oauthlib.client.OAuthRemoteApp.get')
@patch('app.auth.jwt.JWT.encode')
@patch('app.logic.User.find_or_create')
def test_gets_private_email_and_return_200_if_all_right(self,
create_user, jwt_helper, get_user,
authorized_response):
authorized_response.return_value = {'access_token': 'token'}
get_user.side_effect = lambda k:{
'user': OAuthResponse(
resp=None,
content=json.dumps(dict()),
content_type='application/json'
),
'user/emails': OAuthResponse(
resp=None,
content=json.dumps([{
"email": "[email protected]",
"verified": True,
"primary": True
}]),
content_type='application/json')
}.get(k, 'unhandled request %s'%k)
jwt_helper.return_value = "132432"
create_user.return_value = User(id=1, email="[email protected]", name='abc', secret='12345')
response = self.client.get('/api/auth/callback?code=123')
self.assertEqual(create_user.call_count, 1)
self.assertEqual(jwt_helper.call_count, 1)
self.assertEqual(response.status_code, 200)
@patch('flask_oauthlib.client.OAuthRemoteApp.authorized_response')
@patch('flask_oauthlib.client.OAuthRemoteApp.get')
@patch('app.auth.jwt.JWT.encode')
@patch('app.logic.User.find_or_create')
def test_return_200_if_all_right(self,
create_user, jwt_helper, get_user,
authorized_response):
authorized_response.return_value = {'access_token': 'token'}
get_user.return_value = OAuthResponse(resp=None,
content=json.dumps(dict({'email': '[email protected]'})),
content_type='application/json')
jwt_helper.return_value = "132432"
create_user.return_value = User(id=1, email="[email protected]", name='abc', secret='12345')
response = self.client.get('/api/auth/callback?code=123')
self.assertEqual(create_user.call_count, 1)
self.assertEqual(jwt_helper.call_count, 1)
self.assertEqual(response.status_code, 200)
def tearDown(self):
with self.app.app_context():
db.session.remove()
db.drop_all()
db.engine.dispose()
import tests.base as base
class AuthorizeUploadTestCase(unittest.TestCase):
publisher = 'test_publisher'
package = 'test_package'
user_id = 1
url = '/api/datastore/authorize'
jwt_url = '/api/auth/token'
def setUp(self):
self.app = create_app()
self.client = self.app.test_client()
with self.app.app_context():
db.drop_all()
db.create_all()
base.make_fixtures(self.app, self.package, self.publisher, self.user_id)
self.jwt = base.get_valid_token(self.publisher)
self.jwt1 = base.get_valid_token('test1')
@mock_s3
def test_should_return_200_if_all_right(self):
auth = "%s" % self.jwt
data = {
'metadata': {
"owner": self.publisher,
"name": self.package
},
"filedata": {
"package.json": {
"name": "package.json",
"md5": "12345y65uyhgfed23243y6"
}
}
}
response = self.client.post(self.url,
headers={'Auth-Token': auth},
data=json.dumps(data),
content_type='application/json')
self.assertEqual(200, response.status_code)
@mock_s3
def test_should_return_500_if_data_not_present(self):
auth = "%s" % self.jwt
data = {
'metadata': {
"owner": self.publisher,
"name": self.package
},
"filedata": {
"package.json": {
"name": "package.json"
}
}
}
response = self.client.post(self.url,
headers={'Auth-Token': auth},
data=json.dumps(data),
content_type='application/json')
self.assertEqual(500, response.status_code)
@mock_s3
def test_should_return_400_if_unauthorized(self):
auth = "%s" % self.jwt1
data = {
'metadata': {
"owner": self.publisher,
"name": self.package
},
"filedata": {
"package.json": {
"name": "package.json"
}
}
}
response = self.client.post(self.url,
headers={'Auth-Token': auth},
data=json.dumps(data),
content_type='application/json')
self.assertEqual(400, response.status_code)
|
|
#!/usr/bin/python
"""
ROMS NetCDF plotting GUI
"""
import os
import wx
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
matplotlib.use('WXAgg')
# Set some default parameters
matplotlib.rcParams['text.color']='white'
matplotlib.rcParams['savefig.facecolor']='black'
matplotlib.rcParams['savefig.edgecolor']='black'
matplotlib.rcParams['figure.facecolor']='black'
matplotlib.rcParams['figure.edgecolor']='black'
matplotlib.rcParams['axes.facecolor']='black'
matplotlib.rcParams['axes.edgecolor']='white'
matplotlib.rcParams['axes.labelcolor']='white'
matplotlib.rcParams['xtick.color']='white'
matplotlib.rcParams['ytick.color']='white'
#matplotlib.rcParams['font.family']='serif'
#matplotlib.rcParams['font.sans-serif']=['Verdana']
from matplotlib.figure import Figure
from matplotlib.collections import PolyCollection, LineCollection
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import matplotlib.animation as animation
from romsio import ROMS
from datetime import datetime
import numpy as np
import pdb
class ROMSPlotPy(wx.Frame, ROMS ):
"""
The main frame of the application
"""
title = 'romsplot(py)'
# Plotting options
autoclim=True
showedges=False
bgcolor='k'
textcolor='w'
cmap='ocean'
# other flags
collectiontype='cells'
oldcollectiontype='cells'
tindex=0
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.create_menu()
self.create_status_bar()
self.create_main_panel()
#self.draw_figure()
def create_menu(self):
self.menubar = wx.MenuBar()
###
# File Menu
###
menu_file = wx.Menu()
# Load a hydro output file
m_expt = menu_file.Append(-1, "&Open file\tCtrl-O", "Open netcdf file")
self.Bind(wx.EVT_MENU, self.on_open_file, m_expt)
# Load a grid file
#m_grid = menu_file.Append(-1, "&Load grid\tCtrl-G", "Load SUNTANS grid from folder")
#self.Bind(wx.EVT_MENU, self.on_load_grid, m_grid)
# Load a particle file
#m_part = menu_file.Append(-1, "&Load PTM file\tCtrl-Shift-P", "Load a PTM file")
#self.Bind(wx.EVT_MENU, self.on_load_ptm, m_part)
# Save current scene as an animation
#m_anim = menu_file.Append(-1,"&Save animation of current scene\tCtrl-S","Save animation")
#self.Bind(wx.EVT_MENU, self.on_save_anim, m_anim)
# Save the current figure
m_prin = menu_file.Append(-1,"&Print current scene\tCtrl-P","Save figure")
self.Bind(wx.EVT_MENU, self.on_save_fig, m_prin)
menu_file.AppendSeparator()
# Exit
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
###
# Tools menu
###
#menu_tools = wx.Menu()
#m_gridstat = menu_tools.Append(-1, "&Plot grid size statistics", "SUNTANS grid size")
#self.Bind(wx.EVT_MENU, self.on_plot_gridstat, m_gridstat)
###
# Help Menu
###
menu_help = wx.Menu()
m_about = menu_help.Append(-1, "&About\tF1", "About the demo")
self.Bind(wx.EVT_MENU, self.on_about, m_about)
# Add all of the menu bars
self.menubar.Append(menu_file, "&File")
#self.menubar.Append(menu_tools, "&Tools")
self.menubar.Append(menu_help, "&Help")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
""" Creates the main panel with all the controls on it:
* mpl canvas
* mpl navigation toolbar
* Control panel for interaction
"""
self.panel = wx.Panel(self)
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
#self.fig = Figure((7.0, 6.0), dpi=self.dpi,facecolor=self.bgcolor)
self.fig = Figure((7.0, 6.0), dpi=self.dpi)
self.canvas = FigCanvas(self.panel, -1, self.fig)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111)
#SetAxColor(self.axes,self.textcolor,self.bgcolor)
# Bind the 'pick' event for clicking on one of the bars
#
#self.canvas.mpl_connect('pick_event', self.on_pick)
########
# Create widgets
########
self.variable_list = wx.ComboBox(
self.panel,
size=(200,-1),
choices=['Select a variable...'],
style=wx.CB_READONLY)
self.variable_list.Bind(wx.EVT_COMBOBOX, self.on_select_variable)
self.time_list = wx.ComboBox(
self.panel,
size=(200,-1),
choices=['Select a time step...'],
style=wx.CB_READONLY)
self.time_list.Bind(wx.EVT_COMBOBOX, self.on_select_time)
self.depthlayer_list = wx.ComboBox(
self.panel,
size=(200,-1),
choices=['Select a vertical layer...'],
style=wx.CB_READONLY)
self.depthlayer_list.Bind(wx.EVT_COMBOBOX, self.on_select_depth)
#self.show_edge_check = wx.CheckBox(self.panel, -1,
# "Show Edges",
# style=wx.ALIGN_RIGHT)
#self.show_edge_check.Bind(wx.EVT_CHECKBOX, self.on_show_edges)
cmaps = matplotlib.cm.datad.keys()
cmaps.sort()
self.colormap_list = wx.ComboBox(
self.panel,
size=(100,-1),
choices=cmaps,
style=wx.CB_READONLY)
self.colormap_list.Bind(wx.EVT_COMBOBOX, self.on_select_cmap)
self.colormap_label = wx.StaticText(self.panel, -1,"Colormap:")
self.clim_check = wx.CheckBox(self.panel, -1,
"Manual color limits ",
style=wx.ALIGN_RIGHT)
self.clim_check.Bind(wx.EVT_CHECKBOX, self.on_clim_check)
self.climlow = wx.TextCtrl(
self.panel,
size=(100,-1),
style=wx.TE_PROCESS_ENTER)
self.climlow.Bind(wx.EVT_TEXT_ENTER, self.on_climlow)
self.climhigh = wx.TextCtrl(
self.panel,
size=(100,-1),
style=wx.TE_PROCESS_ENTER)
self.climhigh.Bind(wx.EVT_TEXT_ENTER, self.on_climhigh)
# Labels
self.variable_label = wx.StaticText(self.panel, -1,"Variable:",size=(200,-1))
self.time_label = wx.StaticText(self.panel, -1,"Time step:",size=(200,-1))
self.depth_label = wx.StaticText(self.panel, -1,"Vertical level:",size=(200,-1))
# Create the navigation toolbar, tied to the canvas
#
self.toolbar = NavigationToolbar(self.canvas)
#self.toolbar.toolitems[8][3]='my_save_fig'
#def my_save_fig(self,*args):
# print 'saving figure'
# return "break"
#########
# Layout with box sizers
#########
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.toolbar, 0, wx.EXPAND)
self.vbox.AddSpacer(10)
#self.vbox.Add((-1,25))
flags = wx.ALIGN_LEFT | wx.ALL | wx.ALIGN_CENTER_VERTICAL
self.hbox0 = wx.BoxSizer(wx.HORIZONTAL)
#self.hbox0.Add(self.show_edge_check, 0, border=10, flag=flags)
self.hbox0.Add(self.colormap_label, 0, border=10, flag=flags)
self.hbox0.Add(self.colormap_list, 0, border=10, flag=flags)
self.hbox0.Add(self.clim_check, 0, border=10, flag=flags)
self.hbox0.Add(self.climlow, 0, border=10, flag=flags)
self.hbox0.Add(self.climhigh, 0, border=10, flag=flags)
self.vbox.AddSpacer(5)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.variable_label, 0, border=10, flag=flags)
self.hbox1.Add(self.time_label, 0, border=10, flag=flags)
self.hbox1.Add(self.depth_label, 0, border=10, flag=flags)
self.vbox.AddSpacer(5)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.variable_list, 0, border=10, flag=flags)
self.hbox2.Add(self.time_list, 0, border=10, flag=flags)
self.hbox2.Add(self.depthlayer_list, 0, border=10, flag=flags)
self.vbox.Add(self.hbox1, 0, flag = wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag = wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox0, 0, flag = wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
##########
# Event functions
##########
def create_figure(self):
"""
Creates the figure
"""
# Find the colorbar limits if unspecified
if self.autoclim:
self.clim = [self.data.min(),self.data.max()]
self.climlow.SetValue('%3.1f'%self.clim[0])
self.climhigh.SetValue('%3.1f'%self.clim[1])
else:
self.clim = [float(self.climlow.GetValue()),\
float(self.climhigh.GetValue())]
if self.__dict__.has_key('collection'):
#self.collection.remove()
self.axes.collections.remove(self.collection)
else:
# First call - set the axes limits
self.axes.set_aspect('equal')
self.axes.set_xlim(self.xlims)
self.axes.set_ylim(self.ylims)
self.collection = self.pcolor(data=self.data,titlestr='',colorbar=False,\
ax=self.axes,fig=self.fig,cmap=self.cmap)
#self.axes.add_collection(self.collection)
self.title=self.axes.set_title(self._genTitle(self.tstep[0]),color=self.textcolor)
self.axes.set_xlabel('Longitude [E]')
self.axes.set_ylabel('Latitude [N]')
# create a colorbar
if not self.__dict__.has_key('cbar'):
self.cbar = self.fig.colorbar(self.collection)
else:
#print 'Updating colorbar...'
self.cbar.on_mappable_changed(self.collection)
self.canvas.draw()
def update_figure(self):
#if self.autoclim:
# self.clim = [self.data.min(),self.data.max()]
# self.climlow.SetValue('%3.1f'%self.clim[0])
# self.climhigh.SetValue('%3.1f'%self.clim[1])
#else:
# self.clim = [float(self.climlow.GetValue()),\
# float(self.climhigh.GetValue())]
#
#if self.X.shape == self.collection._coordinates.shape[0:2]:
# self.collection.set_array(np.array(self.data.ravel()))
# self.collection.set_clim(vmin=self.clim[0],vmax=self.clim[1])
#else:
# self.create_figure()
## Update the title
#self.title=self.axes.set_title(self._genTitle(self.tstep[0]),color=self.textcolor)
self.create_figure()
#Update the colorbar
self.cbar.update_normal(self.collection)
# redraw the figure
self.canvas.draw()
def on_pick(self, event):
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#
# It carries lots of information, of which we're using
# only a small amount here.
#
box_points = event.artist.get_bbox().get_points()
msg = "You've clicked on a bar with coords:\n %s" % box_points
dlg = wx.MessageDialog(
self,
msg,
"Click!",
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def on_select_variable(self, event):
vname = event.GetString()
self.flash_status_message("Selecting variable: %s"%vname)
# update the spatial object and load the data
self.varname = vname
self.loadData(varname=self.varname)
# Check if the variable has a depth coordinate
depthstr = ['']
# If so populate the vertical layer box
if self.ndim==4:
depthstr = ['%1.4f'%self[self.zcoord][k] for k in range(self.Nz)]
self.depthlayer_list.SetItems(depthstr)
# Update the plot
self.update_figure()
def on_select_time(self, event):
self.tindex = event.GetSelection()
# Update the object time index and reload the data
if self.plot_type=='hydro':
if not self.tstep[0]==self.tindex:
self.tstep=[self.tindex]
self.loadData(varname=self.varname,tstep=self.tstep)
self.flash_status_message("Selecting variable: %s..."%event.GetString())
# Update the plot
self.update_figure()
def on_select_depth(self, event):
kindex = event.GetSelection()
if not self.K[0]==kindex:
# Check if its the seabed or surface value
if kindex>=self.Nz:
kindex=event.GetString()
self.K = [kindex]
self.loadData()
self.flash_status_message("Selecting depth: %s..."%event.GetString())
# Update the plot
self.update_figure()
def on_open_file(self, event):
file_choices = "ROMS NetCDF (*.nc)|*.nc|All Files (*.*)|*.*"
dlg = wx.FileDialog(
self,
message="Open ROMS file...",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=file_choices,
style= wx.FD_MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
self.plot_type='hydro'
path = dlg.GetPaths()
# Initialise the class
if dlg.GetFilterIndex() == 0:
self.flash_status_message("Opening ROMS file: %s" % path)
startvar='h'
ROMS.__init__(self,path,varname=startvar)
# Populate the drop down menus
vnames = self.listCoordVars()
self.variable_list.SetItems(vnames)
# Update the time drop down list
if self.__dict__.has_key('time'):
self.timestr = [datetime.strftime(tt,'%d-%b-%Y %H:%M:%S') for tt in self.time]
else:
# Assume that it is a harmonic-type file
self.timestr = self.nc.Constituent_Names.split()
self.time_list.SetItems(self.timestr)
# Draw the depth
if startvar in vnames:
self.varname=startvar
self.loadData()
self.create_figure()
def on_load_grid(self, event):
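        # Apparently left over from the SUNTANS version of this GUI: Grid is not
        # imported here and the corresponding menu entry is commented out above.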
dlg = wx.DirDialog(
self,
message="Open SUNTANS grid from folder...",
defaultPath=os.getcwd(),
style= wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
# Initialise the class
self.flash_status_message("Opening SUNTANS grid from folder: %s" % path)
Grid.__init__(self,path)
# Plot the Grid
self.axes,self.collection = self.plotmesh(ax=self.axes,edgecolors='y')
# redraw the figure
self.canvas.draw()
def on_load_ptm(self, event):
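        # Also unused here: PtmBin is not imported and the PTM menu entry is
        # commented out above.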
file_choices = "PTM Binary (*_bin.out)|*_bin.out|All Files (*.*)|*.*"
dlg = wx.FileDialog(
self,
message="Open PTM file...",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=file_choices,
style= wx.FD_MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
self.plot_type = 'particles'
path = dlg.GetPath()
# Initialise the class
self.flash_status_message("Opening PTM binary file: %s" % path)
self.PTM = PtmBin(path)
# Update the time drop down list
self.timestr = [datetime.strftime(tt,'%d-%b-%Y %H:%M:%S') for tt in self.PTM.time]
self.time_list.SetItems(self.timestr)
# Plot the first time step
if self.__dict__.has_key('xlims'):
self.PTM.plot(self.PTM.nt-1,ax=self.axes,xlims=self.xlims,\
ylims=self.ylims,fontcolor='w')
else:
self.PTM.plot(self.PTM.nt-1,ax=self.axes,fontcolor='w')
# redraw the figure
self.canvas.draw()
def on_show_edges(self,event):
sender=event.GetEventObject()
self.showedges = sender.GetValue()
# Update the figure
self.update_figure()
def on_clim_check(self,event):
sender=event.GetEventObject()
if sender.GetValue() == True:
self.autoclim=False
self.update_figure()
else:
self.autoclim=True
def on_climlow(self,event):
self.clim[0] = event.GetString()
#self.update_figure()
def on_climhigh(self,event):
self.clim[1] = event.GetString()
#self.update_figure()
def on_select_cmap(self,event):
self.cmap=event.GetString()
self.collection.set_cmap(self.cmap)
self.cbar.on_mappable_changed(self.collection)
self.canvas.draw()
# Update the figure
#self.update_figure()
def on_save_fig(self,event):
"""
Save a figure of the current scene to a file
"""
file_choices = " (*.png)|*.png| (*.pdf)|*.pdf |(*.jpg)|*.jpg |(*.eps)|*eps "
filters=['.png','.pdf','.png','.png']
dlg = wx.FileDialog(
self,
message="Save figure to file...",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=file_choices,
style= wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
ext = filters[dlg.GetFilterIndex()]
if ext in path:
outfile=path
else:
outfile = path+ext
self.fig.savefig(outfile)
def on_save_anim(self,event):
"""
Save an animation of the current scene to a file
"""
file_choices = "Quicktime (*.mov)|*.mov| (*.gif)|*.gif| (*.avi)|*.avi |(*.mp4)|*.mp4 "
filters=['.mov','.gif','.avi','.mp4']
dlg = wx.FileDialog(
self,
message="Output animation file...",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=file_choices,
style= wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
ext = filters[dlg.GetFilterIndex()]
if ext in path:
outfile=path
else:
outfile = path+ext
self.flash_status_message("Saving figure to file: %s" %outfile)
self.flash_status_message("Saving animation to file: %s" %outfile)
# Create the animation
#self.tstep = range(self.Nt) # Use all time steps for animation
#self.animate(cbar=self.cbar,cmap=self.cmap,\
# xlims=self.axes.get_xlim(),ylims=self.axes.get_ylim())
def updateScalar(i):
self.tstep=[i]
self.loadData()
self.update_figure()
return (self.title,self.collection)
self.anim = animation.FuncAnimation(self.fig, updateScalar, frames=self.Nt, interval=50, blit=True)
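            # blit=True requires updateScalar to return the artists it modified,
            # which is why it returns (title, collection) above.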
if ext=='.gif':
self.anim.save(outfile,writer='imagemagick',fps=6)
else:
self.anim.save(outfile,fps=6,bitrate=3600)
# Return the figure back to its status
del self.anim
self.tstep=self.tindex
self.loadData()
self.update_figure()
# Bring up a dialog box
dlg2= wx.MessageDialog(self, 'Animation complete.', "Done", wx.OK)
dlg2.ShowModal()
dlg2.Destroy()
def on_exit(self, event):
self.Destroy()
def on_about(self, event):
msg = """ ROMS NetCDF visualization tool
*Author: Matt Rayson
*Institution: Stanford University
*Created: May 2014
"""
dlg = wx.MessageDialog(self, msg, "About", wx.OK)
dlg.ShowModal()
dlg.Destroy()
def on_plot_gridstat(self, event):
"""
Plot the grid size histogram in a new figure
"""
matplotlib.pyplot.figure()
self.plothist()
matplotlib.pyplot.show()
def create_status_bar(self):
self.statusbar = self.CreateStatusBar()
def flash_status_message(self, msg, flash_len_ms=1500):
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
def SetAxColor(ax,color,bgcolor):
ax.set_axis_bgcolor(bgcolor)
ax.yaxis.set_tick_params(color=color,labelcolor=color)
ax.xaxis.set_tick_params(color=color,labelcolor=color)
ax.yaxis.label.set_color(color)
ax.xaxis.label.set_color(color)
ax.spines['top'].set_color(color)
ax.spines['bottom'].set_color(color)
ax.spines['left'].set_color(color)
ax.spines['right'].set_color(color)
return ax
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = ROMSPlotPy()
app.frame.Show()
app.MainLoop()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Audio Recording utilities.
Parts of this code are taken from:
- https://stackoverflow.com/a/6743593/2402281
Information to write this code are taken from:
- https://en.wikipedia.org/wiki/Voice_frequency
"""
# Python-native imports
import logging
import struct
import time
import wave
from array import array
from collections import deque
from struct import pack
from sys import byteorder
from threading import Thread
from typing import Union, Tuple
# Third-party imports
import numpy as np
import pyaudio
import pykka
# App imports
from pyCrow.audiolib.process import normalize, trim, add_silence, butter_bandpass_filter
from pyCrow.crowlib import Action
L = logging.getLogger(__name__)
L.debug(f'Loaded module: {__name__}.')
class AudioActor(pykka.ThreadingActor):
def __init__(self, config: dict, **kwargs):
super().__init__()
self._config = config
self._recorder = Recorder(**kwargs)
def on_start(self):
L.info(msg=f'Started AudioActor ({self.actor_urn})')
def on_stop(self):
L.info('AudioActor is stopped.')
def on_failure(self, exception_type, exception_value, traceback):
L.error(f'AudioActor failed: {exception_type} {exception_value} {traceback}')
def on_receive(self, msg: dict) -> None:
L.info(msg=f'AudioActor received message: {msg}')
# process msg and alter state accordingly
_cmd = msg.get('cmd', '').lower()
if _cmd == Action.AUDIO_RECORD.get('cmd'):
self._recorder.record(seconds=self._config.get('duration'))
elif _cmd == Action.AUDIO_RECORD_TO_FILE.get('cmd'):
self._recorder.record_to_file(
file=self._config.get('file'), seconds=self._config.get('duration'))
else:
# default: do nothing but log this event
L.info(msg=f'Received message {msg} which cannot be processed.')
class Recorder(object):
"""
1s ==> 44100 samples
20ms ==> 44100/50 = 882 samples
"""
# constants for the speech recognition task
RATE: int = 44100
THRESHOLD: int = 500
CHUNK_SIZE: int = 1024
FORMAT: int = pyaudio.paInt16
def __init__(self, threshold: int = THRESHOLD, chunk_size: int = CHUNK_SIZE,
format: int = FORMAT, rate: int = RATE):
super(Recorder, self).__init__()
self.THRESHOLD = threshold
self.CHUNK_SIZE = chunk_size
self.FORMAT = format
self.RATE = rate
        self._last_recording = np.empty(0)
L.info('Instantiated Recorder with specs:\n' + '\n'.join(
['\t\t{}: {}'.format(k, v) for k, v in self.__dict__.items()]))
def record(self, seconds: Union[int, float] = 0) -> Tuple[int, np.ndarray]:
"""
RecordAudio a word or words from the microphone and
return the data as a ``numpy.ndarray`` of signed shorts.
Normalizes the audio, trims silence from the
start and end, and pads with 0.5 seconds of
blank sound to make sure VLC et al can play
it without getting chopped off.
(this shall be configurable -^)
"""
# store data in this array
#r = array('h')
global r # FIXME AHHH... need to fix this asap!!! :D
r = np.ndarray(shape=(0,), dtype=np.dtype('h'))
# use a ring buffer to buffer at most 1024 chunks
ring_buffer = deque(maxlen=int(self.CHUNK_SIZE))
# local helper: flush the buffer data to the recordings array
def _l__persist_recordings_from_buffer():
def _h(v):
global r
r = np.append(r, v).astype(dtype='h')
L.debug('Writing audio from ring buffer to byte array.')
Thread(target=_h, args=[ring_buffer.copy()], daemon=False).start()
ring_buffer.clear()
# local helper: pyaudio stream callback
def _l__audio_stream_callback(stream_in: bytes, frame_count, time_info, status):
if status:
L.error('Non zero exit status in audio stream callback! Aborting...')
return None, pyaudio.paAbort
unpacked_in_data = list(struct.unpack('h' * frame_count, stream_in))
# append data to the ring buffer
if byteorder == 'big':
ring_buffer.extendleft(unpacked_in_data)
else: # little
ring_buffer.extend(unpacked_in_data)
# when ring buffer is full, flush it to a byte array
if len(ring_buffer) >= int(self.CHUNK_SIZE):
_l__persist_recordings_from_buffer()
return None, pyaudio.paContinue
# let the recording begin...
p = pyaudio.PyAudio()
stream = p.open(format=self.FORMAT, channels=1, rate=self.RATE, input=True, output=False,
frames_per_buffer=self.CHUNK_SIZE, stream_callback=_l__audio_stream_callback)
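        # With a stream_callback, PyAudio pulls audio on its own thread; the loop
        # below only keeps the main thread alive for the requested duration.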
sample_width = p.get_sample_size(self.FORMAT)
input_specs = '\n'.join(
['\t\t{:30}: {}'.format(k, v) for k, v in p.get_default_input_device_info().items()])
L.info(f'Input device is running with the following specs:\n{input_specs}')
t = time.time()
while stream.is_active() and (time.time() < (t + seconds) or seconds == 0):
L.debug(f'Start time: {t}\tEnd time: {t+seconds}\tDelta: {t+seconds-time.time()}')
time.sleep(1. / self.RATE)
# yield sample_width, r
# flush the rest of the buffer
_l__persist_recordings_from_buffer()
L.debug('Stopping audio stream...')
stream.stop_stream()
L.debug('Closing audio stream...')
stream.close()
p.terminate()
# FIXME the processing of the data done below shall be done only!!!
# TODO make this configurable?
# post-processing of the audio data
#r = normalize(r, absolute_maximum=16384) # 16384 is the max for int16 (2**15 / 2)
#r = trim(r, threshold=self.THRESHOLD)
#r = add_silence(r, seconds=0.5, rate=self.RATE)
        self._last_recording = r
return sample_width, r
def record_to_file(self, file: str, seconds: Union[int, float] = 0) -> None:
""" Records from the microphone and outputs the resulting data to '`file'`.
"""
sample_width, npdata = self.record(seconds=seconds)
L.info(f'Audio data is of length {len(npdata)/self.RATE} before filtering.')
# todo parametrize filtering and the like
# bandpass filter in the Voice Frequency (VF) range
npdata = butter_bandpass_filter(
npdata, cutfreq=(85.0, 800.0), sampling_frequency=self.RATE / 5, order=6)
        data = pack('<' + ('h' * len(npdata)), *npdata.astype(np.int16))
with wave.open(file, 'wb') as wf:
L.info(f'Writing audio data of length {len(npdata)/self.RATE}...')
wf.setnchannels(1)
wf.setsampwidth(sample_width)
wf.setframerate(self.RATE)
wf.writeframes(data)
L.info(f'Successfully written audio data into file "{file}".')
# TODO refactor into its own module
'''
import matplotlib.pyplot as plt
fig = plt.figure()
s = fig.add_subplot(111)
# s.plot(npdata)
s.specgram(npdata, NFFT=1024, Fs=self.RATE / 5, noverlap=900, cmap='binary')
plt.show(block=True)
'''
def record_mfcc_batches(self):
pass # TODO ...?
|
|
#!/usr/bin/env python
'''
Copyright (c) 2014, Andrew McConachie <[email protected]>
All rights reserved.
'''
import os
import math
import curses
import locale
import sys
import copy
from time import sleep
# hexcap specific imports
import cfg
import minibuffer
import capture
import packet
import layer
import section
# Our generic ScreenError exception class
class ScreenError(Exception):
def __init__(self, msg):
curses.echo()
curses.endwin()
# Our wrapper class for an ncurses screen
class HexScreen:
def __init__(self):
locale.setlocale(locale.LC_ALL, '')
self.code = locale.getpreferredencoding()
self.stdscr = curses.initscr()
curses.noecho()
curses.raw()
self.stdscr.keypad(1)
self.headerHeight = 2 # Section / Column names
self.footerHeight = 2 # Includes blank line
# Our stack of hidden sections
self.hiddenSectIDs = []
# Are we in insert mode?
self.insert = False
# Is the mark set?
self.markSet = False
# Packet ID of marked packet. Zero based.
self.mark = 0
# Flag is True if mini-buffer has focus
self.mBufFocus = False
# Message to be printed to mBuf for one cycle and then cleared
self.mBufMsg = ''
def tearDown(self, dieStr=''):
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
if(len(dieStr)):
print(dieStr)
sys.exit(0)
# Initializes our ncurses pad
# Takes a Capture object
def initPad(self, cap):
self.cap = cap
self.maxY, self.maxX = self.stdscr.getmaxyx()
self.ppadTopY = self.headerHeight # Topmost ppad position on screen
self.ppadBottomY = self.maxY - self.footerHeight # Bottommost ppad position on screen
self.ppadRows = len(self.cap.packets) # Total number of lines in ppad
self.buildSections()
self.drawPpads()
self.initCursor()
self.refresh()
# Initialize all cursor attributes
def initCursor(self):
self.cY = self.headerHeight
self.cX = self.offLimitsWidth
self.ppadCurY = 0 # Current topmost visible Y in ppad
self.ppadCurX = 0 # Current leftmost visible X in ppad
# Completely redraws our ppad and rebuilds our section list
# Sets ppadWidth
def drawPpads(self):
if(self.ppadRows != len(self.cap.packets)): # Our capture has changed in size
self.buildSections()
# Draw our packet ppad
self.ppadRows = len(self.cap.packets)
self.ppadWidth = self.tableWidth + 1 # Don't understand, why the extra 1?
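    # (Likely because curses raises an error when a write ends in the last cell
    # of a pad, so one spare column avoids that.)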
self.ppad = curses.newpad(self.ppadRows, self.ppadWidth)
self.stdscr.clear()
self.ppad.clear()
y = 0
for p in self.cap.packets:
self.drawPktLine(y, p.out())
y += 1
# Create our header ppad
self.headPpad = curses.newpad(2, self.ppadWidth)
def refresh(self):
# cfg.dbg("hexscreen.py refresh tw:" + str(self.tableWidth) + " ppadCurX:" + str(self.ppadCurX) + " maxX:" + str(self.maxX))
if(curses.is_term_resized(self.maxY, self.maxX)):
cfg.dbg("Caught resize event. Consider using immedok()")
self.tearDown()
self.drawHeader()
self.drawFooter()
# Handle the mini-buffer
if(self.mBufFocus):
eStr = self.mBuf.exe()
if(eStr):
self.toggleMBuf()
self.stdscr.move(self.cY, self.cX)
self.genericTry(eStr)
else:
self.printToMBuf(self.mBuf.out())
self.stdscr.move(self.maxY - 1, self.mBuf.cX)
else:
self.printToMBuf(self.mBufMsg)
self.mBufMsg = ''
self.stdscr.move(self.cY, self.cX)
self.refreshBoldPacket()
self.headPpad.refresh(0, self.ppadCurX, 0, 0, self.headerHeight, self.maxX - 1)
self.ppad.refresh(self.ppadCurY, self.ppadCurX, self.ppadTopY, 0, self.ppadBottomY, self.maxX - 1)
self.stdscr.refresh()
curses.doupdate()
# Determines the correct order of sections to display based on capture
def buildSections(self):
# Remember which sections are exposed before clobbering self.sections
hiddenSections = []
if(hasattr(self, 'sections')):
for s in self.sections:
if(not s.exposed):
hiddenSections.append(s.ID)
self.sections = []
IDs = [] # Holds temp list of sections we've added to self.sections
for pkt in self.cap.packets:
for lay in pkt.layers:
if(not(lay.ID in IDs)):
IDs.append(lay.ID)
# Construct our new section
s = section.Section(lay.ID, lay.position)
for col,width in lay.cols.iteritems():
s.append(col, width)
# non-default values for layers need to be handled here
if(s.ID in hiddenSections):
s.exposed = False
else:
s.exposed = lay.exposed
s.exposable = lay.exposable
s.RO = lay.RO
# append/insert our new section
if(len(self.sections) <= 1):
self.sections.append(s)
else:
for ii in xrange(len(self.sections)):
if(ii == len(self.sections) - 1):
self.sections.append(s)
break
elif(s.position <= self.sections[ii].position):
self.sections.insert(ii, s)
break
# Relative Y cursor position in our ppad
def _get_ppadCY(self):
return self.ppadCurY + self.cY - self.ppadTopY
ppadCY = property(_get_ppadCY)
# Relative X cursor position in our ppad
def _get_ppadCX(self):
return self.ppadCurX + self.cX
ppadCX = property(_get_ppadCX)
# An ordered list of displayed sections
def _get_displayedSections(self):
rv = []
for s in self.sections:
if(s.visible):
rv.append(s)
return rv
displayedSections = property(_get_displayedSections)
# Table width of the entire displayed table
def _get_tableWidth(self):
rv = 0
for s in self.displayedSections:
rv += s.width
return max(1, rv)
tableWidth = property(_get_tableWidth)
# Leftmost width that is off limits to cursor
def _get_offLimitsWidth(self):
rv = 0
for s in self.displayedSections:
if(s.RO):
rv += s.width
else:
return rv
return False
offLimitsWidth = property(_get_offLimitsWidth)
# Leftmost sections that are off limits to cursor
def _get_offLimitsSections(self):
rv = 0
for s in self.displayedSections:
if(s.RO):
rv += 1
else:
return rv
return rv
offLimitsSections = property(_get_offLimitsSections)
# Returns header section that cursor X value is currently in
# Takes X value of cursor
def cursorSection(self, x):
dSections = self.displayedSections
totX = self.ppadCurX * -1
for s in dSections:
if(x < totX + s.width):
return s
else:
totX += s.width
    return dSections[-1]
# Returns header section and column key that passed X value is currently in
# Takes X screen position
def cursorColumn(self, x):
totX = self.ppadCurX * -1
for s in self.displayedSections:
if(x < totX + s.width - 1):
if(s.exposed):
for col, cWidth in s.c.iteritems():
if(x < totX + cWidth):
return list((s, col))
else:
totX += cWidth + 1
else:
return list((s, None))
else:
totX += s.width
# Returns leftmost screen X value for passed section name
def sectionLeft(self, sid):
rv = self.ppadCurX * -1
for s in self.displayedSections:
if(s.ID == sid):
return rv
else:
rv += s.width
raise ScreenError, "sectionLeft:Section not found"
# Returns center screen X value for passed section name
def sectionCenter(self, sid):
rv = self.ppadCurX * -1
for s in self.displayedSections:
if(s.ID == sid):
c = rv + (int(math.floor(s.width / 2)))
return c
else:
rv += s.width
raise ScreenError, "sectionCenter:Section not found"
# Returns leftmost screen X value(after "|") for passed section and column name
# If column is None then returns leftmost screen X value(after "|") for section only
def columnLeft(self, sid, cid=None):
rv = self.sectionLeft(sid)
if(cid == None):
return rv
for s in self.displayedSections:
if(s.ID == sid):
if(s.exposed):
for col, width in s.c.iteritems():
if(col == cid):
return rv
else:
rv += width + 1
else:
return rv
raise ScreenError, "columnLeft:Column not found"
# Returns rightmost screen X value(before "|") for passed section and column name
def columnRight(self, sid, cid):
for s in self.displayedSections:
if(s.ID == sid):
if(s.exposed):
return self.columnLeft(sid, cid) + s.c[cid] - 1
else:
return self.sectionLeft(sid) + s.width - 1
# Handle regular refreshing of packet lines
# cfg.dbg("refreshBoldPacket markSet:" + str(self.markSet) + " mark:" + str(self.mark) + " ppadCY:" + str(self.ppadCY) + " pkts:" + str(len(self.cap.packets)))
def refreshBoldPacket(self):
if(len(self.cap.packets) == 1):
if(self.markSet):
self.drawPktLine(0, self.cap.packets[0].out(), True, True)
else:
self.drawPktLine(0, self.cap.packets[0].out(), True, False)
return
if(self.markSet):
self.drawPktLine(self.ppadCY, self.cap.packets[self.ppadCY].out(), False, True)
if(self.ppadCY < self.mark): # Cursor is above mark
if(self.ppadCY > 0):
self.drawPktLine(self.ppadCY - 1, self.cap.packets[self.ppadCY - 1].out())
for pkt in xrange(self.mark, self.ppadCY + 1, -1):
self.drawPktLine(pkt, self.cap.packets[pkt].out(), False, True)
if(self.mark < len(self.cap.packets) - 1):
self.drawPktLine(self.mark + 1, self.cap.packets[self.mark + 1].out())
elif(self.ppadCY == self.mark): # Cursor is on mark
if(self.mark > 0):
self.drawPktLine(self.ppadCY - 1, self.cap.packets[self.ppadCY - 1].out())
if(self.mark < len(self.cap.packets) - 1):
self.drawPktLine(self.ppadCY + 1, self.cap.packets[self.ppadCY + 1].out()) ##
elif(self.ppadCY > self.mark): # Cursor is below mark
if(self.mark > 0):
self.drawPktLine(self.mark - 1, self.cap.packets[self.mark - 1].out())
for pkt in xrange(self.mark, self.ppadCY + 1):
self.drawPktLine(pkt, self.cap.packets[pkt].out(), False, True)
if(self.ppadCY < len(self.cap.packets) - 1):
self.drawPktLine(self.ppadCY + 1, self.cap.packets[self.ppadCY + 1].out())
else:
self.drawPktLine(self.ppadCY, self.cap.packets[self.ppadCY].out(), True)
if(self.ppadCY == 0): # First packet in ppad
if(len(self.cap.packets) > 1):
self.drawPktLine(1, self.cap.packets[1].out())
elif(self.cY == self.ppadTopY - 1): # Top packet on screen
self.drawPktLine(self.ppadCY + 1, self.cap.packets[self.ppadCY + 1].out())
elif((self.cY == self.ppadBottomY - 1) or (len(self.cap.packets) == self.ppadCY + 1)): # Bottom packet on screen
self.drawPktLine(self.ppadCY - 1, self.cap.packets[self.ppadCY - 1].out())
else: # Middle packet on screen
self.drawPktLine(self.ppadCY - 1, self.cap.packets[self.ppadCY - 1].out())
self.drawPktLine(self.ppadCY + 1, self.cap.packets[self.ppadCY + 1].out())
# Draws a packet line onto our ppad
# Takes a y value and list of cells that correlates to our global header list
# cfg.dbg("drawPktLine y:" + str(y) + " pid:" + str(row['pid']['pid']) + " bold:" + str(bold) + " rev:" + str(reverse))
def drawPktLine(self, y, row, bold=False, reverse=False):
x = 0
for s in self.sections:
if(s.visible):
if(s.exposed):
if(s.ID in row):
for colName, width in s.c.iteritems():
if(reverse):
self.ppadAddStr(y, x, row[s.ID][colName].rjust(width) + "|", curses.A_REVERSE)
else:
if(bold):
self.ppadAddStr(y, x, row[s.ID][colName].rjust(width) + "|", curses.A_BOLD)
else:
self.ppadAddStr(y, x, row[s.ID][colName].rjust(width) + "|")
x += width + 1
else:
self.ppadHLine(y, x, " ", s.width - 1)
self.ppadAddStr(y, x + s.width - 1, "|")
x += s.width
else:
self.ppadHLine(y, x, "-", s.width - 1)
self.ppadAddStr(y, x + s.width - 1, "|")
x += s.width
else:
continue
# Draws our top 2 header rows
def drawHeader(self):
x0 = 0
x1 = 0
for s in self.sections:
if(s.visible):
if(s.exposed):
head = "{" + s.ID + "}"
head = head.center(s.width - 1, " ") + "|"
self.headPpadAddStr(0, x0, head)
x0 += s.width
for column, width in s.c.iteritems():
col = column.center(width, " ")
self.headPpadAddStr(1, x1, col + "|", curses.A_REVERSE)
x1 += width + 1
else:
head = "{" + s.ID + "}|"
self.headPpadAddStr(0, x0, head)
self.headPpadHLine(1, x1, "-", s.width - 1, curses.A_REVERSE)
self.headPpadAddStr(1, x1 + s.width - 1, "|", curses.A_REVERSE)
x0 += s.width
x1 += s.width
else:
continue
# Draws our footer
def drawFooter(self):
s,c = self.cursorColumn(self.cX)
y = self.maxY - self.footerHeight
x = 0
divider = 2
def addElement(sf):
sf = "[" + sf + "]"
self.stdscr.hline(y, x, "-", divider)
self.stdscr.addstr(y, x + divider, sf)
return divider + len(sf)
x += addElement(self.cap.fName)
txt = str(self.ppadCY + 1) + "/" + str(len(self.cap.packets))
x += addElement(txt)
if(self.markSet):
txt = "MRK"
elif(self.insert):
txt = "INS"
else:
txt = "NAV"
x += addElement(txt)
if(s.exposed):
if(s.RO):
txt = s.ID + "/" + c + "/RO"
else:
txt = s.ID + "/" + c + "/RW"
else:
txt = s.ID + "/-/-"
x += addElement(txt)
if(self.cap.ifName):
x += addElement(self.cap.ifName)
# Show control elements if present
if(self.cap.packets[self.ppadCY].control):
if(self.cap.packets[self.ppadCY].control == 'g'):
for lay in self.cap.packets[self.ppadCY].genLayers:
if(lay.ID == s.ID):
for col in lay.gen:
if(col == c):
txt = "cnt:" + str(lay.gen[col]['count'])
txt += " stp:" + str(lay.gen[col]['step'])
txt += " msk:" + lay.gen[col]['mask']
x += addElement(txt)
break
# Claim remaining space
if(self.tableWidth > x):
self.stdscr.hline(y, x, "-", self.tableWidth - x)
# Handles pageUp and pageDown
def page(self, dY):
if(self.ppadBottomY >= self.ppadRows):
return
self.drawPktLine(self.ppadCY, self.cap.packets[self.ppadCY].out())
if(dY > 0):
ppadHeight = self.ppadBottomY - self.ppadTopY
if(self.ppadCurY + ppadHeight < self.ppadRows):
self.ppadCurY = min(self.ppadRows - ppadHeight, self.ppadCurY + dY)
else:
self.cY = self.ppadBottomY - 1
else:
if(self.ppadCurY > 0):
self.ppadCurY = max(self.ppadCurY + dY, 0)
else:
self.cY = self.ppadTopY
# Move cursor to first column after pktID
def gotoLineBegin(self):
self.ppadCurX = 0
self.cX = self.offLimitsWidth
# Move cursor to end of line
def gotoLineEnd(self):
if(self.maxX > self.tableWidth):
self.cX = self.tableWidth - 2
else:
self.ppadCurX = self.tableWidth - self.maxX
self.cX = self.maxX - 2
# Moves cursor right and left by delta columns
def shiftColumn(self, delta):
dSections = self.displayedSections
if(len(dSections) < 2):
return
if(self.cX >= self.maxX): # Reset our cursor and shift screen if we shifted off screen
s, col = self.cursorColumn(self.cX)
if(col == None):
self.ppadCurX += s.width
else:
self.ppadCurX += s.c[col]
self.cX = self.maxX - 1
elif(self.cX < 0):
self.ppadCurX += self.cX
s, col = self.cursorColumn(1)
self.cX = self.columnRight(s.ID, col)
if(delta == 0): # Where every call to this function usually ends up
return
sect, col = self.cursorColumn(self.cX)
if(col == None):
ii = -1
for s in dSections:
ii += 1
if(s.ID == sect.ID): # Found sect
if(delta > 0):
if(ii == len(dSections) - 1):
return
else:
ns = dSections[ii + 1]
self.cX = self.columnLeft(ns.ID, None)
self.shiftColumn(delta - 1)
else:
if(ii <= self.offLimitsSections): # Leftmost RO sections are off limits
return
else:
ns = dSections[ii - 1]
self.cX = self.columnLeft(ns.ID, None)
self.shiftColumn(delta + 1)
else:
sii = -1
for s in dSections:
sii += 1
if(sect.ID == s.ID):
cii = -1
for c,w in s.c.iteritems():
cii += 1
if(c == col): # Found sect and col
if(delta > 0):
if(cii == len(s.c) - 1): # Last column
if(sii == len(dSections) - 1): # Last section and column
return
else:
ns = dSections[sii + 1]
nc = ns.c.getStrKey(0)
self.cX = self.columnLeft(ns.ID, nc)
self.shiftColumn(delta - 1)
else:
self.cX = self.columnLeft(s.ID, s.c.getStrKey(cii + 1))
self.shiftColumn(delta - 1)
else:
if(cii == 0):
if(sii <= self.offLimitsSections): # Leftmost RO sections are off limits
return
else:
ns = dSections[sii - 1]
nc = ns.c.getStrKey(len(ns.c) - 1)
self.cX = self.columnLeft(ns.ID, nc)
self.shiftColumn(delta + 1)
else:
self.cX = self.columnLeft(s.ID, s.c.getStrKey(cii - 1))
self.shiftColumn(delta + 1)
# Moves our cursor, takes deltaY and deltaX
# Either deltaY or deltaX MUST be 0
def move(self, dY, dX):
# cfg.dbg("move cX:" + str(self.cX) + " dY:" + str(dY) + " dX:" + str(dX) + " ppadCurX:" + str(self.ppadCurX))
# Finds the next valid X position for cursor
# Returns False if no such position exists
def findValidX(diffX):
if((self.ppadCX + diffX >= self.ppadWidth - 1) or (self.ppadCX + diffX < self.offLimitsWidth)):
return False
else:
validChars = copy.deepcopy(cfg.hexChars)
validChars.append(ord('-')) # For hidden sections
validChars.append(ord('.')) # For the undefined layer
if(ord(self.inch(self.ppadCY, self.ppadCX + diffX)[1]) in validChars):
return diffX
else:
if(diffX > 0):
return findValidX(diffX + 1)
else:
return findValidX(diffX - 1)
if(dY == 0 and dX == 0):
return
if(dY != 0):
if(dY > 0):
if(self.cY + 1 < self.ppadBottomY): # Are we not at the bottom of the screen
if(self.ppadCY + 1 < len(self.cap.packets)): # Are we not at the end of the ppad
self.cY += 1
else:
if(self.ppadCurY + self.ppadBottomY - self.ppadTopY < self.ppadRows):
self.ppadCurY += 1
else:
if(self.cY - 1 >= self.ppadTopY):
self.cY -= 1
self.move(dY+1 ,0)
elif(self.cY - 1 == self.ppadTopY - 1):
if(self.ppadCurY > 0):
self.ppadCurY -= 1
elif(dX != 0):
if(dX > 0):
vX = findValidX(1)
if(not vX):
return
if(self.cX + vX < self.tableWidth - self.ppadCurX):
if(self.cX + vX < self.maxX):
self.cX += vX
else:
self.ppadCurX += vX
else:
vX = findValidX(-1)
if(not vX):
return
if(self.cX + vX > max(0, self.offLimitsWidth - self.ppadCurX - 1)):
self.cX += vX
else:
self.ppadCurX = max(0, self.ppadCurX + vX)
if(self.ppadCurX > 0 and self.ppadCurX <= self.offLimitsWidth and self.cX <= self.offLimitsWidth): # Reset screen to far left
self.cX = self.offLimitsWidth
self.ppadCurX = 0
if(dY > 0):
self.move(dY-1, 0)
elif(dY < 0):
self.move(dY+1, 0)
elif(dX > 0):
self.move(0, dX-1)
elif(dX < 0):
self.move(0, dX+1)
def toggleExpose(self, s=None):
if(not s):
s = self.cursorSection(self.cX)
if(not s.exposable):
return
if(s.exposed):
s.exposed = False
else:
s.exposed = True
self.cX = self.sectionCenter(s.ID)
self.drawPpads()
self.resetCursor()
self.refresh()
# Either expose all sections or unexpose all sections, whichever will toggle more sections
def toggleExposeAll(self):
x = 0
for s in self.sections:
if(s.exposed):
x += 1
else:
x -= 1
if(x > int(math.floor(len(self.sections) / 2))):
expose = False
else:
expose = True
for s in self.sections:
if(expose != s.exposed):
self.toggleExpose(s)
def toggleInsert(self):
if(self.markSet): # Cannot enter insert mode with mark set
return
self.insert = not self.insert
def toggleMBuf(self):
if(self.mBufFocus):
self.mBufFocus = False
self.printToMBuf()
del self.mBuf
else:
self.mBuf = minibuffer.MiniBuffer()
self.mBufFocus = True
# Prints text to the mini-buffer
def printToMBuf(self, s=''):
if(len(s.strip()) > 0):
self.stdscr.addstr(self.maxY - 1, 0, s.strip()[:self.maxX])
self.stdscr.hline(self.maxY - 1, len(s.strip()), " ", self.maxX)
else:
self.stdscr.hline(self.maxY - 1, 0, " ", self.maxX)
# Handles all character input to mBuf
def inputToMBuf(self, c):
if(curses.keyname(c) == '^Q' or curses.keyname(c) == '^X' or curses.keyname(c) == '^['):
self.toggleMBuf()
else:
self.mBuf.input(c)
def getch(self):
return self.stdscr.getch()
# Takes ppad relative y,x coordinates
# Returns list((int)attributes, (chr)character) at that location on our ppad
def inch(self, y, x):
# curses.inch() returns 3 bytes; left byte is attributes, right 2 bytes are the character
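# e.g. a raw value of 0x000041 becomes "000041", which decodes to attributes 0x00 and character chr(0x0041) == 'A'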
inpt = hex(self.ppad.inch(y, x))[2:].zfill(6)
return list((int(inpt[:2], 16), chr(int(inpt[2:], 16))))
# Executes passed string in try/except
# Properly exits if exception raised
def genericTry(self, s):
try:
rv = eval(s)
if(rv):
self.printToMBuf(rv)
except:
curses.echo()
curses.endwin()
raise
# Transmits packets by calling capture.tx()
# Handles blocking/unblocking and keyboard Interrupt from user
# If repeat is zero then loop until broken by user
def tx(self, first, last, repeat):
def end(): # Called before we return
self.stdscr.nodelay(0) # Reblock character input
if(fail):
self.printToMBuf("Error:One or more packets failed to transmit")
else:
self.printToMBuf(str(pktSent) + " packets egressed " + self.cap.ifName)
# Does the actual sending, returns on user input
# Returns tuple [successes, failure, userBreak]
# successes is packets sent successfully
# failure is True if any packet failed to send, otherwise false
# userBreak == True if user break detected, otherwise False
def sendPkts(packets):
successes = 0
failure = False
userBreak = False
for pkt in packets:
if(pkt.control == 's'): # Sleep
for halfSecond in xrange(2 * int(pkt.layer('cntrl').vals['arg'].strip())):
sleep(0.5)
if(self.getch() != -1):
return successes, failure, True
elif(pkt.control == 'j'): # Jump
jmpPid = int(pkt.layer('cntrl').vals['arg'].strip())
newPackets = [p for p in packets if(int(p.getPID()) >= jmpPid)]
ss,ff,ub = sendPkts(newPackets)
successes += ss
failure |= ff
userBreak |= ub
break
else: # Normal packet or generator
rv = self.cap.tx(pkt)
if(rv):
successes += rv
else:
failure = True
if(self.getch() != -1 or userBreak == True):
return successes, failure, True
return successes, failure, userBreak # End sendPkts()
if(os.getuid() or os.geteuid()):
return "Error:Requires root access"
pkts = []
fail = False
pktSent = 0
# Convert from user's one-based numbering to internal zero-based numbering
first -= 1
last -= 1
if(first <= last): # User wants normal ordering
if(first < 0):
first = 0
if(last > len(self.cap.packets) - 1):
last = len(self.cap.packets) - 1
for jj in xrange(first, last+1):
pkts.append(self.cap.packets[jj])
else: # User wants reverse ordering
if(last < 0):
last = 0
if(first > len(self.cap.packets) - 1):
first = len(self.cap.packets) - 1
for jj in xrange(first, last-1, -1):
pkts.append(self.cap.packets[jj])
# Check for illegal jumps before starting
for pkt in pkts:
if(pkt.control == 'j'):
jmpPid = int(pkt.layer('cntrl').vals['arg'].strip())
if(jmpPid < int(pkt.getPID())): # getPID() returns a zero-padded string, so compare numerically
return "Error_Internal:Cannot jump backwards"
if(jmpPid not in [int(p.getPID()) for p in pkts]):
return "Error:Cannot jump outside of tx range"
self.printToMBuf("Any key to break")
self.stdscr.nodelay(1) # Unblock character input
if(repeat == 0):
while True:
successes, failure, userBreak = sendPkts(pkts)
fail |= failure
pktSent += successes
if(userBreak):
end()
return
else:
for ii in xrange(repeat):
successes, failure, userBreak = sendPkts(pkts)
fail |= failure
pktSent += successes
if(userBreak):
end()
return
end()
# Receives packets by calling capture.rx()
# Handles blocking/unblocking and keyboard Interrupt from user
# Redraws ppad after some captured packets
# Takes count of packets to capture, and BPF filter
# BPF filter can be NULL
def rx(self, count, *filt):
def end(): # Called before we return
self.stdscr.nodelay(0) # Reblock character input
self.initPad(self.cap)
self.refresh()
def redraw(): # Called when we need to refresh during capture
self.initPad(self.cap)
self.refresh()
self.printToMBuf("Any key to break")
if(filt):
rv = self.cap.initRx(filt[0])
if(rv != None):
self.printToMBuf(rv)
return
elif(len(filt) == 0):
rv = self.cap.initRx('')
if(rv != None):
self.printToMBuf(rv)
return
else:
cfg.dbg("Error in hexscreen.rx(): Bad filter")
return
self.printToMBuf("Any key to break")
self.stdscr.nodelay(1) # Unblock character input
captured = 0
if(count == 0):
while True:
rv = self.cap.rx()
if(rv != 0): # We got a packet
captured += rv
if((captured % 10) == 0):
redraw()
if(self.getch() != -1):
end()
return
else:
while(captured != count):
rv = self.cap.rx()
if(rv != 0): # We got a packet
captured += rv
if((captured % 5) == 0):
redraw()
if(self.getch() != -1):
end()
return
end()
# Mini-buffer wrapper function for modifying a packet
# Takes a command string and variable list of args
def modPkt(self, f, *args):
def redraw(): # Redraws screen after modifying column
self.buildSections()
self.resetCursor()
self.drawPpads()
self.refresh()
# Modify a single column in a single packet
# Takes a command string and variable list of args
def modCol(f, *args):
s, cid = self.cursorColumn(self.cX)
sid = s.ID
if(not s.exposed):
return "Error:Cannot modify hidden section"
if(self.cap.packets[self.ppadCY].control and self.cap.packets[self.ppadCY].control != 'g'):
return "Error:Cannot add generator or mask to control packet"
if(s.RO):
return "Error:Layer is read only"
if(not self.cap.packets[self.ppadCY].hasLayer(sid)):
return "Error_Internal:Layer does not exist"
if(f == 'generator'):
count = args[0]
step = args[1]
return self.cap.packets[self.ppadCY].addGenerator(sid, cid, count, step)
elif(f == 'mask'):
mask = args[0]
if(len(mask) < 1):
return "Error:Mask too short"
# Mask can only contain hexChars
mask = mask.translate(None, '.,:').strip().lower()
if(len(mask.translate(None, ''.join(map(chr, cfg.hexChars)))) != 0):
return "Error:Mask may only contain hex digits"
# binMask must contain at least 1 zero
# binMask cannot have more than one grouping of zeros
# Valid masks: 110011, 1, 1100, 0011, 10
# Invalid masks: 00101, 110010, 0101
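# In other words, the binary form of the mask must contain exactly one contiguous run of '0' bits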
binMask = cfg.hexStrToBinStr(mask)
if(binMask.find('0') == -1):
return "Error:Invalid mask"
if(binMask.startswith('0')):
if(binMask.lstrip('0').find('0') != -1):
return "Error:Invalid mask"
else:
if(binMask.lstrip('1').lstrip('0').find('0') != -1):
return "Error:Invalid mask"
return self.cap.packets[self.ppadCY].addMask(sid, cid, binMask)
# END modCol()
if(f == 'generator'):
if(len(args) != 2):
self.printToMBuf("Error_Internal:Bad arg count")
return
else:
rv = modCol('generator', *args)
else:
if(len(args) != 1):
self.printToMBuf("Error_Internal:Bad arg count")
return
else:
if(f == 'mask'):
rv = modCol('mask', *args)
elif(f == 'sleep'):
rv = self.cap.packets[self.ppadCY].makeSleep(args[0])
elif(f == 'insert-sleep'):
rv = self.cap.insert('sleep', self.ppadCY, args[0])
elif(f == 'jump'):
jmpPid = args[0]
if(jmpPid < 1 or jmpPid > len(self.cap.packets)):
self.printToMBuf("Error:pid outside of range")
return
elif(jmpPid == self.ppadCY + 1):
self.printToMBuf("Error:Cannot jump to same packet")
return
elif(jmpPid <= self.ppadCY):
self.printToMBuf("Error:Cannot jump backwards")
return
else:
rv = self.cap.packets[self.ppadCY].makeJump(jmpPid)
elif(f == 'insert-jump'):
jmpPid = args[0]
if((jmpPid < 1) or (jmpPid > len(self.cap.packets))):
self.printToMBuf("Error:pid outside of range")
return
else:
rv = self.cap.insert('jump', self.ppadCY, jmpPid)
# Should check return values for all packet modifying functions
if(rv):
self.printToMBuf(rv)
return
else:
redraw()
# Wrapper for ppad.addstr
def ppadAddStr(self, y, x, s, atr=None):
if(atr):
self.genericTry("self.ppad.addstr(" + str(y) + "," + str(x) + ",'" + s + "'," + str(atr) + ")")
else:
self.genericTry("self.ppad.addstr(" + str(y) + "," + str(x) + ",'" + s + "')")
# Wrapper for ppad.hline
def ppadHLine(self, y, x, char, width, atr=None):
if(atr):
self.genericTry("self.ppad.hline(" + str(y) + "," + str(x) + ",'" + char + "'," + str(width) + "," + str(atr) + ")")
else:
self.genericTry("self.ppad.hline(" + str(y) + "," + str(x) + ",'" + char + "'," + str(width) + ")")
# Wrapper for header ppad.addstr
def headPpadAddStr(self, y, x, s, atr=None):
if(atr):
self.genericTry("self.headPpad.addstr(" + str(y) + "," + str(x) + ",'" + s + "'," + str(atr) + ")")
else:
self.genericTry("self.headPpad.addstr(" + str(y) + "," + str(x) + ",'" + s + "')")
# Wrapper for ppad.hline
def headPpadHLine(self, y, x, char, width, atr=None):
if(atr):
self.genericTry("self.headPpad.hline(" + str(y) + "," + str(x) + ",'" + char + "'," + str(width) + "," + str(atr) + ")")
else:
self.genericTry("self.headPpad.hline(" + str(y) + "," + str(x) + ",'" + char + "'," + str(width) + ",)")
# Handles our character insertion
# Modifies column then increments cursor X position by 1
def handleInsert(self, c):
sect,col = self.cursorColumn(self.cX)
if(sect.RO): # Cursor section ReadOnly
return
elif(not sect.exposed): # Cursor section not exposed
return
elif(not self.cap.packets[self.ppadCY].hasLayer(sect.ID)): # Cursor section not in packet
return
attr,char = self.inch(self.ppadCY, self.ppadCX)
if(ord(char) not in cfg.hexChars): # Cursor character is immutable
self.move(0, 1)
return
leftX = self.columnLeft(sect.ID, col)
rightX = self.columnRight(sect.ID, col)
val = ""
for x in xrange(leftX, rightX + 1):
if(x == self.cX):
val += chr(c)
else:
attr,char = self.inch(self.ppadCY, self.ppadCurX + x)
val += char
self.cap.packets[self.ppadCY].setColumn(sect.ID, col, val)
self.move(0, 1)
def toggleMark(self):
if(self.insert): # Cannot set mark in insert mode
return
if(self.markSet):
self.markSet = False
self.drawPpads()
self.refresh()
else:
self.markSet = True
self.mark = self.ppadCY
# Called after an action MAY cause cY,cX,ppadCurY,ppadCurX to be in illegal position(s)
# Returns them to legal position(s)
def resetCursor(self):
# Handle X
if(self.ppadCurX >= self.tableWidth - self.maxX):
self.ppadCurX = self.tableWidth - self.maxX - 2
self.ppadCurX = max(0, self.ppadCurX)
# Too far right
if(self.cX > self.maxX - 1):
self.cX = self.maxX - 1
elif(self.cX > self.tableWidth - self.ppadCurX - 2):
self.cX = self.tableWidth - self.ppadCurX - 2
# Too far left
if(self.cX < self.offLimitsWidth - self.ppadCurX):
self.cX = self.offLimitsWidth
# Handle Y
if(self.ppadCurY >= len(self.cap.packets) - self.maxY):
self.ppadCurY = len(self.cap.packets) - self.maxY - 1
self.ppadCurY = max(0, self.ppadCurY)
if(len(self.cap.packets) <= 1):
self.cY = self.ppadTopY
elif(self.cY < self.ppadTopY):
self.cY = self.ppadTopY
elif(self.cY > self.ppadBottomY):
self.cY = self.ppadBottomY
elif(self.ppadCY >= len(self.cap.packets)):
self.cY = self.ppadTopY + len(self.cap.packets) - 1
# Actually move the cursor
self.stdscr.move(self.cY, self.cX)
# cfg.dbg("Hexscreen_yank len_packets:" + str(len(self.cap.packets)) + " len_clipboard:" + str(len(self.cap.clipboard)) + \
# " ppadCY:" + str(self.ppadCY) + " mark:" + str(self.mark))
def yank(self):
if(not self.markSet):
return
# We can't yank the whole buffer
if(not ((self.mark == 0 and self.ppadCY == len(self.cap.packets) - 1) or (self.mark == len(self.cap.packets) - 1 and self.ppadCY == 0))):
if(self.ppadCY <= self.mark):
self.cap.yank(self.ppadCY, self.mark)
else:
self.cap.yank(self.mark, self.ppadCY)
self.cY -= len(self.cap.clipboard) - 1
self.markSet = False
self.resetCursor()
self.drawPpads()
self.refresh()
# Yanks a single packet to clipboard
def yankPacket(self):
if(len(self.cap.packets) <= 1):
return
self.cap.yank(self.ppadCY, self.ppadCY)
self.resetCursor()
self.drawPpads()
self.refresh()
def paste(self):
if(len(self.cap.clipboard) == 0):
return
self.cap.paste(self.ppadCY)
self.cY += len(self.cap.clipboard)
self.resetCursor()
self.drawPpads()
self.refresh()
|
|
import datetime
import warnings
from django.conf import settings
import haystack
from haystack.backends import BaseEngine
from haystack.constants import DEFAULT_OPERATOR, DJANGO_CT, DJANGO_ID
from haystack.exceptions import MissingDependency
from haystack.utils import get_identifier, get_model_ct
# Backport support
from .constants import FUZZINESS
from .elasticsearch_backend import (ElasticsearchSearchBackend,
ElasticsearchSearchQuery)
try:
import elasticsearch
if not ((7, 0, 0) <= elasticsearch.__version__ < (8, 0, 0)):
raise ImportError
from elasticsearch.helpers import bulk, scan
except ImportError:
raise MissingDependency(
"The 'elasticsearch7' backend requires the \
installation of 'elasticsearch>=7.0.0,<8.0.0'. \
Please refer to the documentation."
)
DEFAULT_FIELD_MAPPING = {
"type": "text",
"analyzer": "snowball",
}
FIELD_MAPPINGS = {
"edge_ngram": {
"type": "text",
"analyzer": "edgengram_analyzer",
},
"ngram": {
"type": "text",
"analyzer": "ngram_analyzer",
},
"date": {"type": "date"},
"datetime": {"type": "date"},
"location": {"type": "geo_point"},
"boolean": {"type": "boolean"},
"float": {"type": "float"},
"long": {"type": "long"},
"integer": {"type": "long"},
}
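# Example Django settings for wiring up this engine (a sketch only; the dotted
# module path below is an assumption -- point it at wherever this module
# actually lives in your project):
#
#   HAYSTACK_CONNECTIONS = {
#       "default": {
#           "ENGINE": "myproject.backends.elasticsearch7_backend.Elasticsearch7SearchEngine",
#           "URL": "http://127.0.0.1:9200/",
#           "INDEX_NAME": "haystack",
#       },
#   }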
class Elasticsearch7SearchBackend(ElasticsearchSearchBackend):
# Settings to add an n-gram & edge n-gram analyzer.
DEFAULT_SETTINGS = {
"settings": {
"index": {
"max_ngram_diff": 2,
},
"analysis": {
"analyzer": {
"ngram_analyzer": {
"tokenizer": "standard",
"filter": [
"haystack_ngram",
"lowercase",
],
},
"edgengram_analyzer": {
"tokenizer": "standard",
"filter": [
"haystack_edgengram",
"lowercase",
],
},
},
"filter": {
"haystack_ngram": {
"type": "ngram",
"min_gram": 3,
"max_gram": 4,
},
"haystack_edgengram": {
"type": "edge_ngram",
"min_gram": 2,
"max_gram": 15,
},
},
},
},
}
def __init__(self, connection_alias, **connection_options):
super(Elasticsearch7SearchBackend, self).__init__(
connection_alias, **connection_options)
self.content_field_name = None
def _get_doc_type_option(self):
# ES7 does not support a doc_type option
return {}
def _get_current_mapping(self, field_mapping):
# ES7 does not support a doc_type option
return {"properties": field_mapping}
def clear(self, models=None, commit=True):
"""
Clears the backend of all documents/objects for a collection of models.
:param models: List or tuple of models to clear.
:param commit: Not used.
"""
if models is not None:
assert isinstance(models, (list, tuple))
try:
if models is None:
self.conn.indices.delete(index=self.index_name, ignore=404)
self.setup_complete = False
self.existing_mapping = {}
self.content_field_name = None
else:
models_to_delete = []
for model in models:
models_to_delete.append("%s:%s" % (DJANGO_CT, get_model_ct(model)))
# Delete using scroll API
query = {
"query": {"query_string": {"query": " OR ".join(models_to_delete)}}
}
generator = scan(
self.conn,
query=query,
index=self.index_name,
)
actions = (
{"_op_type": "delete", "_id": doc["_id"]} for doc in generator
)
bulk(
self.conn,
actions=actions,
index=self.index_name,
)
self.conn.indices.refresh(index=self.index_name)
except elasticsearch.TransportError as e:
if not self.silently_fail:
raise
if models is not None:
self.log.error(
"Failed to clear Elasticsearch index of models '%s': %s",
",".join(models_to_delete),
e,
exc_info=True,
)
else:
self.log.error(
"Failed to clear Elasticsearch index: %s", e, exc_info=True
)
def build_search_kwargs(
self,
query_string,
sort_by=None,
start_offset=0,
end_offset=None,
fields="",
highlight=False,
facets=None,
date_facets=None,
query_facets=None,
narrow_queries=None,
spelling_query=None,
within=None,
dwithin=None,
distance_point=None,
models=None,
limit_to_registered_models=None,
result_class=None,
**extra_kwargs
):
index = haystack.connections[self.connection_alias].get_unified_index()
content_field = index.document_field
if query_string == "*:*":
kwargs = {"query": {"match_all": {}}}
else:
kwargs = {
"query": {
"query_string": {
"default_field": content_field,
"default_operator": DEFAULT_OPERATOR,
"query": query_string,
"analyze_wildcard": True,
"fuzziness": FUZZINESS,
}
}
}
filters = []
if fields:
if isinstance(fields, (list, set)):
fields = " ".join(fields)
kwargs["stored_fields"] = fields
if sort_by is not None:
order_list = []
for field, direction in sort_by:
if field == "distance" and distance_point:
# Do the geo-enabled sort.
lng, lat = distance_point["point"].coords
sort_kwargs = {
"_geo_distance": {
distance_point["field"]: [lng, lat],
"order": direction,
"unit": "km",
}
}
else:
if field == "distance":
warnings.warn(
"In order to sort by distance, you must call the '.distance(...)' method."
)
# Regular sorting.
sort_kwargs = {field: {"order": direction}}
order_list.append(sort_kwargs)
kwargs["sort"] = order_list
# From/size offsets don't seem to work right in Elasticsearch's DSL. :/
# if start_offset is not None:
# kwargs['from'] = start_offset
# if end_offset is not None:
# kwargs['size'] = end_offset - start_offset
if highlight:
# `highlight` can either be True or a dictionary containing custom parameters
# which will be passed to the backend and may override our default settings:
kwargs["highlight"] = {"fields": {content_field: {}}}
if isinstance(highlight, dict):
kwargs["highlight"].update(highlight)
if self.include_spelling:
kwargs["suggest"] = {
"suggest": {
"text": spelling_query or query_string,
"term": {
# Using content_field here will result in suggestions of stemmed words.
"field": "text", # ES7 does not support '_all' field
},
}
}
if narrow_queries is None:
narrow_queries = set()
if facets is not None:
kwargs.setdefault("aggs", {})
for facet_fieldname, extra_options in facets.items():
facet_options = {
"meta": {"_type": "terms"},
"terms": {"field": index.get_facet_fieldname(facet_fieldname)},
}
if "order" in extra_options:
facet_options["meta"]["order"] = extra_options.pop("order")
# Special cases for options applied at the facet level (not the terms level).
if extra_options.pop("global_scope", False):
# Renamed "global_scope" since "global" is a python keyword.
facet_options["global"] = True
if "facet_filter" in extra_options:
facet_options["facet_filter"] = extra_options.pop("facet_filter")
facet_options["terms"].update(extra_options)
kwargs["aggs"][facet_fieldname] = facet_options
if date_facets is not None:
kwargs.setdefault("aggs", {})
for facet_fieldname, value in date_facets.items():
# Need to detect on gap_by & only add amount if it's more than one.
interval = value.get("gap_by").lower()
# Need to detect on amount (can't be applied on months or years).
if value.get("gap_amount", 1) != 1 and interval not in (
"month",
"year",
):
# Just the first character is valid for use.
interval = "%s%s" % (value["gap_amount"], interval[:1])
kwargs["aggs"][facet_fieldname] = {
"meta": {"_type": "date_histogram"},
"date_histogram": {"field": facet_fieldname, "interval": interval},
"aggs": {
facet_fieldname: {
"date_range": {
"field": facet_fieldname,
"ranges": [
{
"from": self._from_python(
value.get("start_date")
),
"to": self._from_python(value.get("end_date")),
}
],
}
}
},
}
if query_facets is not None:
kwargs.setdefault("aggs", {})
for facet_fieldname, value in query_facets:
kwargs["aggs"][facet_fieldname] = {
"meta": {"_type": "query"},
"filter": {"query_string": {"query": value}},
}
if limit_to_registered_models is None:
limit_to_registered_models = getattr(
settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
)
if models and len(models):
model_choices = sorted(get_model_ct(model) for model in models)
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
if len(model_choices) > 0:
filters.append({"terms": {DJANGO_CT: model_choices}})
for q in narrow_queries:
filters.append({"query_string": {"query": q}})
if within is not None:
filters.append(self._build_search_query_within(within))
if dwithin is not None:
filters.append(self._build_search_query_dwithin(dwithin))
# if we want to filter, change the query type to bool
if filters:
kwargs["query"] = {"bool": {"must": kwargs.pop("query")}}
if len(filters) == 1:
kwargs["query"]["bool"]["filter"] = filters[0]
else:
kwargs["query"]["bool"]["filter"] = {"bool": {"must": filters}}
if extra_kwargs:
kwargs.update(extra_kwargs)
return kwargs
def _build_search_query_dwithin(self, dwithin):
lng, lat = dwithin["point"].coords
distance = "%(dist).6f%(unit)s" % {"dist": dwithin["distance"].km, "unit": "km"}
return {
"geo_distance": {
"distance": distance,
dwithin["field"]: {"lat": lat, "lon": lng},
}
}
def _build_search_query_within(self, within):
from haystack.utils.geo import generate_bounding_box
((south, west), (north, east)) = generate_bounding_box(
within["point_1"], within["point_2"]
)
return {
"geo_bounding_box": {
within["field"]: {
"top_left": {"lat": north, "lon": west},
"bottom_right": {"lat": south, "lon": east},
}
}
}
def more_like_this(
self,
model_instance,
additional_query_string=None,
start_offset=0,
end_offset=None,
models=None,
limit_to_registered_models=None,
result_class=None,
**kwargs
):
from haystack import connections
if not self.setup_complete:
self.setup()
# Deferred models will have a different class ("RealClass_Deferred_fieldname")
# which won't be in our registry:
model_klass = model_instance._meta.concrete_model
index = (
connections[self.connection_alias]
.get_unified_index()
.get_index(model_klass)
)
field_name = index.get_content_field()
params = {}
if start_offset is not None:
params["from_"] = start_offset
if end_offset is not None:
params["size"] = end_offset - start_offset
doc_id = get_identifier(model_instance)
try:
# More like this Query
# https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html
mlt_query = {
"query": {
"more_like_this": {
"fields": [field_name],
"like": [
{
"_index": self.index_name,
"_id": doc_id,
},
],
}
}
}
narrow_queries = []
if additional_query_string and additional_query_string != "*:*":
additional_filter = {"query_string": {"query": additional_query_string}}
narrow_queries.append(additional_filter)
if limit_to_registered_models is None:
limit_to_registered_models = getattr(
settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
)
if models and len(models):
model_choices = sorted(get_model_ct(model) for model in models)
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
if len(model_choices) > 0:
model_filter = {"terms": {DJANGO_CT: model_choices}}
narrow_queries.append(model_filter)
if len(narrow_queries) > 0:
mlt_query = {
"query": {
"bool": {
"must": mlt_query["query"],
"filter": {"bool": {"must": list(narrow_queries)}},
}
}
}
raw_results = self.conn.search(
body=mlt_query, index=self.index_name, _source=True, **params
)
except elasticsearch.TransportError as e:
if not self.silently_fail:
raise
self.log.error(
"Failed to fetch More Like This from Elasticsearch for document '%s': %s",
doc_id,
e,
exc_info=True,
)
raw_results = {}
return self._process_results(raw_results, result_class=result_class)
def _process_hits(self, raw_results):
return raw_results.get("hits", {}).get("total", {}).get("value", 0)
def _process_results(
self,
raw_results,
highlight=False,
result_class=None,
distance_point=None,
geo_sort=False,
):
results = super(Elasticsearch7SearchBackend, self)._process_results(
raw_results, highlight, result_class, distance_point, geo_sort
)
facets = {}
if "aggregations" in raw_results:
facets = {"fields": {}, "dates": {}, "queries": {}}
for facet_fieldname, facet_info in raw_results["aggregations"].items():
facet_type = facet_info["meta"]["_type"]
if facet_type == "terms":
facets["fields"][facet_fieldname] = [
(individual["key"], individual["doc_count"])
for individual in facet_info["buckets"]
]
if "order" in facet_info["meta"]:
if facet_info["meta"]["order"] == "reverse_count":
srt = sorted(
facets["fields"][facet_fieldname], key=lambda x: x[1]
)
facets["fields"][facet_fieldname] = srt
elif facet_type == "date_histogram":
# Elasticsearch provides UTC timestamps with an extra three
# decimals of precision, which datetime barfs on.
facets["dates"][facet_fieldname] = [
(
datetime.datetime.utcfromtimestamp(
individual["key"] / 1000
),
individual["doc_count"],
)
for individual in facet_info["buckets"]
]
elif facet_type == "query":
facets["queries"][facet_fieldname] = facet_info["doc_count"]
results["facets"] = facets
return results
def _get_common_mapping(self):
return {
DJANGO_CT: {
"type": "keyword",
},
DJANGO_ID: {
"type": "keyword",
},
}
def build_schema(self, fields):
content_field_name = ""
mapping = self._get_common_mapping()
for _, field_class in fields.items():
field_mapping = FIELD_MAPPINGS.get(
field_class.field_type, DEFAULT_FIELD_MAPPING
).copy()
if field_class.boost != 1.0:
field_mapping["boost"] = field_class.boost
if field_class.document is True:
content_field_name = field_class.index_fieldname
# Do this last to override `text` fields.
if field_mapping["type"] == "text":
if field_class.indexed is False or hasattr(field_class, "facet_for"):
field_mapping["type"] = "keyword"
del field_mapping["analyzer"]
mapping[field_class.index_fieldname] = field_mapping
return (content_field_name, mapping)
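# For reference: with the defaults above, a plain indexed text field is mapped
# as {"type": "text", "analyzer": "snowball"}, while non-indexed or faceted
# text fields fall back to a bare {"type": "keyword"} mapping.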
class Elasticsearch7SearchQuery(ElasticsearchSearchQuery):
def add_field_facet(self, field, **options):
self.facets[field] = options.copy()
class Elasticsearch7SearchEngine(BaseEngine):
backend = Elasticsearch7SearchBackend
query = Elasticsearch7SearchQuery
|
|
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this, default, words
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String, Number, Literal, Other, Whitespace
from pygments.util import get_choice_opt
from pygments import unistring as uni
from pygments.lexers.html import XmlLexer
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
class CSharpLexer(RegexLexer):
"""
For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
.. versionadded:: 0.8
"""
name = 'C#'
aliases = ['csharp', 'c#', 'cs']
filenames = ['*.cs']
mimetypes = ['text/x-csharp'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': r'@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in levels.items():
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*)((?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(Whitespace, using(this), Name.Function, Whitespace,
Punctuation)),
(r'^(\s*)(\[.*?\])', bygroups(Whitespace, Name.Attribute)),
(r'[^\S\n]+', Whitespace),
(r'(\\)(\n)', bygroups(Text, Whitespace)), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Whitespace),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'\$?"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'(#)([ \t]*)(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b(.*?)(\n)',
bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
Comment.Preproc, Whitespace)),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Whitespace,
Keyword)),
(r'(abstract|as|async|await|base|break|by|case|catch|'
r'checked|const|continue|default|delegate|'
r'do|else|enum|event|explicit|extern|false|finally|'
r'fixed|for|foreach|goto|if|implicit|in|interface|'
r'internal|is|let|lock|new|null|on|operator|'
r'out|override|params|private|protected|public|readonly|'
r'ref|return|sealed|sizeof|stackalloc|static|'
r'switch|this|throw|true|try|typeof|'
r'unchecked|unsafe|virtual|void|while|'
r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
r'descending|from|group|into|orderby|select|thenby|where|'
r'join|equals)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
(r'(class|struct)(\s+)', bygroups(Keyword, Whitespace), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Whitespace), 'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop'),
default('#pop'),
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'),
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
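# Usage sketch (standard Pygments API; shown for illustration only):
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print(highlight('class Foo { int Bar() { return 1; } }',
#                   CSharpLexer(unicodelevel='none'), HtmlFormatter()))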
class NemerleLexer(RegexLexer):
"""
For `Nemerle <http://nemerle.org>`_ source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
.. versionadded:: 1.5
"""
name = 'Nemerle'
aliases = ['nemerle']
filenames = ['*.n']
mimetypes = ['text/x-nemerle'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': r'@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in levels.items():
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*)((?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(Whitespace, using(this), Name.Function, Whitespace, \
Punctuation)),
(r'^(\s*)(\[.*?\])', bygroups(Whitespace, Name.Attribute)),
(r'[^\S\n]+', Whitespace),
(r'(\\)(\n)', bygroups(Text, Whitespace)), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Whitespace),
(r'(\$)(\s*)(")', bygroups(String, Whitespace, String),
'splice-string'),
(r'(\$)(\s*)(<#)', bygroups(String, Whitespace, String),
'splice-string2'),
(r'<#', String, 'recursive-string'),
(r'(<\[)(\s*)(' + cs_ident + ':)?', bygroups(Keyword,
Whitespace, Keyword)),
(r'\]\>', Keyword),
# quasiquotation only
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
(r'(#)([ \t]*)(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b',
bygroups(Comment.Preproc, Whitespace, Comment.Preproc), 'preproc'),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Whitespace, Keyword)),
(r'(abstract|and|as|base|catch|def|delegate|'
r'enum|event|extern|false|finally|'
r'fun|implements|interface|internal|'
r'is|macro|match|matches|module|mutable|new|'
r'null|out|override|params|partial|private|'
r'protected|public|ref|sealed|static|'
r'syntax|this|throw|true|try|type|typeof|'
r'virtual|volatile|when|where|with|'
r'assert|assert2|async|break|checked|continue|do|else|'
r'ensures|for|foreach|if|late|lock|new|nolate|'
r'otherwise|regexp|repeat|requires|return|surroundwith|'
r'unchecked|unless|using|while|yield)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
r'short|string|uint|ulong|ushort|void|array|list)\b\??',
Keyword.Type),
(r'(:>?)(\s*)(' + cs_ident + r'\??)',
bygroups(Punctuation, Whitespace, Keyword.Type)),
(r'(class|struct|variant|module)(\s+)',
bygroups(Keyword, Whitespace), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Whitespace),
'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop')
],
'preproc': [
(r'\w+', Comment.Preproc),
(r'[ \t]+', Whitespace),
(r'\n', Whitespace, '#pop')
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
],
'splice-string': [
(r'[^"$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'\\"', String),
(r'"', String, '#pop')
],
'splice-string2': [
(r'[^#<>$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'recursive-string': [
(r'[^#<>]', String),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'splice-string-content': [
(r'if|match', Keyword),
(r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
(cs_ident, Name),
(r'\d+', Number),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop')
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
def analyse_text(text):
"""Nemerle is quite similar to Python, but @if is relatively uncommon
elsewhere."""
result = 0
if '@if' in text:
result += 0.1
return result
class BooLexer(RegexLexer):
"""
For `Boo <http://boo.codehaus.org/>`_ source code.
"""
name = 'Boo'
aliases = ['boo']
filenames = ['*.boo']
mimetypes = ['text/x-boo']
tokens = {
'root': [
(r'\s+', Whitespace),
(r'(#|//).*$', Comment.Single),
(r'/[*]', Comment.Multiline, 'comment'),
(r'[]{}:(),.;[]', Punctuation),
(r'(\\)(\n)', bygroups(Text, Whitespace)),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'/(\\\\|\\[^\\]|[^/\\\s])/', String.Regex),
(r'@/(\\\\|\\[^\\]|[^/\\])*/', String.Regex),
(r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
(r'(as|abstract|callable|constructor|destructor|do|import|'
r'enum|event|final|get|interface|internal|of|override|'
r'partial|private|protected|public|return|set|static|'
r'struct|transient|virtual|yield|super|and|break|cast|'
r'continue|elif|else|ensure|except|for|given|goto|if|in|'
r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
r'while|from|as)\b', Keyword),
(r'def(?=\s+\(.*?\))', Keyword),
(r'(def)(\s+)', bygroups(Keyword, Whitespace), 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
(r'(namespace)(\s+)', bygroups(Keyword, Whitespace), 'namespace'),
(r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
r'assert|checked|enumerate|filter|getter|len|lock|map|'
r'matrix|max|min|normalArrayIndexing|print|property|range|'
r'rawArrayIndexing|required|typeof|unchecked|using|'
r'yieldAll|zip)\b', Name.Builtin),
(r'"""(\\\\|\\"|.*?)"""', String.Double),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[a-zA-Z_]\w*', Name),
(r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
(r'[0-9][0-9.]*(ms?|d|h|s)', Number),
(r'0\d+', Number.Oct),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer),
],
'comment': [
('/[*]', Comment.Multiline, '#push'),
('[*]/', Comment.Multiline, '#pop'),
('[^/*]', Comment.Multiline),
('[*/]', Comment.Multiline)
],
'funcname': [
(r'[a-zA-Z_]\w*', Name.Function, '#pop')
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'namespace': [
(r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
]
}
class VbNetLexer(RegexLexer):
"""
For
`Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
source code.
"""
name = 'VB.net'
aliases = ['vb.net', 'vbnet']
filenames = ['*.vb', '*.bas']
mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?)
uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \
'[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^\s*<.*?>', Name.Attribute),
(r'\s+', Whitespace),
(r'\n', Whitespace),
(r'(rem\b.*?)(\n)', bygroups(Comment, Whitespace)),
(r"('.*?)(\n)", bygroups(Comment, Whitespace)),
(r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|'
r'#ExternalSource.*?\n|#End\s+ExternalSource|'
r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
Comment.Preproc),
(r'[(){}!#,.:]', Punctuation),
(r'(Option)(\s+)(Strict|Explicit|Compare)(\s+)'
r'(On|Off|Binary|Text)', bygroups(Keyword.Declaration, Whitespace,
Keyword.Declaration, Whitespace, Keyword.Declaration)),
(words((
'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case',
'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl',
'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng',
'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare',
'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else',
'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False',
'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo',
'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let',
'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase',
'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing',
'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator',
'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides',
'ParamArray', 'Partial', 'Private', 'Protected', 'Public',
'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume',
'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single',
'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
'Widening', 'With', 'WithEvents', 'WriteOnly'),
prefix=r'(?<!\.)', suffix=r'\b'), Keyword),
(r'(?<!\.)End\b', Keyword, 'end'),
(r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
(r'(?<!\.)(Function|Sub|Property)(\s+)',
bygroups(Keyword, Whitespace), 'funcname'),
(r'(?<!\.)(Class|Structure|Enum)(\s+)',
bygroups(Keyword, Whitespace), 'classname'),
(r'(?<!\.)(Module|Namespace|Imports)(\s+)',
bygroups(Keyword, Whitespace), 'namespace'),
(r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
r'UShort)\b', Keyword.Type),
(r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
(r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
r'<=|>=|<>|[-&*/\\^+=<>\[\]]',
Operator),
('"', String, 'string'),
(r'(_)(\n)', bygroups(Text, Whitespace)), # Line continuation (must be before Name)
(uni_name + '[%&@!#$]?', Name),
('#.*?#', Literal.Date),
(r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float),
(r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
],
'string': [
(r'""', String),
(r'"C?', String, '#pop'),
(r'[^"]+', String),
],
'dim': [
(uni_name, Name.Variable, '#pop'),
default('#pop'), # any other syntax
],
'funcname': [
(uni_name, Name.Function, '#pop'),
],
'classname': [
(uni_name, Name.Class, '#pop'),
],
'namespace': [
(uni_name, Name.Namespace),
(r'\.', Name.Namespace),
default('#pop'),
],
'end': [
(r'\s+', Whitespace),
(r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
Keyword, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE):
return 0.5
class GenericAspxLexer(RegexLexer):
"""
Lexer for ASP.NET pages.
"""
name = 'aspx-gen'
filenames = []
mimetypes = []
flags = re.DOTALL
tokens = {
'root': [
(r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
(r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
Other,
using(XmlLexer))),
(r'(.+?)(?=<)', using(XmlLexer)),
(r'.+', using(XmlLexer)),
],
}
# TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
"""
Lexer for highlighting C# within ASP.NET pages.
"""
name = 'aspx-cs'
aliases = ['aspx-cs']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super().__init__(CSharpLexer, GenericAspxLexer, **options)
def analyse_text(text):
if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
return 0.15
class VbNetAspxLexer(DelegatingLexer):
"""
Lexer for highlighting Visual Basic.net within ASP.NET pages.
"""
name = 'aspx-vb'
aliases = ['aspx-vb']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super().__init__(VbNetLexer, GenericAspxLexer, **options)
def analyse_text(text):
if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
return 0.15
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
"""
For the `F# language <https://fsharp.org/>`_ (version 3.0).
.. versionadded:: 1.5
"""
name = 'F#'
aliases = ['fsharp', 'f#']
filenames = ['*.fs', '*.fsi']
mimetypes = ['text/x-fsharp']
keywords = [
'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
'while', 'with', 'yield!', 'yield',
]
# Reserved words; cannot hurt to color them as keywords too.
keywords += [
'atomic', 'break', 'checked', 'component', 'const', 'constraint',
'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
'functor', 'include', 'method', 'mixin', 'object', 'parallel',
'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
'virtual', 'volatile',
]
keyopts = [
'!=', '#', '&&', '&', r'\(', r'\)', r'\*', r'\+', ',', r'-\.',
'->', '-', r'\.\.', r'\.', '::', ':=', ':>', ':', ';;', ';', '<-',
r'<\]', '<', r'>\]', '>', r'\?\?', r'\?', r'\[<', r'\[\|', r'\[', r'\]',
'_', '`', r'\{', r'\|\]', r'\|', r'\}', '~', '<@@', '<@', '=', '@>', '@@>',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'or', 'not']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = [
'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
'list', 'exn', 'obj', 'enum',
]
# See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
# http://fsharp.org/about/files/spec.pdf for reference. Good luck.
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbrafv]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\U[0-9a-fA-F]{8}', String.Escape),
],
'root': [
(r'\s+', Whitespace),
(r'\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name),
(r'(///.*?)(\n)', bygroups(String.Doc, Whitespace)),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'\(\*(?!\))', Comment, 'comment'),
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'\b(open|module)(\s+)([\w.]+)',
bygroups(Keyword, Whitespace, Name.Namespace)),
(r'\b(let!?)(\s+)(\w+)',
bygroups(Keyword, Whitespace, Name.Variable)),
(r'\b(type)(\s+)(\w+)',
bygroups(Keyword, Whitespace, Name.Class)),
(r'\b(member|override)(\s+)(\w+)(\.)(\w+)',
bygroups(Keyword, Whitespace, Name, Punctuation, Name.Function)),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'``([^`\n\r\t]|`[^`\n\r\t])+``', Name),
(r'(%s)' % '|'.join(keyopts), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r'(#)([ \t]*)(if|endif|else|line|nowarn|light|\d+)\b(.*?)(\n)',
bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
Comment.Preproc, Whitespace)),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
(r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
(r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'@?"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'dotted': [
(r'\s+', Whitespace),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
# e.g. dictionary index access
default('#pop'),
],
'comment': [
(r'[^(*)@"]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
# comments cannot be closed within strings in comments
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'[(*)@]', Comment),
],
'string': [
(r'[^\\"]+', String),
include('escape-sequence'),
(r'\\\n', String),
(r'\n', String), # newlines are allowed in any string
(r'"B?', String, '#pop'),
],
'lstring': [
(r'[^"]+', String),
(r'\n', String),
(r'""', String),
(r'"B?', String, '#pop'),
],
'tqs': [
(r'[^"]+', String),
(r'\n', String),
(r'"""B?', String, '#pop'),
(r'"', String),
],
}
def analyse_text(text):
"""F# doesn't have that many unique features -- |> and <| are weak
indicators."""
result = 0
if '|>' in text:
result += 0.05
if '<|' in text:
result += 0.05
return result
|
|
from __future__ import absolute_import
import pprint
import logging
import importlib
from itertools import repeat
import boto3
from botocore.client import Config
from botocore.exceptions import ClientError
from .models import TaskMeta
log = logging.getLogger(__name__)
def import_class(qualname):
parts = qualname.split(".")
module_name = '.'.join(parts[0:-1])
class_name = parts[-1]
return import_action(from_name=module_name, import_name=class_name)
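# e.g. import_class("package.module.MyClass") behaves like
# `from package.module import MyClass` and returns the class object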
def import_action(from_name, import_name=None):
try:
module = importlib.import_module(from_name)
except ImportError as e:
log.exception(e)
raise
if import_name is None:
return module
try:
return getattr(module, import_name)
except AttributeError as e:
log.exception(e)
raise
def get_client():
boto_config = Config(connect_timeout=50, read_timeout=70)
swf = boto3.client('swf', config=boto_config)
return swf
def get_workflow_data(workflow_class):
domain = workflow_class.domain
tasklist = workflow_class.tasklist
workflow_type_versions = []
activities = []
# workflow_class.name is set with @workflow() from activities.utils
workflow_type_name = "{0}.{1}".format(
workflow_class.name, workflow_class.activities.name
)
# get the entrypoint versions for our workflow types
for name, method in workflow_class.__dict__.iteritems():
if not hasattr(method, "is_entrypoint"):
continue
workflow_type_versions.append(method.version)
    for name, method in workflow_class.activities.__class__.__dict__.items():
if not hasattr(method, "is_activity"):
continue
activities.append((method.swf_name, method.swf_version))
# namedtuple might be better here
return {
"domain": domain,
"tasklist": tasklist,
"workflows": zip(repeat(workflow_type_name), workflow_type_versions),
"activities": activities
}
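# Example of the returned shape (illustrative values only; "workflows" is a
# zip object in the code):
#   {"domain": "my-domain", "tasklist": "main",
#    "workflows": [("MyWorkflow.MyActivities", "1.0")],
#    "activities": [("my_activity", "1.0")]}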
def create_resources(workflow_class):
client = get_client()
data = get_workflow_data(workflow_class)
domain = data["domain"]
tasklist = data["tasklist"]
workflows = data["workflows"]
activities = data["activities"]
create_domain(client, domain)
for name, version in workflows:
create_workflow(client, domain, name, version, tasklist)
for name, version in activities:
create_activity(client, domain, name, version, tasklist)
def create_domain(client, domain, description="", retention_period=1):
log.debug("Creating SWF Domain: '%s'", domain)
try:
client.register_domain(
name=domain,
description=description,
workflowExecutionRetentionPeriodInDays=str(retention_period)
)
except ClientError as e:
code = e.response.get("Error", {}).get("Code")
log.debug("Domain already exists '%s'", code)
def create_workflow(client, domain, workflow, version, tasklist, description="", max_execution_length=(86400 * 365)):
log.debug(
"Creating SWF Workflow: '%s:%s@%s' on task list: '%s'",
workflow, version, domain, tasklist
)
try:
client.register_workflow_type(
domain=domain,
name=workflow,
version=version,
description=description,
defaultExecutionStartToCloseTimeout=str(max_execution_length),
defaultTaskStartToCloseTimeout="NONE",
defaultChildPolicy="TERMINATE",
defaultTaskList={"name": tasklist}
)
except ClientError as e:
code = e.response.get("Error", {}).get("Code")
log.debug("Workflow already exists '%s'", code)
def create_activity(client, domain, activity, version, tasklist, description=""):
log.debug(
"Creating SWF Activity: '%s:%s@%s' on task list: '%s'",
activity, version, domain, tasklist
)
try:
client.register_activity_type(
domain=domain,
name=activity,
version=version,
description=description,
defaultTaskStartToCloseTimeout="NONE",
defaultTaskList={"name": tasklist}
)
except ClientError as e:
code = e.response.get("Error", {}).get("Code")
log.debug("Activity '%s:%s' already exists '%s'", activity, version, code)
def schedule_later(client, task_token, seconds, timer_id, payload=None):
decision = {
"timerId": timer_id,
"startToFireTimeout": str(seconds)
}
if payload is not None:
decision["control"] = payload
client.respond_decision_task_completed(
taskToken=task_token,
decisions=[{
"decisionType": "StartTimer",
"startTimerDecisionAttributes": decision
}]
)
def schedule_activity(
client, task_token, name, version, activity_id,
tasklist, payload="", close_timeout="NONE", start_timeout="10",
timeout="10", heartbeat_timeout="NONE", priority=0, attempt=0):
client.respond_decision_task_completed(
taskToken=task_token,
decisions=[{
"decisionType": "ScheduleActivityTask",
"scheduleActivityTaskDecisionAttributes": {
"activityId": "{0}-{1}".format(activity_id, attempt),
"input": payload,
"taskPriority": str(priority),
"scheduleToCloseTimeout": timeout, # maximum duration for this task
"scheduleToStartTimeout": start_timeout, # maximum duration the task can wait to be assigned to a worker
"startToCloseTimeout": close_timeout, # maximum duration a worker may take to process this task
"heartbeatTimeout": heartbeat_timeout, # maximum time before which a worker processing a task of this type must report progress
"activityType": {
"name": name,
"version": version
},
"taskList": {
"name": tasklist
},
}
}]
)
def schedule_activity_later(client, task_token, payload, timer_id):
later = 5
schedule_later(
client=client,
task_token=task_token,
seconds=later,
payload=payload,
timer_id=timer_id
)
log.info("Scheduled task for later: '%ss' with payload '%s' %s'", later, payload, timer_id)
def cancel_workflow(client, task_token, reason=""):
client.respond_decision_task_completed(
taskToken=task_token,
decisions=[{
"decisionType": "CancelWorkflowExecution",
"cancelWorkflowExecutionDecisionAttributes": {
"details": reason
}
}]
)
def complete_activity(client, task_token, result=None):
client.respond_activity_task_completed(
taskToken=task_token,
result=result
)
def fail_activity(client, task_token, reason, details=""):
client.respond_activity_task_failed(
taskToken=task_token,
reason=reason,
details=details
)
def fail_workflow(client, task_token, reason, details=""):
client.respond_decision_task_completed(
taskToken=task_token,
decisions=[{
"decisionType": "FailWorkflowExecution",
"failWorkflowExecutionDecisionAttributes": {
"reason": reason,
"details": details
}
}]
)
def complete_workflow(client, task_token, result="success"):
client.respond_decision_task_completed(
taskToken=task_token,
decisions=[{
"decisionType": "CompleteWorkflowExecution",
"completeWorkflowExecutionDecisionAttributes": {
"result": result
}
}]
)
def poll_for_decision_task(client, domain, identity, tasklist, next_page_token=None):
params = {
"domain": domain,
"taskList": {"name": tasklist},
"identity": identity,
"reverseOrder": False
}
if next_page_token:
params["nextPageToken"] = next_page_token
try:
task = client.poll_for_decision_task(**params)
except ClientError as e:
        log.error(str(e))
return None
log.debug("Received new decision task: \n%s", pprint.pformat(task))
if "taskToken" not in task:
log.debug("Poll timed out, no new task.")
return None
if "events" not in task:
log.info("No events found in new task")
return None
return task
def poll_for_activity_task(client, domain, identity, tasklist):
params = {
"domain": domain,
"taskList": {"name": tasklist},
"identity": identity,
}
try:
task = client.poll_for_activity_task(**params)
except ClientError as e:
print("WORKER FAILURE")
log.error(e.message)
return None
log.debug("Received new activity task: \n%s", pprint.pformat(task))
if "taskToken" not in task:
log.debug("Poll timed out, no new task.")
return None
return task
def get_task_meta(task, domain, tasklist):
task_token = task["taskToken"]
run_id = task["workflowExecution"]["runId"]
workflow_id = task["workflowExecution"]["workflowId"]
meta = TaskMeta(
task_token=task_token,
run_id=run_id,
workflow_id=workflow_id,
domain=domain,
tasklist=tasklist
)
return meta
|
|
import khmer
import numpy as np
import os
import sys
import pandas as pd
import re
import screed
from argparse import ArgumentTypeError
import multiprocessing
import argparse
import subprocess
import json
from itertools import starmap
import tempfile
# The following is for ease of development (so I don't need to keep re-installing the tool)
try:
from CMash import MinHash as MH
from CMash import Query
except ImportError:
try:
import MinHash as MH
import Query
except ImportError:
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from CMash import MinHash as MH
from CMash import Query
# Note: here, I am using canonical k-mers: i.e. to disambiguate between a k-mer and its reverse complement, I
# simply take as the representative whichever one is lexicographically smaller.
notACTG = re.compile('[^ACTG]') # look for any not ACTG
class TrueContainment:
"""
    This class has functionality to compute the ground truth containment indices and return them in the same format
as the scripts (to ease future testing). It is only intended for:
1. Small-ish training databases
2. Databases that were formed using genomes that you have direct access to (i.e. live on your file system)
"""
def __init__(self, training_database_file: str, k_sizes: str, temp_dir: str):
self.training_database_file = training_database_file
self.k_sizes = self.__parseNumList(k_sizes)
self.CEs = self.__import_database()
self.training_file_names = self.__return_file_names()
self.temp_dir = temp_dir
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
# compute all the training k-mers up front
self._compute_all_training_kmers()
def __import_database(self) -> list:
"""
Private function that imports the HDF5 training file.
:return: a list of CountEstimators
        :rtype: list of MinHash.CountEstimator
"""
CEs = MH.import_multiple_from_single_hdf5(self.training_database_file)
return CEs
def __return_file_names(self):
"""
        Private function that gets all the file names contained in the training data.
:return: a list of file names
:rtype: list
"""
training_file_names = list(map(lambda x: x.input_file_name.decode('utf-8'), self.CEs))
return training_file_names
@staticmethod
def __parseNumList(k_sizes_str: str) -> list:
"""
        Parses a string like 10-21-1 and turns it into a list like [10, 11, 12, ..., 21]
:param k_sizes_str: the <start>-<end>-<increment> string
:type k_sizes_str: str
:return: list of k-mer sizes
:rtype: list
"""
m = re.match(r'(\d+)(?:-(\d+))?(?:-(\d+))?$', k_sizes_str)
# ^ (or use .split('-'). anyway you like.)
if not m:
raise ArgumentTypeError(
"'" + k_sizes_str + "' is not a range of number. Expected forms like '1-5' or '2' or '10-15-2'.")
start = int(m.group(1))
end = int(m.group(2))
if m.group(3):
increment = int(m.group(3))
else:
increment = 1
return list(range(start, end + 1, increment))
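    # Illustrative examples (assuming the single-value fix above):
    #   __parseNumList("10-21-1") -> [10, 11, ..., 21]
    #   __parseNumList("5-10-2")  -> [5, 7, 9]
    #   __parseNumList("7")       -> [7]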
@staticmethod
def _kmc_count(input_file_name: str, output_file_name: str, kmer_size: int, threads=2) -> None:
"""
Calls KMC to compute the k-mers for a given input file name
:param input_file_name:
:type input_file_name:
:param output_file_name:
:type output_file_name:
:param kmer_size:
:type kmer_size:
"""
input_types = ['-fm', '-fq', '-fa', '-fbam']
success = False
for input_type in input_types:
res = subprocess.run(f"kmc -k{kmer_size} {input_type} -r -t{threads} -ci0 -cs3 -j{output_file_name}.log {input_file_name} {output_file_name} .", shell=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if res.returncode == 0:
success = True
break
if not success:
raise Exception(f"Unknown sequence format: must be one of multifasta, fastq, fasta, or BAM (gzipped or uncompressed). Culprit file is {input_file_name}. Command was {res.args}")
@staticmethod
def _kmc_return_distinct_kmers(kmc_log_file: str) -> int:
"""
Parses the KMC log file to return the number of distinct k-mers
:param kmc_log_file:
:type kmc_log_file:
:return:
:rtype:
"""
with open(kmc_log_file, 'r') as fid:
res = json.load(fid)
return res['Stats']['#Unique_k-mers']
@staticmethod
def _kmc_return_intersection_count(kmc_input_file1: str, kmc_input_file2: str) -> int:
"""
Takes two kmc counted files, returns the number of k-mers in their intersection
:param kmc_input_file1:
:type kmc_input_file1:
:param kmc_input_file2:
:type kmc_input_file2:
"""
dir_name = os.path.dirname(kmc_input_file1)
intersect_file = os.path.join(dir_name, f"{os.path.basename(kmc_input_file1)}_intersect_{os.path.basename(kmc_input_file2)}")
dump_file = os.path.join(dir_name, f"{os.path.basename(kmc_input_file1)}_intersect_{os.path.basename(kmc_input_file2)}_dump")
res = subprocess.run(f"kmc_tools simple {kmc_input_file1} -ci1 {kmc_input_file2} -ci1 intersect {intersect_file}", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if res.returncode != 0:
raise Exception(f"The command {res.args} failed to run and returned the returncode={res.returncode}")
res = subprocess.run(f"kmc_dump {intersect_file} {dump_file}; cat {dump_file} | wc -l", shell=True, capture_output=True)
if res.returncode != 0:
raise Exception(f"The command {res.args} failed to run and returned the returncode={res.returncode}")
intersect_count = int(res.stdout)
# then clean up the mess
os.remove(f"{intersect_file}.kmc_pre")
os.remove(f"{intersect_file}.kmc_suf")
os.remove(dump_file)
return intersect_count
def __kmc_output_name_converter(self, input_file: str, k_size: str) -> str:
temp_dir = self.temp_dir
return f"{os.path.join(temp_dir, os.path.basename(input_file))}_k_{k_size}"
def _compute_all_training_kmers(self):
        num_threads = max(1, int(multiprocessing.cpu_count() / float(8)))
to_compute = []
        # create the tuples to be computed on: (input_file, output_kmc_file, kmer_size)
for training_file in self.training_file_names:
for k_size in self.k_sizes:
output_file = self.__kmc_output_name_converter(training_file, k_size)
to_compute.append((training_file, output_file, k_size))
pool = multiprocessing.Pool(processes=int(min(num_threads, len(self.training_file_names))))
res = pool.starmap(self._kmc_count, to_compute)
# consume everything so we know the process has completed
for it in res:
pass
pool.close()
def _return_containment_index(self, query_file: str, i: int, j: int) -> tuple:
k_size = self.k_sizes[j]
training_file = self.training_file_names[i]
training_kmc_output = self.__kmc_output_name_converter(training_file, k_size)
query_kmc_output = self.__kmc_output_name_converter(query_file, k_size)
numerator = self._kmc_return_intersection_count(query_kmc_output, training_kmc_output)
denomenator = self._kmc_return_distinct_kmers(f"{training_kmc_output}.log")
return (i, j, numerator / float(denomenator)) # | train \cap query| / | train |
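    # Worked example (illustrative numbers): if a training file has 1,000
    # distinct k-mers at this k-size and 250 of them also occur in the query,
    # the containment index is 250 / 1000 = 0.25.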
def _return_containment_indicies(self, query_file: str) -> np.ndarray:
"""
        Creates a matrix of containment indices:
        for each i in self.training_file_names:
            for each k in k_sizes:
                containment_indicies[i, k] = |query_file_k-mers \cap training_file_i_k-mers| / |training_file_i_k-mers|
        :param query_file: a file pointing to a fasta/q (maybe compressed) file
        :type query_file: str
        :return: a numpy matrix of containment indices: containment_indicies[i, k] = |query_file_k-mers \cap training_file_i_k-mers| / |training_file_i_k-mers|
:rtype: np.ndarray
"""
training_file_names = self.training_file_names
k_sizes = self.k_sizes
# compute the k-mers in the query file
for k_size in k_sizes:
# store the query file kmc outputs to a dict for future convenience
            self._kmc_count(query_file, self.__kmc_output_name_converter(query_file, k_size), k_size, threads=max(1, int(multiprocessing.cpu_count() / float(4))))
        # compute the containment indices
# rows are the files, columns are the k-mer sizes
containment_indicies = np.zeros((len(training_file_names), len(k_sizes)))
# serial version
#for (j, k_size) in enumerate(k_sizes):
# query_kmc_output = self.__kmc_output_name_converter(query_file, k_size)
# for (i, training_file) in enumerate(training_file_names):
# training_kmc_output = self.__kmc_output_name_converter(training_file, k_size)
# numerator = self._kmc_return_intersection_count(query_kmc_output, training_kmc_output)
# denomenator = self._kmc_return_distinct_kmers(f"{training_kmc_output}.log")
# containment_indicies[i, j] = numerator / float(denomenator) # | train \cap query| / | train |
# parallel version
to_compute = []
for i in range(len(training_file_names)):
for j in range(len(k_sizes)):
to_compute.append((query_file, i, j))
        pool = multiprocessing.Pool(processes=max(1, int(min(multiprocessing.cpu_count() / float(4), len(self.training_file_names)))))
res = pool.starmap(self._return_containment_index, to_compute)
for (i, j, ci) in res:
containment_indicies[i, j] = ci
pool.close()
return containment_indicies
def return_containment_data_frame(self, query_file: str, location_of_thresh: int, coverage_threshold: float) -> pd.DataFrame:
"""
        Returns a Pandas DataFrame with rows indexed by training file names, columns indexed by k-mer sizes, and entries the
        containment indices for the given query_file. Same exact format as CMash/Query.py and scripts/StreamingQueryDNADatabase.py
:param query_file: a file pointing to a fasta/q (maybe compressed) file. Need not be in the training data
:type query_file: str
:param location_of_thresh: where in self.k_sizes the thresholding should take place (-1 means the last one)
:type location_of_thresh: int
:param coverage_threshold: filter out those results that have containment indicies strictly below this threshold
:type coverage_threshold: float
        :return: Returns a Pandas DataFrame with rows indexed by training file names, columns indexed by k-mer sizes, and entries the
        containment indices for the given query_file.
:rtype: pandas.DataFrame
"""
k_range = self.k_sizes
training_file_names = self.training_file_names
containment_indices = self._return_containment_indicies(query_file)
df = Query.return_data_frame(training_file_names=training_file_names,
k_range=k_range,
location_of_thresh=location_of_thresh,
containment_indices=containment_indices,
coverage_threshold=coverage_threshold)
return df
def main():
parser = argparse.ArgumentParser(
description="This script calculates the *ground truth* containment indicies for each of the training/reference sketches"
" via brute force enumeration of all the (canonical) k-mers", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--containment_threshold', type=float,
help="Only return results with containment index above this "
"threshold at the maximum k-mer size.", default=0.1)
parser.add_argument('-l', '--location_of_thresh', type=int,
help="Location in range to apply the threshold passed by the -c flag. -l 2 -c 5-50-10 means the"
" threshold will be applied at k-size 25. Default is largest size.", default=-1)
    parser.add_argument('in_file', help="Input file: FASTA/Q file to be processed")
parser.add_argument('reference_file',
help='Training database/reference file (in HDF5 format). Created with MakeStreamingDNADatabase.py')
parser.add_argument('out_file', help='Output csv file with the containment indices.')
parser.add_argument('k_range', type=str,
help="Range of k-mer sizes in the formate <start>-<end>-<increment>."
" So 5-10-2 means [5, 7, 9]. If <end> is larger than the k-mer size"
"of the training data, this will automatically be reduced.")
# get the args
args = parser.parse_args()
k_sizes = args.k_range
training_database_file = args.reference_file
query_file = args.in_file
out_file = args.out_file
location_of_thresh = args.location_of_thresh
coverage_threshold = args.containment_threshold
temp_dir = tempfile.TemporaryDirectory()
# pre-compute the kmers in the training database
g = TrueContainment(training_database_file=training_database_file, k_sizes=k_sizes, temp_dir=temp_dir.name)
    # compute the containment indices
df = g.return_containment_data_frame(query_file=query_file, location_of_thresh=location_of_thresh, coverage_threshold=coverage_threshold)
# save them
df.to_csv(out_file, index=True, encoding='utf-8')
if __name__ == "__main__":
main()
|
|
import pdb
import logging
import json
import couchdb
from lr.model.base_model import appConfig
import lr.lib.helpers as h
from pylons import request
from pylons.controllers.util import abort
from lr.lib.base import BaseController
from datetime import datetime, timedelta
from lr.lib.oaipmherrors import *
from lr.lib import resumption_token
log = logging.getLogger(__name__)
ANY_TAGS = "any_tags"
IDENTITY = "identity"
END_DATE = 'until'
START_DATE = 'from'
#FULL_DOCS = 'full_docs'
IDS_ONLY = 'ids_only'
CALLBACK = 'callback'
RESUMPTION_TOKEN = 'resumption_token'
SLICE_SERVICE_DOC = "access:slice"
SLICE_DOCUMENT = '_design/learningregistry-slicelite'
class SliceController(BaseController):
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('slice', 'slices')
def __before__(self):
self.enable_flow_control = False
self.fc_id_limit = None
self.fc_doc_limit = None
self.serviceDoc = h.getServiceDocument(appConfig['lr.slice.docid'])
        if self.serviceDoc is not None:
if 'service_id' in self.serviceDoc:
self.service_id = self.serviceDoc['service_id']
if 'service_data' in self.serviceDoc:
serviceData = self.serviceDoc['service_data']
if 'flow_control' in serviceData:
self.enable_flow_control = serviceData['flow_control']
if self.enable_flow_control and 'id_limit' in serviceData:
self.fc_id_limit = serviceData['id_limit']
elif self.enable_flow_control:
self.fc_id_limit = 100
if self.enable_flow_control and 'doc_limit' in serviceData:
self.fc_doc_limit = serviceData['doc_limit']
elif self.enable_flow_control:
self.fc_doc_limit = 100
def _get_params(self):
req_params = {}
if request.method == "POST":
req_params = dict(request.params.copy(), **self._parameterizePOSTBody())
else:
req_params = request.params.copy()
return req_params
def _validate_params(self, req_params):
        if END_DATE in req_params and START_DATE not in req_params:
            abort(500, 'if an end date is specified, a start date must also be specified')
if IDENTITY in req_params and ANY_TAGS in req_params:
abort(500, "Only support for either any_tags or identity not both")
def _parse_params(self, req_params):
params = {}
start, end = self._get_dates(req_params)
if start is not None:
params[START_DATE] = start
params[END_DATE] = end
if IDENTITY in req_params:
params[IDENTITY] = req_params[IDENTITY].lower()
if ANY_TAGS in req_params:
params[ANY_TAGS] = req_params[ANY_TAGS].lower()
if IDS_ONLY in req_params:
params[IDS_ONLY] = req_params[IDS_ONLY] in ['T', 't', 'True', 'true', True]
else:
params[IDS_ONLY] = False
if RESUMPTION_TOKEN in req_params and req_params[RESUMPTION_TOKEN] is not None:
resp_params = resumption_token.parse_token(self.service_id, req_params[RESUMPTION_TOKEN])
params[RESUMPTION_TOKEN] = resp_params
if ANY_TAGS in resp_params:
params[ANY_TAGS] = resp_params[ANY_TAGS]
if IDENTITY in resp_params:
params[IDENTITY] = resp_params[IDENTITY]
return params
def _get_view(self, view_name, params, include_docs=False, resumptionToken=None, limit=None):
db_url = '/'.join([appConfig['couchdb.url'], appConfig['couchdb.db.resourcedata']])
opts = {"stale": appConfig['couchdb.stale.flag'], "reduce": False}
if include_docs:
opts["include_docs"] = True
if self.enable_flow_control and resumptionToken is not None:
if "startkey_docid" in params:
opts['startkey_docid'] = params['startkey_docid']
opts["skip"] = 1
opts['startkey'] = params['startkey']
opts['endkey'] = params['endkey']
print("OPTS: {0}".format(params))
if limit is not None:
opts["limit"] = limit
return h.getView(database_url=db_url, method="POST", view_name=view_name, **opts)
def _get_couch_opts(self, params):
opts = {}
if RESUMPTION_TOKEN in params:
params.update(params[RESUMPTION_TOKEN])
opts.update(params)
if RESUMPTION_TOKEN in params and "startkey_docid" in params[RESUMPTION_TOKEN]:
opts['startkey_docid'] = params[RESUMPTION_TOKEN]['startkey_docid']
if "startkey" in params and "endkey" in params:
opts['startkey'] = params['startkey']
opts['endkey'] = params['endkey']
elif START_DATE in params and IDENTITY in params:
opts['startkey'] = [params[IDENTITY], params[START_DATE]]
opts['endkey'] = [params[IDENTITY], params[END_DATE]]
elif START_DATE in params and ANY_TAGS in params:
opts['startkey'] = [params[ANY_TAGS], params[START_DATE]]
opts['endkey'] = [params[ANY_TAGS], params[END_DATE]]
elif START_DATE in params:
opts['startkey'] = params[START_DATE]
opts['endkey'] = params[END_DATE]
return opts
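    # Illustrative example: for parsed params containing identity 'nsdl' plus
    # 'from'/'until' dates, the view is queried with
    # startkey == ['nsdl', <from date>] and endkey == ['nsdl', <until date>],
    # where the date values are whatever h.convertDateTime produced.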
def _get_index(self, params):
if START_DATE in params and IDENTITY in params:
return SLICE_DOCUMENT + "/_view/identity-by-date"
elif START_DATE in params and ANY_TAGS in params:
return SLICE_DOCUMENT + "/_view/any-tags-by-date"
if START_DATE in params:
return SLICE_DOCUMENT + "/_view/by-date"
def _get_view_total(self, view_name, params, resumptionToken=None):
if resumptionToken and "maxResults" in resumptionToken and resumptionToken["maxResults"] != None:
return resumptionToken["maxResults"]
db_url = '/'.join([appConfig['couchdb.url'], appConfig['couchdb.db.resourcedata']])
opts = {"stale": appConfig['couchdb.stale.flag'], "group": True}
if "startkey" in params and "endkey" in params:
opts['startkey'] = params['startkey']
opts['endkey'] = params['endkey']
totalDocs = 0
view = h.getView(database_url=db_url, method="POST", view_name=view_name, **opts)
for row in view:
if "value" in row:
totalDocs += row["value"]
return totalDocs
def _get_dates(self, params):
cur = h.convertDateTime(params.get(START_DATE, h.EPOCH_STRING))
end = h.convertDateTime(params.get(END_DATE, datetime.utcnow().isoformat() + "Z"))
return (cur, end)
def format_data(self, keys_only, docs, params, forceUnique, maxResults, current_rt=None):
try:
sentIDs = []
prefix = '{"documents":[\n'
num_sent = 0
doc_count = 0
startkey_docid = None
startkey = params.get('startkey', None)
            update_resumption_max_results = current_rt and "maxResults" in current_rt and current_rt["maxResults"] is not None
if docs is not None:
for row in docs:
doc_count += 1
alreadySent = (row["id"] in sentIDs)
if not alreadySent or not forceUnique:
sentIDs.append(row["id"])
startkey_docid = row["id"]
startkey = row['key']
if keys_only:
return_data = {"doc_ID": row["id"]}
else:
# Get the resource data and update with the node timestamp data
# That the view has in value['timestamp']
resourceData = {}
resourceData = row["doc"]
return_data = {"doc_ID": row["id"], "resource_data_description": resourceData}
yield prefix + json.dumps(return_data)
num_sent += 1
prefix = ",\n"
else:
log.debug("{0} skipping: alreadySent {1} / forceUnique {2}".format(doc_count, repr(alreadySent), forceUnique))
if update_resumption_max_results:
current_rt["maxResults"] = current_rt["maxResults"] - 1
if doc_count == 0:
yield prefix
rt = " "
if self.enable_flow_control:
#only create resumption_token if we have sent docs, and we have a next doc to start with
if num_sent < maxResults and startkey_docid is not None:
token = resumption_token.get_token_slice(self.service_id, maxResults=maxResults, startkey_docid=startkey_docid,
startkey=startkey, endkey=params.get('endkey', None),
any_tags=params.get(ANY_TAGS), identity=params.get(IDENTITY))
rt = ''' "resumption_token":"{0}", '''.format(token)
db = couchdb.Server(appConfig['couchdb.url'])[appConfig['couchdb.db.resourcedata']]
yield '\n],' + rt + '"resultCount":' + str(maxResults) + ',"viewUpToDate":' + h.isViewUpdated(db, SLICE_DOCUMENT) + '}'
except Exception as ex:
            log.exception(ex)
# if __name__ == '__main__':
# param = {START_DATE: "2011-03-10", END_DATE: "2011-05-01", IDENTITY: "NSDL 2 LR Data Pump", 'search_key': 'Arithmetic'}
# keys(param)
def index(self, format='html'):
"""GET /slices: All items in the collection"""
# url('slices')
def getResponse(params):
limit = None
if self.enable_flow_control:
if params[IDS_ONLY]:
limit = self.fc_id_limit
else:
limit = self.fc_doc_limit
if CALLBACK in params:
yield "{0}(".format(params[CALLBACK])
current_rt = params.get(RESUMPTION_TOKEN, None)
docs = self._get_view(self._get_index(params), params, not params[IDS_ONLY], current_rt, limit)
maxResults = self._get_view_total(self._get_index(params), params, resumptionToken=current_rt)
for i in self.format_data(params[IDS_ONLY], docs, params, True, maxResults, params.get(RESUMPTION_TOKEN, None)):
yield i
if CALLBACK in params:
yield ");"
# try:
req_params = self._get_params()
self._validate_params(req_params)
params = self._get_couch_opts(self._parse_params(req_params))
return getResponse(params)
# except BadArgumentError as bae:
# return '{ "error": "{0}" }'.format(bae.msg)
# except Exception as e:
# log.error(e)
# return '{ "error": "Unknown Error, check log." }'
#return params["start_date"] + " " + params["identity"] + " " + params["search_key"] + "\n" + str(self.format_data(False,data))
# url('obtain')
def create(self):
"""POST /slices: Create a new item"""
# url('slices')
def new(self, format='html'):
"""GET /slices/new: Form to create a new item"""
# url('new_slice')
def update(self, id):
"""PUT /slices/id: Update an existing item"""
# Forms posted to this method should contain a hidden field:
# <input type="hidden" name="_method" value="PUT" />
# Or using helpers:
# h.form(url('slice', id=ID),
# method='put')
# url('slice', id=ID)
def delete(self, id):
"""DELETE /slices/id: Delete an existing item"""
# Forms posted to this method should contain a hidden field:
# <input type="hidden" name="_method" value="DELETE" />
# Or using helpers:
# h.form(url('slice', id=ID),
# method='delete')
# url('slice', id=ID)
def show(self, id, format='html'):
"""GET /slices/id: Show a specific item"""
# url('slice', id=ID)
def edit(self, id, format='html'):
"""GET /slices/id/edit: Form to edit an existing item"""
# url('edit_slice', id=ID)
class BadArgumentError(Exception):
def __init__(self, msg):
self.msg = msg
self.datetime_now = datetime.utcnow().isoformat()
self.path_url = request.path_url
|
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import random
from typing import List, Optional, Tuple
def _extended_euclidean_algorithm(a: int, b: int) -> Tuple[int, int, int]:
'''The extended Euclidean algorithm.
    Returns a tuple (r, s, t) where r is the GCD of the two inputs and
    r = a s + b t.
'''
r, r_nxt = a, b
s, s_nxt = 1, 0
t, t_nxt = 0, 1
while r_nxt:
q = r // r_nxt
r, r_nxt = r_nxt, r - q * r_nxt
s, s_nxt = s_nxt, s - q * s_nxt
t, t_nxt = t_nxt, t - q * t_nxt
# If both inputs are non-positive, the result comes out negative and we
# should flip all the signs.
if r < 0:
r, s, t = - r, - s, - t
return (r, s, t)
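# Worked example (illustrative): _extended_euclidean_algorithm(12, 20) returns
# (4, 2, -1), since gcd(12, 20) == 4 and 12 * 2 + 20 * (-1) == 4.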
def _intersect_ranges(a: List[Tuple[int, int]],
b: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
ret = []
paired = ([(r, False) for r in a] + [(r, True) for r in b])
arng = None # type: Optional[Tuple[int, int]]
brng = None # type: Optional[Tuple[int, int]]
for (lo, hi), is_b in sorted(paired):
if is_b:
if arng is not None:
a0, a1 = arng
if a0 <= hi and lo <= a1:
ret.append((max(a0, lo), min(a1, hi)))
brng = (lo, hi)
else:
if brng is not None:
b0, b1 = brng
if b0 <= hi and lo <= b1:
ret.append((max(lo, b0), min(hi, b1)))
arng = (lo, hi)
return ret
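# Illustrative example: _intersect_ranges([(0, 10)], [(5, 20)]) == [(5, 10)].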
class KnownMem:
'''A representation of what memory/CSRs have architectural values'''
def __init__(self, top_addr: int):
assert top_addr > 0
self.top_addr = top_addr
# A list of pairs of addresses. If the pair (lo, hi) is in the list
# then each byte in the address range {lo..hi - 1} has a known value.
self.known_ranges = [] # type: List[Tuple[int, int]]
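        # For example, known_ranges == [(0, 4), (16, 32)] would mean that
        # bytes 0..3 and 16..31 are known and everything else is not.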
def copy(self) -> 'KnownMem':
'''Return a shallow copy of the object'''
ret = KnownMem(self.top_addr)
ret.known_ranges = self.known_ranges.copy()
return ret
def merge(self, other: 'KnownMem') -> None:
'''Merge in values from another KnownMem object'''
assert self.top_addr == other.top_addr
self.known_ranges = _intersect_ranges(self.known_ranges,
other.known_ranges)
def touch_range(self, base: int, width: int) -> None:
'''Mark {base .. base + width - 1} as known'''
assert 0 <= width
assert 0 <= base <= self.top_addr - width
for off in range(width):
self.touch_addr(base + off)
def touch_addr(self, addr: int) -> None:
'''Mark word starting at addr as known'''
assert 0 <= addr < self.top_addr
# Find the index of the last range that starts below us, if there is
# one, and the index of the first range that starts above us, if there
# is one.
last_idx_below = None
first_idx_above = None
for idx, (lo, hi) in enumerate(self.known_ranges):
if lo <= addr:
last_idx_below = idx
continue
first_idx_above = idx
break
# Are we below all other ranges?
if last_idx_below is None:
# Are we one address below the next range above? In which case, we
# need to shuffle it back one.
if first_idx_above is not None:
lo, hi = self.known_ranges[first_idx_above]
assert addr < lo
if addr == lo - 1:
self.known_ranges[first_idx_above] = (lo - 1, hi)
return
# Otherwise, we're disjoint. Add a one-element range at the start.
self.known_ranges = [(addr, addr + 1)] + self.known_ranges
return
# If not, are we inside a range? In that case, there's nothing to do.
left_lo, left_hi = self.known_ranges[last_idx_below]
if addr < left_hi:
return
left = self.known_ranges[:last_idx_below]
# Are we just above it?
if addr == left_hi:
# If there is no range above, we can just extend the last range by
# one.
if first_idx_above is None:
self.known_ranges = left + [(left_lo, left_hi + 1)]
return
# Otherwise, does this new address glue two ranges together?
assert first_idx_above == last_idx_below + 1
right_lo, right_hi = self.known_ranges[first_idx_above]
assert addr < right_lo
if addr == right_lo - 1:
self.known_ranges = (left + [(left_lo, right_hi)] +
self.known_ranges[first_idx_above + 1:])
return
# Otherwise, we still extend the range by one (but have to put the
# right hand list back too).
self.known_ranges = (left + [(left_lo, left_hi + 1)] +
self.known_ranges[first_idx_above:])
return
# We are miles above the left range. If there is no range above, we can
# just append a new 1-element range.
left_inc = self.known_ranges[:first_idx_above]
if first_idx_above is None:
self.known_ranges.append((addr, addr + 1))
return
# Otherwise, are we just below the next range?
assert first_idx_above == last_idx_below + 1
right_lo, right_hi = self.known_ranges[first_idx_above]
assert addr < right_lo
if addr == right_lo - 1:
self.known_ranges = (left_inc + [(right_lo - 1, right_hi)] +
self.known_ranges[first_idx_above + 1:])
return
# If not, we just insert a 1-element range in between
self.known_ranges = (left_inc + [(addr, addr + 1)] +
self.known_ranges[first_idx_above:])
return
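    # Illustrative behaviour of touch_addr: starting from known_ranges ==
    # [(0, 4), (8, 12)], touch_addr(4) yields [(0, 5), (8, 12)], touch_addr(7)
    # yields [(0, 4), (7, 12)] and touch_addr(6) yields [(0, 4), (6, 7), (8, 12)].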
def pick_lsu_target(self,
loads_value: bool,
base_addr: int,
offset_range: Tuple[int, int],
offset_align: int,
width: int,
addr_align: int) -> Optional[Tuple[int, int]]:
'''Try to pick an address with base and offset.
If loads_value is true, the memory needs a known value for at least
width bytes starting at that address. The address should be encodable
as base_addr + offset where offset is in offset_range (inclusive) and
is a multiple of offset_align. The address must be a multiple of
addr_align.
On failure, returns None. On success, returns (addr, offset) where addr
is the chosen address and offset is the signed value that should be
added to base_addr to get that address.
'''
assert 0 <= base_addr < (1 << 32)
assert offset_range[0] <= offset_range[1]
assert 1 <= offset_align
assert 1 <= width
assert 1 <= addr_align
# The code below assumes signed integers and no overflows. That doesn't
# allow us to handle things like when base_addr = 0xffffffff, where
# adding an offset of 1 would get us back to zero.
#
# Convert to a signed 32-bit representation here to make that work.
ibase_addr = base_addr - (1 << 32) if base_addr >> 31 else base_addr
# We're trying to pick an offset and an address so that
#
# ibase_addr + offset = addr
#
# Let's ignore offset_range and questions about valid memory addresses
# for a second. We have two alignment requirements from offset and
# addr, which mean we're really trying to satisfy something that looks
# like
#
# a = b i + c j
#
# for a = ibase_addr; b = -offset_align; c = addr_align: find solutions
# i, j.
#
# This is a 2-variable linear Diophantine equation. If gcd(b, c) does
# not divide a, there is no solution. Otherwise, the extended Euclidean
# algorithm yields x0, y0 such that
#
# gcd(b, c) = b x0 + c y0.
#
# Multiplying up by a / gcd(b, c) gives
#
# a = b i0 + c j0
#
# where i0 = x0 * a / gcd(b, c) and j0 = y0 * a / gcd(b, c).
#
# This is the "inhomogeneous part". It's a solution to the equation,
# and every other solution, (i, j) is a translate of the form
#
# i = i0 + k v
# j = j0 - k u
#
# for some k, where u = b / gcd(b, c) and v = c / gcd(b, c).
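        #
        # Worked example (illustrative): with offset_align = 4 and
        # addr_align = 2, we are solving a = -4 i + 2 j. Here gcd = 2, so a
        # solution exists exactly when ibase_addr is even, and successive
        # solutions differ by (i, j) -> (i + 1, j + 2).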
gcd, x0, y0 = _extended_euclidean_algorithm(-offset_align, addr_align)
assert gcd == -offset_align * x0 + addr_align * y0
assert 0 < gcd
if ibase_addr % gcd:
return None
# If gcd divides ibase_addr, we convert x0 and y0 to an initial
# solution (i0, j0) as described above by multiplying up by ibase_addr
# / gcd.
#
# Note: the floor divisions below for scale_factor, minus_u and v are
# actually exact
scale_factor = ibase_addr // gcd
i0 = x0 * scale_factor
j0 = y0 * scale_factor
minus_u = offset_align // gcd
v = addr_align // gcd
assert 0 < v
assert 0 < minus_u
# offset_range gives the possible values of offset, which is - b i
# in the equations above. Re-arranging the equation for i gives:
#
# k v = i - i0
#
# so
#
# b k v = b i - b i0 = - offset - b i0
#
# or
#
# k = (- offset - b i0) / (b v)
#
# Since b < 0 and v > 0, the denominator is negative and this is an
# increasing function of offset, so we can get the allowed range for k
# by evaluating it at the endpoints of offset_range.
#
# Round down in the division when computing k_max and round up when
# computing k_min (because we're interested in the range of integers
# that we can choose). Since b is negative, we negate top and bottom
# when rounding up to allow the usual "(num + den - 1) // den" trick to
# work properly.
bv = - offset_align * v
k_max = (-offset_range[1] + offset_align * i0) // bv
k_min_num = -offset_range[0] + offset_align * i0
k_min = (- k_min_num + ((- bv) - 1)) // (- bv)
# If k_min > k_max, this means b*v gives such big steps that none
# landed in the range of allowed offsets
if k_max < k_min:
return None
# Now, we need to consider which memory locations we can actually use.
# If we're writing memory, we have a single range of allowed addresses
# (all of memory!). If reading, we need to use self.known_ranges. In
# either case, adjust for the fact that we need a width-byte access and
# then rescale everything into "k units".
#
# To do that rescaling, we know that c j = addr and that j = j0 - k u.
# So
#
# j0 - k u = addr / c
# k u = j0 - addr / c
# k = (j0 - addr / c) / u
# = (addr / c - j0) / (- u)
#
# Since u is negative, this is an increasing function of addr, so we
# can use address endpoints to get (disjoint) ranges for k.
k_ranges = []
k_weights = []
byte_ranges = (self.known_ranges
if loads_value else [(0, self.top_addr - 1)])
for byte_lo, byte_top in byte_ranges:
# Since we're doing an access of width bytes, we round byte_top
# down to the largest base address where the access lies completely
# in the range.
base_hi = byte_top - width
if base_hi < byte_lo:
continue
# Compute the valid range for addr/c, rounding inwards.
word_lo = (byte_lo + addr_align - 1) // addr_align
word_hi = base_hi // addr_align
# If word_hi < word_lo, there are no multiples of addr_align in the
# range [byte_lo, base_hi].
if word_hi < word_lo:
continue
# Now translate by -j0 and divide through by -u, rounding inwards.
k_hi = (word_hi - j0) // minus_u
k_lo = (word_lo - j0 + (minus_u - 1)) // minus_u
# If k_hi < k_lo, that means there are no multiples of u in the
# range [word_lo - j0, word_hi - j0].
if k_hi < k_lo:
continue
# Finally, take the intersection with [k_min, k_max]. The
# intersection is non-empty so long as k_lo <= k_max and k_min <=
# k_hi.
if k_lo > k_max or k_min > k_hi:
continue
k_lo = max(k_lo, k_min)
k_hi = min(k_hi, k_max)
k_ranges.append((k_lo, k_hi))
k_weights.append(k_hi - k_lo + 1)
if not k_ranges:
return None
# We can finally pick a value of k. Pick the range (weighted by
# k_weights) and then pick uniformly from in that range.
k_lo, k_hi = random.choices(k_ranges, weights=k_weights)[0]
k = random.randrange(k_lo, k_hi + 1)
# Convert back to a solution to the original problem
i = i0 + k * v
j = j0 + k * minus_u
offset = offset_align * i
addr = addr_align * j
assert addr == ibase_addr + offset
return addr, offset
def pick_bad_addr(self) -> Optional[int]:
'''Pick bad addresses from gaps present in known addresses.'''
gap_list = []
gap_vma = 0
for low, high in self.known_ranges:
assert gap_vma <= low
if gap_vma < low:
gap_list.append((gap_vma, low - 1))
gap_vma = high + 1
if gap_vma <= self.top_addr:
gap_list.append((gap_vma, self.top_addr))
if not gap_list:
return None
gap_len = [1 + hi - lo for lo, hi in gap_list]
bad_addr_lo, bad_addr_hi = random.choices(gap_list, weights=gap_len)[0]
return random.randint(bad_addr_lo, bad_addr_hi)
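    # Illustrative example: with top_addr == 100 and known_ranges ==
    # [(10, 20), (40, 50)], the gaps considered above are (0, 9), (21, 39)
    # and (51, 100), weighted by their lengths.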
|
|
__author__ = 'IVMIT KFU: Gataullin Ravil & Veselovkiy Sergei'
from copy import copy
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from time import time
import warnings
warnings.filterwarnings("ignore")
from sklearn.cross_validation import train_test_split
from structure import Position
class PatchVarianceClassifier:
def __init__(self, init_patch):
self.init_patch_variance = np.var(init_patch.content)
def classify(self, patch):
# return 1 if object is positive detected
# return 0 if object is negative detected
if np.var(patch.content) > 0.5 * self.init_patch_variance:
return 1
else:
return 0
def predict_patch(self, patch):
return np.var(patch.content) / self.init_patch_variance
def predict_position(self, position):
return np.var(position.calculate_patch().content) / self.init_patch_variance
class EnsembleClassifier:
def __init__(self, learning_component):
self.learning_component = learning_component
self.classifier = RandomForestClassifier(max_depth=3)
def classify(self, patch):
# return 1 if object is positive detected
# return 0 if object is negative detected
feature = patch.calculate_feature(self.learning_component.descriptor)
if self.classifier.predict_proba(feature)[0][self.positive_class_index] > 0.5:
return 1
else:
return 0
def predict_patch(self, patch):
feature = patch.calculate_feature(self.learning_component.descriptor)
return self.classifier.predict_proba(feature)[0][self.positive_class_index]
def predict_position(self, position):
feature = position.calculate_patch().calculate_feature(self.learning_component.descriptor)
return self.classifier.predict_proba(feature)[0][self.positive_class_index]
def relearn(self, test_size=0):
samples, weights, targets = self.learning_component.get_training_set(const_weight=True)
train_samples, test_samples, train_targets, test_targets = train_test_split(samples, targets, test_size=test_size, random_state=np.random.RandomState(0))
count_positives = 1.0*np.count_nonzero(train_targets)
count_negatives = 1.0*(len(train_targets) - count_positives)
positive_weight = count_negatives/len(train_targets)
negative_weight = count_positives/len(train_targets)
weights = np.array([positive_weight if target == 1 else negative_weight for target in train_targets])
self.classifier.fit(train_samples, train_targets, sample_weight=weights)
self.learning_component.new_samples_count = 0
if len(test_samples) > 0:
test_result = [self.classifier.predict(sample) for sample in test_samples]
true_positives = 0.0
count_test_positives = 1.0*np.count_nonzero(test_targets)
count_result_positives = 1.0*np.count_nonzero(test_result)
for i in xrange(len(test_targets)):
if test_targets[i] == test_result[i] and test_result[i] == 1:
true_positives += 1
            precision = true_positives / count_result_positives
            recall = true_positives / count_test_positives
print "Precision:", precision
print "Recall", recall
if precision + recall != 0:
print "F-score:", 2 * precision * recall / (precision + recall)
else:
print "F-score:", 0
self.positive_class_index = 0
for elem in self.classifier.classes_:
if elem != 1.0:
self.positive_class_index += 1
else:
break
class NearestNeighborClassifier:
def __init__(self, learning_component, lmbd = 0.1, tetta = 0.6):
self.learning_component = learning_component
self.lmbd = lmbd
self.tetta = tetta
def classify(self, patch):
# return 1 if object is positive detected
# return 0 if object is negative detected
if self.learning_component.relative_similarity(patch) > self.tetta:
return 1
else:
return 0
def predict_patch(self, patch):
return self.learning_component.relative_similarity(patch)
def predict_position(self, position):
return self.learning_component.relative_similarity(position.calculate_patch())
def scanning_window(init_position, scales_step = 1.2, slip_step = 0.1, minimal_bounding_box_size = 20, min_step=1, max_step=20):
flag_inc = True
flag_dec = False
position = copy(init_position)
while min(position.width, position.height) >= minimal_bounding_box_size:
position.update(x=0,y=0)
step_width = min(max(min_step,int(slip_step * position.width)),max_step)
step_height = min(max(min_step,int(slip_step * position.height)),max_step)
while position.is_correct():
while position.is_correct():
yield position
position.update(x=position.x+step_width)
position.update(x=0, y=position.y+step_height)
# if position.is_correct():
# yield position
# is_end = False
# step_width = int(slip_step * position.width)
# step_height = int(slip_step * position.height)
# layer = 1
# xx = position.x
# yy = position.y
# while not is_end:
# is_end = True
# for start_point, vector in (([-1,-1],[1,0]),([1,-1],[0,1]),([1,1],[-1,0]),([-1,1],[0,-1])):
# position.update(x=xx + (start_point[0]*layer + vector[0])*step_width, y=yy+(start_point[1]*layer + vector[1])*step_height)
# while position.is_correct() and xx - layer*step_width <= position.x <= xx + layer*step_width and yy - layer*step_height <= position.y <= yy + layer*step_height:
# is_end = False
# yield position
# position.update(x=position.x+vector[0]*step_width, y=position.y+vector[1]*step_height)
# layer += 1
if flag_inc:
position.update(height=int(position.height * scales_step), width = int(position.width * scales_step))
            if position.height > position.buffer[0].shape[0] or position.width > position.buffer[0].shape[1]:
flag_inc = False
flag_dec = True
position = copy(init_position)
if flag_dec:
position.update(height=int(position.height / scales_step), width = int(position.width / scales_step))
def get_sliding_positions(init_position, scales_step = 1.2, slip_step = 0.1, minimal_bounding_box_size = 20, min_step=2, max_step=2):
sliding_positions = []
flag_inc = True
flag_dec = False
position = copy(init_position)
while min(position.width, position.height) >= minimal_bounding_box_size:
position.update(x=0,y=0)
step_width = min(max(min_step,int(slip_step * position.width)),max_step)
step_height = min(max(min_step,int(slip_step * position.height)),max_step)
while position.is_correct():
while position.is_correct():
sliding_positions.append(copy(position))
position.update(x=position.x+step_width)
position.update(x=0, y=position.y+step_height)
if flag_inc:
position.update(height=int(position.height * scales_step), width = int(position.width * scales_step))
            if position.height > position.buffer[0].shape[0] or position.width > position.buffer[0].shape[1]:
flag_inc = False
flag_dec = True
position = copy(init_position)
if flag_dec:
position.update(height=int(position.height / scales_step), width = int(position.width / scales_step))
return sliding_positions
class Detector:
def __init__(self, init_position, learning_component, threshold_patch_variance=0.5, threshold_ensemble=0.5, threshold_nearest_neighbor=0.6):
self.learning_component = learning_component
self.patch_variance_classifier = PatchVarianceClassifier(learning_component.init_patch)
self.ensemble_classifier = EnsembleClassifier(learning_component)
self.nearest_neighbor_classifier = NearestNeighborClassifier(learning_component)
self.threshold_patch_variance = threshold_patch_variance
self.threshold_ensemble = threshold_ensemble
self.threshold_nearest_neighbor = threshold_nearest_neighbor
self.sliding_positions = get_sliding_positions(init_position, scales_step = 1.2, slip_step = 0.1, minimal_bounding_box_size = 50, min_step=2, max_step=10)
def cascaded_classifier(self, patch):
# 3 stages of classify
# return 1 if object is positive detected
# return 0 if object is negative detected
if self.patch_variance_classifier.predict_patch(patch) < self.threshold_patch_variance:
return 0
        if self.ensemble_classifier.predict_patch(patch) < self.threshold_ensemble:
return 0
# elif self.nearest_neighbor_classifier.predict_patch(patch) < self.threshold_nearest_neighbor:
# return 0
return 1
def detect(self, position, is_tracked):
if self.learning_component.new_samples_count > 10:
start = time()
self.ensemble_classifier.relearn()
print "Relearn:", time() - start
detected_windows = []
predict_times = []
for current_position in self.sliding_positions:
start = time()
proba = self.predict_position(current_position)
predict_times.append(time() - start)
if proba == 1:
detected_windows.append((current_position.get_window(), current_position.calculate_patch(), proba))
self.learning_component.add_new_positive(current_position.calculate_patch())
if is_tracked:
return detected_windows
else:
self.learning_component.add_new_negative(current_position.calculate_patch())
print "Analysed window count:", len(predict_times)
print "Max detection time:", np.max(predict_times)
print "Min detection time:", np.min(predict_times)
print "Mean detection time:", np.mean(predict_times)
return detected_windows
def predict_patch(self, patch):
return self.cascaded_classifier(patch)
def predict_position(self, position):
return self.cascaded_classifier(position.calculate_patch())
|
|
import numpy as np, h5py
import argparse
import cv2
from wrap import CyCompressiveTracker as CompressiveTracker
from wrap import Rect
import torsolib
import tensorflow as tf
'''******************************************************************'''
'''******************************************************************'''
''' TensorFlow Model Definition Start '''
''' ... you can skip to the end of this section '''
''' unless you care about the network details '''
'''******************************************************************'''
'''******************************************************************'''
'''Setup TensorFlow CNN basics'''
# All input patches are 32x32
image_size = 32
# One color channel (grayscale)
num_channels = 1
# Mini-batch size for training
batch_size = 16
# Number of output labels ... 0-9 for digits, and 10 as a code for no digit present, which we're not using here
num_labels = 11
'''Filters'''
#this is sort of black magic, but a 5x5 input patch at the base level should capture
#enough information on a 32x32 image to be relevant ...
patch_size = 5
#this is reasonable for a 2-digit recognizer, but I haven't played with these numbers
depth_1 = 16
depth_2 = depth_1 * 2
depth_3 = depth_2 * 3
# Number of hidden nodes in fully connected layer 1
num_hidden = 64
shape = [batch_size, image_size, image_size, num_channels]
graph = tf.Graph()
with graph.as_default():
'''Variables'''
tf_runtime_dataset = tf.placeholder(
tf.float32, shape=(None, 32, 32, 1))
# Create Variables Function
def init_weights(shape, name):
return tf.Variable(
tf.random_normal(shape=shape, stddev=0.01),
name=name)
def init_biases(shape, name):
return tf.Variable(
tf.constant(1.0, shape=shape),
name=name
)
# Create Function for Image Size: Pooling
# 3 Convolutions
# 2 Max Pooling
def output_size_pool(input_size, conv_filter_size, pool_filter_size,
padding, conv_stride, pool_stride):
if padding == 'same':
padding = -1.00
elif padding == 'valid':
padding = 0.00
else:
return None
# After convolution 1
output_1 = (
((input_size - conv_filter_size - 2 * padding) / conv_stride) + 1.00)
# After pool 1
output_2 = (
((output_1 - pool_filter_size - 2 * padding) / pool_stride) + 1.00)
# After convolution 2
output_3 = (
((output_2 - conv_filter_size - 2 * padding) / conv_stride) + 1.00)
# After pool 2
output_4 = (
((output_3 - pool_filter_size - 2 * padding) / pool_stride) + 1.00)
# After convolution 2
output_5 = (
((output_4 - conv_filter_size - 2 * padding) / conv_stride) + 1.00)
# After pool 2
# output_6 = (
# ((output_5 - pool_filter_size - 2 * padding) / pool_stride) + 1.00)
return int(output_5)
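    # Worked example (illustrative): with input_size=32, conv_filter_size=5,
    # pool_filter_size=2, 'valid' padding, conv_stride=1 and pool_stride=2, the
    # sizes go 32 -> 28 -> 14 -> 10 -> 5 -> 1, so the function returns 1.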
# Convolution 1
# Input channels: num_channels = 1
# Output channels: depth = depth_1
w_c1 = init_weights([patch_size, patch_size, num_channels, depth_1], 'w_c1')
b_c1 = init_biases([depth_1], 'b_c1')
# Convolution 2
# Input channels: num_channels = depth_1
# Output channels: depth = depth_2
w_c2 = init_weights([patch_size, patch_size, depth_1, depth_2], 'w_c2')
b_c2 = init_biases([depth_2], 'b_c2')
# Convolution 3
# Input channels: num_channels = depth_2
# Output channels: depth = depth_3
w_c3 = init_weights([patch_size, patch_size, depth_2, depth_3], 'w_c3')
b_c3 = init_biases([depth_3], 'b_c3')
# Fully Connect Layer 1
final_image_size = output_size_pool(input_size=image_size,
conv_filter_size=5, pool_filter_size=2,
padding='valid', conv_stride=1,
pool_stride=2)
print('Final image size after convolutions {}'.format(final_image_size))
w_fc1 = init_weights([final_image_size*final_image_size*depth_3, num_hidden], 'w_fc1')
b_fc1 = init_biases([num_hidden], 'b_fc1')
# Softmax 1
w_s1 = init_weights([num_hidden, num_labels], 'w_s1')
b_s1 = init_biases([num_labels], 'b_s1')
# Softmax 2
w_s2 = init_weights([num_hidden, num_labels], 'w_s2')
b_s2 = init_biases([num_labels], 'b_s2')
# Softmax 3
w_s3 = init_weights([num_hidden, num_labels], 'w_s3')
b_s3 = init_biases([num_labels], 'b_s3')
# Softmax 4
w_s4 = init_weights([num_hidden, num_labels], 'w_s4')
b_s4 = init_biases([num_labels], 'b_s4')
# Softmax 5
w_s5 = init_weights([num_hidden, num_labels], 'w_s5')
b_s5 = init_biases([num_labels], 'b_s5')
def model(data, keep_prob, shape):
with tf.name_scope("conv_layer_1"):
conv_1 = tf.nn.conv2d(
data, w_c1, strides=[1, 1, 1, 1], padding='VALID')
hidden_conv_1 = tf.nn.relu(conv_1 + b_c1)
pool_1 = tf.nn.max_pool(
hidden_conv_1, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
with tf.name_scope("conv_layer_2"):
conv_2 = tf.nn.conv2d(
pool_1, w_c2, strides=[1, 1, 1, 1], padding='VALID')
hidden_conv_2 = tf.nn.relu(conv_2 + b_c2)
pool_2 = tf.nn.max_pool(
hidden_conv_2, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
with tf.name_scope("conv_layer_3"):
conv_3 = tf.nn.conv2d(
pool_2, w_c3, strides=[1, 1, 1, 1], padding='VALID')
hidden_conv_3 = tf.nn.relu(conv_3 + b_c3)
with tf.name_scope("fc_layer_1"):
hidden_drop = tf.nn.dropout(hidden_conv_3, keep_prob)
shape = tf.shape(hidden_drop)
reshape = tf.reshape(
hidden_drop, [shape[0], shape[1] * shape[2] * shape[3]])
hidden_fc = tf.nn.relu(
tf.matmul(reshape, w_fc1) + b_fc1)
with tf.name_scope("softmax_1"):
logits_1 = tf.matmul(hidden_fc, w_s1) + b_s1
with tf.name_scope("softmax_2"):
logits_2 = tf.matmul(hidden_fc, w_s2) + b_s2
with tf.name_scope("softmax_3"):
logits_3 = tf.matmul(hidden_fc, w_s3) + b_s3
with tf.name_scope("softmax_4"):
logits_4 = tf.matmul(hidden_fc, w_s4) + b_s4
with tf.name_scope("softmax_5"):
logits_5 = tf.matmul(hidden_fc, w_s5) + b_s5
return [logits_1, logits_2, logits_3, logits_4, logits_5]
'''Predictions'''
def softmax_combine(dataset, shape):
train_prediction = tf.stack([
tf.nn.softmax(model(dataset, 1.0, shape)[0]),
tf.nn.softmax(model(dataset, 1.0, shape)[1]),
tf.nn.softmax(model(dataset, 1.0, shape)[2]),
tf.nn.softmax(model(dataset, 1.0, shape)[3]),
tf.nn.softmax(model(dataset, 1.0, shape)[4])])
return train_prediction
single_prediction = softmax_combine(tf_runtime_dataset, shape)
'''Save Model (will be initiated later)'''
saver = tf.train.Saver()
'''******************************************************************'''
'''******************************************************************'''
''' Set up the main program environment '''
'''******************************************************************'''
'''******************************************************************'''
'''Parse the input arguments'''
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", default=None, required=True, help="Input video file")
ap.add_argument("-s", "--starttime", type=float, default=0, help="Skip this number of seconds of video before processing")
ap.add_argument("-n", "--frames", type=int, default=0, required=True, help="Number of frames to process")
ap.add_argument("-o", "--outputdir", default=None, required=True, help="Directory to store output PNGs")
ap.add_argument("-k", "--kmeans", default=None, help="Render the k-means output as the background")
ap.add_argument("-t", "--testbox", default=None, help="Render the 'debug'/test information boxes")
ap.add_argument("-l", "--lines", default=None, help="Render the detected lines")
ap.add_argument("-f", "--finalbox", default=None, help="Render the 'nice' green boxes only")
ap.add_argument("-b", "--bigtext", default=None, help="Make the output text bigger for ease of reading")
args = vars(ap.parse_args())
''' Setup the input and output parameters'''
#determine what video file to open
inputVideoFile = ""
if args['video'] is not None:
inputVideoFile = args['video']
else:
print "Need to specify a video file with the -v option"
exit()
print 'Opening video', inputVideoFile
cap = cv2.VideoCapture(inputVideoFile);
#skip to the desired start time
startVideoTime = 0
if args['starttime'] is not None:
startVideoTime = args['starttime']
cap.set(cv2.CAP_PROP_POS_MSEC,startVideoTime*1000);
#cap.set(cv2.CAP_PROP_POS_MSEC,(1*60+5)*1000+500);
#and we'll start writing PNGs to the output directory, so start the output frame number at 1
saveOutputFrameNumber = 1
maxFramesToProcess = args['frames'] if args['frames'] > 0 else 210
'''OpenCV initial options'''
FONT_NAME_REGULAR = cv2.FONT_HERSHEY_PLAIN;
FONT_NAME_BOLD = cv2.FONT_HERSHEY_DUPLEX;
WORKING_FRAME_WIDTH = 1280
WORKING_FRAME_HEIGHT = 720
#read the first frame
frameReadSuccess, last_rawimage = cap.read()
last_grayimage = cv2.cvtColor(last_rawimage, cv2.COLOR_RGB2GRAY)
#figure out where to store the rendered PNGs
outputDir = args['outputdir']
#parameters to help with filtering initial regions of interest
MIN_ROI_SIZE_X = 16 #minimum pixel-width of the region of interest ... keep in mind we'll be rescaling to 32x32
MIN_ROI_SIZE_Y = 16 #minimum pixel-height of the region of interest
#ideally, the ROIs will just be over the jersey numbers, but we'll want to track more than that
#this is how much to extend the ROI in X and Y to define our 'tracking region'
ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X = 0.125 # +/-12.5% in the x direction
ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y = 0.25 # +/-25% in the Y direction
#number of centers to use for K-means
KMEANS_NUMBER_OF_CLUSTERS = 12
#main storage list for the regions (hopefully jersey numbers) we're tracking
trackedRegions = []
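#each entry of trackedRegions is a dict (built as newPatch further below) holding the
#compressive tracker, its bounding box, the predicted digits and their confidences, plus
#bookkeeping flags: a region starts unconfirmed, becomes 'confirmed' once a second
#deep-network read returns the same number, and is marked 'dead' when tracking fails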
'''******************************************************************'''
'''******************************************************************'''
''' Start the actual tracking '''
'''******************************************************************'''
'''******************************************************************'''
with tf.Session(graph=graph) as session:
'''Restore the deep network Tensorflow model'''
saver.restore(session, "weights.ckpt")
print("Model restored!")
'''While we're able to read the video input'''
while frameReadSuccess:
'''Read video'''
        frameReadSuccess, rawimage = cap.read()
        #stop cleanly once we run out of frames to read
        if not frameReadSuccess:
            break
print '\n\n\n*** Read video frame', rawimage.shape, ' ***'
#resize if we need to
        if (rawimage.shape[1] != WORKING_FRAME_WIDTH or rawimage.shape[0] != WORKING_FRAME_HEIGHT):
rawimage = cv2.resize(rawimage, (WORKING_FRAME_WIDTH, WORKING_FRAME_HEIGHT));
#compute a grayscale version for tracking
grayimage = cv2.cvtColor(rawimage, cv2.COLOR_RGB2GRAY)
grayimage_normalized = cv2.normalize(grayimage.astype('float32'), None, 0.0, 1.0, cv2.NORM_MINMAX) / 255
#copy the raw frame to the 'background' of the OpenCV output texture
#this will eventually be written to the output directory
#as a PNG
outputToPNG = rawimage.copy()
'''KMeans the background'''
binstore, labels, centers, grass_label, white_label = torsolib.separateFrame(rawimage,KMEANS_NUMBER_OF_CLUSTERS)
#binstore = raw binary images of kmeans regions, indexed by label
#labels = int image of label of each pixel
#centers = final k-means color cluster centers
#grass_label = the predicted label index of the 'grass' region
#white_label = the predicted label index of the 'white' region
#if we want to save the kmeans separated image as the background ...
if args['kmeans'] is not None:
centers = np.uint8(centers)
res = centers[labels.flatten()]
outputToPNG = res.reshape((rawimage.shape))
'''Detect the field lines'''
fieldLines = torsolib.getLines(binstore[white_label,:,:])
if args['lines'] is not None:
#if we have opted to render the field lines
if fieldLines is not None:
for i in range(fieldLines.shape[0]):
cv2.line(outputToPNG, (fieldLines[i][0][0], fieldLines[i][0][1]), (fieldLines[i][0][2], fieldLines[i][0][3]), (0, 0, 255), 3,
cv2.LINE_AA)
'''******************************************************************'''
''' First pass: generate some ROIs which may contain jersey numbers '''
'''******************************************************************'''
'''Detect the initial regions of interest which may contain jersey numbers'''
ROIRects = []
#for each of the clusters (except white) look for regions with text labels in them
for i in np.arange(0, KMEANS_NUMBER_OF_CLUSTERS):
if i != grass_label:
ROIRects.extend(torsolib.getLabelRects(binstore[i, :, :]))
#if we detected no ROIs this frame, go to the next frame
if (len(ROIRects) == 0):
continue;
#do some heuristics to eliminate certain ROI regions ... for example regions which are mostly grass
#or are just white lines and grass ... see the torsolib.py file for documentation
ROIRects = np.array(ROIRects)
ROIRects = torsolib.cleanRects(ROIRects, MIN_ROI_SIZE_X, MIN_ROI_SIZE_Y, labels, grass_label, white_label, torsolib.RECT_MODE_XYWH)
#now we're going to delete some ROIs which are unfavorable ...
delInds = []
#go through the ROI rects and expand/reshape them to better include all of the hypothetical
#jersey number region ... see torsolib.py for documentation
#also while we're doing this, eliminate any ROIs which are intersected by a field line
for i in np.arange(0, ROIRects.shape[0]):
inRect = ROIRects[i, :]
betterNewTrackedBoundingBox = torsolib.refineTorso(inRect, rawimage, grayimage)
ROIRects[i, :] = betterNewTrackedBoundingBox
if fieldLines is not None:
if (torsolib.linesBoxIntersect(fieldLines, betterNewTrackedBoundingBox[0], betterNewTrackedBoundingBox[1], betterNewTrackedBoundingBox[2], betterNewTrackedBoundingBox[3]) == True):
#add this ROI rect to the delete list
delInds.append(i)
#if we want to render the deleted rects for debugging
if args['testbox'] is not None:
cv2.rectangle(outputToPNG, (betterNewTrackedBoundingBox[0], betterNewTrackedBoundingBox[1]), (betterNewTrackedBoundingBox[2], betterNewTrackedBoundingBox[3]), (0, 128, 255), 1)
#go ahead and remove the ROI rects which don't make the cut
if len(delInds) > 0:
ROIRects = np.delete(ROIRects, delInds, 0)
#another clean pass
ROIRects = torsolib.cleanRects(ROIRects, MIN_ROI_SIZE_X, MIN_ROI_SIZE_Y, labels, grass_label, white_label, torsolib.RECT_MODE_X1Y1X2Y2)
#if we want to render the boxes that made the cut ....
if args['testbox'] is not None:
for i in np.arange(0, ROIRects.shape[0]):
inRect = ROIRects[i, :]
cv2.rectangle(outputToPNG, (inRect[0], inRect[1]), (inRect[2], inRect[3]), (128, 255, 128), 1)
'''******************************************************************'''
''' Second pass: Pass the ROIs through the deep network to see if they '''
''' probably contain a number '''
'''******************************************************************'''
print '*** Detecting new ROIs, first pass through deep net ***'
#determine how many ROI regions we're left with after initial culling
numRegions = ROIRects.shape[0];
#pixel data to evaluate
#there are two types of evaluations we do ... one with a single region passed in and another with
#multiple image regions passed in at once (for speed)
evaluation_singleregion_dataset = np.ndarray([1, 32, 32, 1], dtype='float32') #single-region dataset
evaluation_multiregion_dataset = np.ndarray([numRegions, 32, 32, 1], dtype='float32') #multi-region dataset
#coords of the pixel data
evaluation_rects = np.ndarray([numRegions, 4], dtype='int32')
'''Build dataset to pass to the deep network '''
for i in np.arange(0, numRegions):
tempRegion = ROIRects[i, :]
            #get the normalized grayscale crop for this ROI
croppedGray = grayimage_normalized[tempRegion[1]:tempRegion[3], tempRegion[0]:tempRegion[2]]
croppedGrayScaled = cv2.resize(croppedGray, (32, 32))
im = croppedGrayScaled;
'''normalize image'''
mean = np.mean(im, dtype='float32')
std = np.std(im, dtype='float32', ddof=1)
if std < 1e-4: std = 1.
im = (im - mean) / std
'''set up the data input'''
evaluation_multiregion_dataset[i, :, :, 0] = im[:, :]
evaluation_rects[i, :] = tempRegion
#initialize the feed dictionary for the Tensorflow network
feed_dict = {tf_runtime_dataset: evaluation_multiregion_dataset}
        #actually pass the data through the deep network
singleregion_predictions = session.run([single_prediction], feed_dict=feed_dict)
#the resulting predictions from the network
singleregion_deepNums = np.reshape(np.argmax(singleregion_predictions, 3).T, (numRegions, 5))
#the raw softmax intensities of those number predictions (1 = 100%, 0 = 0%, etc.)
singleregion_deepPreds = np.reshape(np.max(singleregion_predictions, 3).T, (numRegions, 5))
#Debugging deep network output ... just annoying if you don't need it
#print "Raw preds for this frame: ", singleregion_deepPreds
#print "Raw numbers for this frame: ", singleregion_deepNums
'''Now evaluate each of the ROIs based on their deep network response'''
for i in np.arange(0, numRegions):
#the ROI region can be floats at this point, convert to actual pixels, should check for bounds, but eh ...
regionRect = evaluation_rects[i, :]
regionRect[0] = int(regionRect[0])
regionRect[1] = int(regionRect[1])
regionRect[2] = int(regionRect[2])
regionRect[3] = int(regionRect[3])
#there should be two predicted numbers (we're not considering single-number predictions for this demo)
#convert into an actual int that we can use ....
singleregion_predictedNumber = singleregion_deepNums[i, 0] * 10 + singleregion_deepNums[i, 1];
# heuristic: make sure the first two prediction certainties sum to > 150%
# heuristic: make sure we're at least 90% certain of one number
# heuristic: make sure that the second number is recognized ... 10 is the code for no-number
if singleregion_deepPreds[i, 0] + singleregion_deepPreds[i, 1] > 1.5 and \
(singleregion_deepPreds[i, 0] > 0.9 or singleregion_deepPreds[i, 1] > 0.9) and \
singleregion_deepNums[i, 1] != 10:
#debug information ....
print "Recognized new ROI with number ", singleregion_predictedNumber, ", confidences=", singleregion_deepPreds[i, 0], singleregion_deepPreds[i, 1]
#if we're accepting this ROI as probably containing a jersey number, we're going to need to track it
#go ahead and prepare a rectangle for tracking
newTrackingBox = Rect(regionRect[0], regionRect[1], regionRect[2] - regionRect[0],
(regionRect[3] - regionRect[1]))
#make sure this ROI isn't already being tracked ... compare the potential new region
#against the already tracked regions
alreadyTracked = False
for region in trackedRegions:
#just standard box-box intersection checking
displayBox = region['box']
intersect = torsolib.intersectRect([displayBox.x, displayBox.y, displayBox.x + displayBox.width, displayBox.y + displayBox.height],
[newTrackingBox.x, newTrackingBox.y, newTrackingBox.x + newTrackingBox.width, newTrackingBox.y + newTrackingBox.height])
#regions can be marked as 'dead' when tracking fails ...
#if this tracked region HASN'T been marked as dead yet (i.e. is actually still being tracked)
if region['dead'] == 0:
#if this region is 'confirmed' ... meaning that two separate deep-network checks have
#yielded the same number ... then this patch is REALLY already being tracked, so don't
#start a new tracker on it
if (intersect == True and region['confirmed'] == 1):
alreadyTracked = True
                    #if this is a new patch, but it's intersecting an existing tracked region with the SAME number
#consider this a re-detection confirmation and go ahead and confirm the existing tracked
#region
if (intersect == True and (singleregion_predictedNumber == region['nums'][0]*10 + region['nums'][1]) and region['trackedFrames'] == 1):
region['confirmed'] = 1
alreadyTracked = True
#if we haven't tracked this ROI yet ...
if (alreadyTracked == False):
#if the user opted to render debugging boxes ...
if args['testbox'] is not None:
cv2.rectangle(outputToPNG, (regionRect[0], regionRect[1]), (regionRect[2], regionRect[3]), (0, 255, 255), 2)
cv2.putText(outputToPNG, str(singleregion_predictedNumber), (regionRect[0], regionRect[1]), FONT_NAME_REGULAR, 0.5, (0, 255, 255), 1)
                    #get the mean HSV values of this patch ... used to determine when tracking has failed
_, newHSV = torsolib.getPatchHSV(rawimage[regionRect[1]:regionRect[3], regionRect[0]:regionRect[2], :]);
#initialize a new compressive tracker on the region
ct = CompressiveTracker(grayimage, Rect(newTrackingBox.x - newTrackingBox.width * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X, newTrackingBox.y - newTrackingBox.height * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y,
newTrackingBox.width * (1 + 2 * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X), newTrackingBox.height * (1 + 2 * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y)))
#and add the new region as an officially tracked region
newPatch = {'image':grayimage, #raw image for the compressive tracker ... huge waste, but why not
'startColor':newHSV, #the start HSV value of the tracked region
'tracker':ct, #compressive tracker object
                                'confirmed':0, #if this region has been confirmed (i.e., two deep-net passes yielded the same number)
'box': newTrackingBox, #the coordinates of the region for the compressive tracker
'preds': singleregion_deepPreds[i, :], #the raw prediction soft-max values from the network (i.e. certainties)
'nums': singleregion_deepNums[i, :], #the soft-max values (i.e., predicted numbers)
'health': 2, #health of the region, when this goes to zero, this tracked patch will be removed
'trackedFrames': 0, #number of frames this region has been tracked over
'dead':0} #whether tracking has failed for this region
trackedRegions.append(newPatch) #add this tracked region to the tracking list
else: #if this ROI is already being tracked .... skip it
#... but render a box if the user wants
if args['testbox'] is not None:
cv2.rectangle(outputToPNG, (regionRect[0], regionRect[1]), (regionRect[2], regionRect[3]), (255, 255, 0), 2)
cv2.putText(outputToPNG, str(singleregion_predictedNumber), (regionRect[0], regionRect[1]), FONT_NAME_REGULAR, 0.5, (255, 255, 0), 1)
else: #if this ROI didn't generate a strong enough response from the deep network
#... skip, but render a debug box if the user wants
if args['testbox'] is not None:
cv2.rectangle(outputToPNG, (regionRect[0], regionRect[1]), (regionRect[2], regionRect[3]), 0, 0)
cv2.putText(outputToPNG, str(singleregion_predictedNumber), (regionRect[0], regionRect[3]), FONT_NAME_REGULAR, 0.5, (0, 0, 0), 1)
'''******************************************************************'''
''' Third pass: Track all the tracked regions using compressive sensing '''
'''******************************************************************'''
print '*** Tracking ROIs with compressive tracking ***'
for region in trackedRegions:
#if we just detected this tracked region, don't re-track it on the same frame
if region['trackedFrames'] == 0:
print "Skipping patch because just detected", region['nums']
continue
else:
                #the compressive tracker doesn't have great boundary checking ... so if the region gets too close
#to the edge, just skip it ... otherwise the tracker will generate a memory access error
if region['box'].y < 10 or region['box'].x < 10 \
or region['box'].x > WORKING_FRAME_WIDTH - 10 or region['box'].y > WORKING_FRAME_HEIGHT - 10:
print "Skipping regions because too close to image edge"
continue
else:
print "Tracking region with numbers ", region['nums']
'''Actual compressive tracking'''
#update the bounding box of the tracked region
oldBox = region['box'];
displayBox = region['tracker'].process_frame(grayimage, Rect(oldBox.x - oldBox.width * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X,
oldBox.y - oldBox.height * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y,
oldBox.width * (1 + 2*ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X),
oldBox.height * (1 + 2*ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y)))
#display a debug box if requested
if args['testbox'] is not None:
cv2.rectangle(outputToPNG, (oldBox.x - int(oldBox.width * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X), oldBox.y - int(oldBox.height * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y)),
(oldBox.x+int(oldBox.width* (1 + ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X)),oldBox.y+int(oldBox.height* (1 + ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y))),
np.array([255, 0, 255]),
1)
#create a new rectangle object from the new region
displayBox = Rect(displayBox.x + (displayBox.width - displayBox.width / (1.0 + 2 * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X)) / 2,
displayBox.y + (displayBox.height - displayBox.height / (1.0 + 2 * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y)) / 2,
displayBox.width / (1.0 + 2 * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X),
displayBox.height / (1.0 + 2 * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y));
#display a debug box if requested
if args['testbox'] is not None:
cv2.rectangle(outputToPNG, (displayBox.x - int(displayBox.width * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X), displayBox.y - int(displayBox.height * ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y)),
(displayBox.x + int(displayBox.width * (1 + ROI_EXTEND_FOR_TRACKING_PERCENTAGE_X)), displayBox.y + int(displayBox.height * (1 + ROI_EXTEND_FOR_TRACKING_PERCENTAGE_Y))),
np.array([255, 255, 255]),
1)
'''Check the validity of the tracking results'''
#the compressive tracker can actually fail and return zero ... if this isn't the case ...
if (displayBox.width > 0 and displayBox.height > 0):
#update the bounding box of the tracked region
region['box'] = displayBox
#debug information
print "Tracked ", region['nums'], "to", displayBox.x,displayBox.y,displayBox.width,displayBox.height
newTrackedBoundingBox = np.array([displayBox.x, displayBox.y, displayBox.width, displayBox.height]);
#if the user wants a debug box...
if args['testbox'] is not None:
cv2.rectangle(outputToPNG, (newTrackedBoundingBox[0], newTrackedBoundingBox[1]), (newTrackedBoundingBox[0] + newTrackedBoundingBox[2], newTrackedBoundingBox[1] + newTrackedBoundingBox[3]), np.array([255, 0, 255]), 1)
'''At one time there was a pass to improve the region by expanding it to contain more gradient, turned out to not be necessary'''
#betterNewTrackedBoundingBox = torsolib.refineTorso(newTrackedBoundingBox, rawimage, grayimage)
betterNewTrackedBoundingBox = np.array([newTrackedBoundingBox[0], newTrackedBoundingBox[1], newTrackedBoundingBox[0] + newTrackedBoundingBox[2], newTrackedBoundingBox[1] + newTrackedBoundingBox[3]])
#if the user wants a debug box ...
if args['testbox'] is not None:
cv2.rectangle(outputToPNG, (betterNewTrackedBoundingBox[0], betterNewTrackedBoundingBox[1]), (betterNewTrackedBoundingBox[2], betterNewTrackedBoundingBox[3]), np.array([255, 0, 255]), 1)
'''Pass the tracked region through the deep network (for confirmation of recognition)'''
croppedGray = grayimage_normalized[betterNewTrackedBoundingBox[1]:betterNewTrackedBoundingBox[3], betterNewTrackedBoundingBox[0]:betterNewTrackedBoundingBox[2]];
croppedGrayScaled = cv2.resize(croppedGray, (32, 32))
im = croppedGrayScaled;
mean = np.mean(im, dtype='float32')
std = np.std(im, dtype='float32', ddof=1)
if std < 1e-4: std = 1.
im = (im - mean) / std
'''set up the data input for the deep network'''
evaluation_singleregion_dataset[0, :, :, 0] = im[:, :]
feed_dict = {tf_runtime_dataset: evaluation_singleregion_dataset}
#run the deep net on this single region
singleregion_predictions = session.run([single_prediction], feed_dict=feed_dict)
singleregion_deepNums = np.reshape(np.argmax(singleregion_predictions, 3).T, (1, 5))
singleregion_deepPreds = np.reshape(np.max(singleregion_predictions, 3).T, (1, 5))
singleregion_predictedNumber = singleregion_deepNums[0, 0] * 10 + singleregion_deepNums[0, 1]
print " After-tracking nums ", singleregion_deepNums #soft-max predicted numbers
print " After-tracking confidences ", singleregion_deepPreds #soft-max prediction strengths
'''Now that this region has been run through the deep net again, handle the output'''
                        '''Confirm regions that re-detected the same number after tracking'''
                        #this mirrors our deep-net response check earlier, but instead of requiring 150% total certainty,
#we're only requiring 80% total certainty for a reconfirmation, and only requiring that we are
#60% sure of a single number
if singleregion_deepPreds[0, 0] + singleregion_deepPreds[0, 1] > 0.8 and \
(singleregion_deepPreds[0, 0] > 0.6 or singleregion_deepPreds[0, 1] > 0.6) and \
singleregion_deepNums[0, 1] != 10 and \
(singleregion_predictedNumber == region['nums'][0]*10 + region['nums'][1]): #also make sure we're predicting the same number as before!
'''Same as before, check that this tracked region isn't on top of something else'''
alreadyTracked = False
for testRegion in trackedRegions:
testBox = testRegion['box']
intersect = torsolib.intersectRect([displayBox.x, displayBox.y, displayBox.x + displayBox.width, displayBox.y + displayBox.height],
[testBox.x, testBox.y, testBox.x + testBox.width,
testBox.y + testBox.height])
if testRegion['dead'] == 0:
if (intersect == True and testRegion['confirmed'] == 1):
alreadyTracked = True
#if this region isn't on top of anything else, go ahead and confirm it
if (alreadyTracked == False):
region['confirmed'] = 1
region['dead'] = 0
#and go ahead and update the mean HSV values of the patch ... probably don't need to do this
#since it won't have changed a ton from the time of initial detection but might as well
_, newHSV = torsolib.getPatchHSV(rawimage[betterNewTrackedBoundingBox[1]:betterNewTrackedBoundingBox[3], betterNewTrackedBoundingBox[0]:betterNewTrackedBoundingBox[2], :]);
region['startColor'] = newHSV
print " Confirmed new tracked region ", singleregion_predictedNumber, singleregion_deepPreds[0, 0], singleregion_deepPreds[0, 1]
#render a debug box if requested
if args['testbox'] is not None:
cv2.putText(outputToPNG, str(singleregion_predictedNumber), (betterNewTrackedBoundingBox[0], betterNewTrackedBoundingBox[1]), FONT_NAME_REGULAR, 0.5, (255, 0, 255), 1)
else: #if the response from the deep network wasn't strong enough (or predicted a different number) don't confirm this region
#render a debug box
if args['testbox'] is not None:
cv2.putText(outputToPNG, str(singleregion_predictedNumber), (betterNewTrackedBoundingBox[0], betterNewTrackedBoundingBox[3]), FONT_NAME_REGULAR, 0.5, (128, 0, 128), 1)
else: #if the compressive tracker failed and returned a zero-size tracking region
print "Box zero error***************"
'''******************************************************************'''
        ''' Fourth pass: Do book-keeping on the tracked regions and display the bounding boxes and text'''
'''******************************************************************'''
'''Decrement health of tracked regions'''
for region in trackedRegions:
region['trackedFrames'] = region['trackedFrames'] + 1
region['health'] = region['health'] - 1
'''If any tracked regions have zero health, and they haven't yet been confirmed, delete them'''
        #iterate over a copy so removing a region doesn't skip the next element
        for region in list(trackedRegions):
if region['health'] <= 0 and region['confirmed'] == 0:
print 'Killing ', region['nums'], ' because low health'
trackedRegions.remove(region)
'''If the tracked region has a significantly different HSV value than when tracking started '''
'''tracking has probably failed ... do a 'color-justified kill' (colorkill) of the tracked region'''
#iterate over the tracked regions
for region in trackedRegions:
#get the CURRENT mean HSV of the region
displayBox = region['box']
color, hsv = torsolib.getPatchHSV(rawimage[displayBox.y:displayBox.y + displayBox.height, displayBox.x:displayBox.x + displayBox.width, :]);
#dot-product with the mean HSV value when tracking started
comp = torsolib.compareColor(hsv, region['startColor'])
            #if the hue has changed significantly, tracking has probably failed
#so mark this region as dead
if comp < 0.9:
print 'Killing ', region['nums'], ' because region HSV changed too much (similarity=', comp,')'
region['dead'] = 1
'''Display unconfirmed regions'''
# if the user wants a debug box ...
if args['testbox'] is not None:
for region in trackedRegions:
if (region['confirmed'] == 0):
displayBox = region['box']
displayNumber = region['nums'][0] * 10 + region['nums'][1]
cv2.rectangle(outputToPNG, (displayBox.x, displayBox.y), (displayBox.x + displayBox.width, displayBox.y + displayBox.height),
np.array([0, 0, 255]), 1)
cv2.putText(outputToPNG, str(displayNumber) + "," + str(region['health']), (displayBox.x + int(displayBox.width / 2), displayBox.y), FONT_NAME_REGULAR, 0.5,
(0, 0, 255), 1)
'''Display confirmed regions'''
for region in trackedRegions:
displayBox = region['box']
displayNumber = region['nums'][0] * 10 + region['nums'][1]
if (region['confirmed'] == 1):
#if tracking hasn't failed yet for this region
if region['dead'] == 0:
#if the user wants ANY boxes rendered ....
if (args['testbox'] is not None) or (args['finalbox'] is not None):
cv2.rectangle(outputToPNG, (displayBox.x, displayBox.y), (displayBox.x + displayBox.width, displayBox.y + displayBox.height),
np.array([0, 255, 0]), 2)
if (args['bigtext'] is not None):
cv2.putText(outputToPNG, " " + str(displayNumber),
(displayBox.x , displayBox.y),
FONT_NAME_BOLD, 0.75,
(0, 255, 0), 1)
else:
cv2.putText(outputToPNG, " " + str(displayNumber), (displayBox.x + int(displayBox.width / 2), displayBox.y),
FONT_NAME_REGULAR, 0.5,
(0, 255, 0), 1)
else: #if tracking has failed for this region
#if the user wants debug boxes
if args['testbox'] is not None:
cv2.rectangle(outputToPNG, (displayBox.x, displayBox.y), (displayBox.x + displayBox.width, displayBox.y + displayBox.height),
np.array([0, 0, 0]), 2)
cv2.putText(outputToPNG, "dead", (displayBox.x + int(displayBox.width / 2), displayBox.y),
FONT_NAME_REGULAR, 0.5,
(0, 0, 0), 1)
#save the rendered PNG for later analysis
fileName = outputDir + "/" + str(saveOutputFrameNumber).zfill(5) + ".png"
cv2.imwrite(fileName, outputToPNG)
print "Writing to ", fileName
print "Time in video ",cap.get(cv2.CAP_PROP_POS_MSEC);
cv2.imshow("Current frame", outputToPNG)
cv2.waitKey(1)
saveOutputFrameNumber = saveOutputFrameNumber +1
if (saveOutputFrameNumber > maxFramesToProcess):
exit()
print('Successfully completed tracking!')
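#The numbered PNGs written above can be stitched into a video for review afterwards;
#a hypothetical example (assuming ffmpeg is installed and the output directory was
#./output_frames) would be:
#   ffmpeg -framerate 30 -i ./output_frames/%05d.png -c:v libx264 tracked.mp4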
|
|
import unittest
import datetime
import threading
import time
from .. import task_queue
from .. import connector
from .. import queue
from .. import encoder
class TaskQueueTestCase:
order_matters = True
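    #TaskQueueTestCase is a mixin: the concrete classes at the bottom of this module
    #combine it with unittest.TestCase and provide a setUp() that builds
    #self.test_task_queue against a specific backend (Redis, Redis cluster, MongoDB,
    #or a tasker server). order_matters declares whether that backend is expected to
    #preserve FIFO ordering; the multi-node Redis cluster case sets it to False and
    #the ordering assertions fall back to membership checks.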
def test_purge_tasks(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task',
)
self.assertEqual(
first=self.test_task_queue.number_of_enqueued_tasks(
task_name='test_task',
),
second=0,
)
task = self.test_task_queue.craft_task(
task_name='test_task',
)
self.test_task_queue.apply_async_one(
task=task,
priority='NORMAL',
)
self.assertEqual(
first=self.test_task_queue.number_of_enqueued_tasks(
task_name='test_task',
),
second=1,
)
self.test_task_queue.purge_tasks(
task_name='test_task',
)
self.assertEqual(
first=self.test_task_queue.number_of_enqueued_tasks(
task_name='test_task',
),
second=0,
)
def test_number_of_enqueued_tasks(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task',
)
self.assertEqual(
first=self.test_task_queue.number_of_enqueued_tasks(
task_name='test_task',
),
second=0,
)
task = self.test_task_queue.craft_task(
task_name='test_task',
)
self.test_task_queue.apply_async_one(
task=task,
priority='NORMAL',
)
self.assertEqual(
first=self.test_task_queue.number_of_enqueued_tasks(
task_name='test_task',
),
second=1,
)
self.test_task_queue.purge_tasks(
task_name='test_task',
)
self.assertEqual(
first=self.test_task_queue.number_of_enqueued_tasks(
task_name='test_task',
),
second=0,
)
self.test_task_queue.apply_async_many(
tasks=[task] * 100,
priority='NORMAL',
)
self.assertEqual(
first=self.test_task_queue.number_of_enqueued_tasks(
task_name='test_task',
),
second=100,
)
self.test_task_queue.apply_async_many(
tasks=[task] * 1000,
priority='NORMAL',
)
self.assertEqual(
first=self.test_task_queue.number_of_enqueued_tasks(
task_name='test_task',
),
second=1100,
)
self.test_task_queue.purge_tasks(
task_name='test_task',
)
self.assertEqual(
first=self.test_task_queue.number_of_enqueued_tasks(
task_name='test_task',
),
second=0,
)
def test_craft_task(
self,
):
task = self.test_task_queue.craft_task(
task_name='test_task',
args=(),
kwargs={},
report_completion=False,
)
current_date = datetime.datetime.utcnow().timestamp()
date = task.pop('date')
self.assertAlmostEqual(
first=date / (10 ** 8),
second=current_date / (10 ** 8),
)
self.assertEqual(
first=task,
second={
'name': 'test_task',
'args': (),
'kwargs': {},
'run_count': 0,
'completion_key': None,
}
)
task = self.test_task_queue.craft_task(
task_name='test_task',
args=(
1,
2,
3,
),
kwargs={
'a': 1,
'b': 2,
},
report_completion=True,
)
current_date = datetime.datetime.utcnow().timestamp()
date = task.pop('date')
self.assertAlmostEqual(
first=date / (10 ** 8),
second=current_date / (10 ** 8),
)
completion_key = task.pop('completion_key')
self.assertNotEqual(
first=completion_key,
second=None,
)
self.assertEqual(
first=task,
second={
'name': 'test_task',
'args': (
1,
2,
3,
),
'kwargs': {
'a': 1,
'b': 2,
},
'run_count': 0,
}
)
def test_report_complete(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task',
)
task = self.test_task_queue.craft_task(
task_name='test_task',
args=(),
kwargs={},
report_completion=True,
)
completion_key = task['completion_key']
self.assertTrue(
expr=self.test_task_queue.queue.has_result(
queue_name='test_task',
result_id=completion_key,
),
)
self.test_task_queue.report_complete(
task=task,
)
self.assertFalse(
expr=self.test_task_queue.queue.has_result(
queue_name='test_task',
result_id=completion_key,
)
)
def test_wait_task_finished(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task',
)
task = self.test_task_queue.craft_task(
task_name='test_task',
args=(),
kwargs={},
report_completion=True,
)
report_complete_timer = threading.Timer(
interval=2.0,
function=self.test_task_queue.report_complete,
args=(
task,
),
)
report_complete_timer.start()
before = time.time()
self.test_task_queue.wait_task_finished(
task=task,
)
after = time.time()
self.assertTrue(
expr=3.0 > after - before > 2.0,
)
def test_wait_queue_empty(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task',
)
task = self.test_task_queue.craft_task(
task_name='test_task',
args=(),
kwargs={},
report_completion=True,
)
self.test_task_queue.apply_async_one(
task=task,
priority='NORMAL',
)
purge_tasks_timer = threading.Timer(
interval=2.0,
function=self.test_task_queue.purge_tasks,
args=(
'test_task',
),
)
purge_tasks_timer.start()
before = time.time()
self.test_task_queue.wait_queue_empty(
task_name='test_task',
)
after = time.time()
self.assertTrue(
expr=3.5 > after - before > 3.0,
)
def test_apply_async_one(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task',
)
task_one = self.test_task_queue.craft_task(
task_name='test_task',
args=(
1,
),
kwargs={},
report_completion=False,
)
task_two = self.test_task_queue.craft_task(
task_name='test_task',
args=(),
kwargs={
'a': 1,
},
report_completion=True,
)
task_three = self.test_task_queue.craft_task(
task_name='test_task',
args=(),
kwargs={},
report_completion=True,
)
self.test_task_queue.apply_async_one(
task=task_one,
priority='NORMAL',
)
self.test_task_queue.apply_async_one(
task=task_two,
priority='NORMAL',
)
self.test_task_queue.apply_async_one(
task=task_three,
priority='NORMAL',
)
task_one_test = self.test_task_queue.queue.dequeue(
queue_name='test_task',
number_of_items=1,
)[0]
task_two_test = self.test_task_queue.queue.dequeue(
queue_name='test_task',
number_of_items=1,
)[0]
task_three_test = self.test_task_queue.queue.dequeue(
queue_name='test_task',
number_of_items=1,
)[0]
if self.order_matters:
self.assertEqual(
first=task_one,
second=task_one_test,
)
self.assertEqual(
first=task_two,
second=task_two_test,
)
self.assertEqual(
first=task_three,
second=task_three_test,
)
else:
self.assertIn(
member=task_one,
container=[
task_one_test,
task_two_test,
task_three_test,
],
)
self.assertIn(
member=task_two,
container=[
task_one_test,
task_two_test,
task_three_test,
],
)
self.assertIn(
member=task_three,
container=[
task_one_test,
task_two_test,
task_three_test,
],
)
if self.order_matters:
self.assertTrue(
expr=self.test_task_queue.queue.has_result(
queue_name='test_task',
result_id=task_two['completion_key'],
)
)
self.assertTrue(
expr=self.test_task_queue.queue.has_result(
queue_name='test_task',
result_id=task_three['completion_key'],
)
)
else:
tasks_to_wait = [
task_to_wait
for task_to_wait in [
task_one_test,
task_two_test,
task_three_test,
]
if task_to_wait['completion_key'] is not None
]
self.assertEqual(
first=len(tasks_to_wait),
second=2,
)
for task_to_wait in tasks_to_wait:
self.assertTrue(
expr=self.test_task_queue.queue.has_result(
queue_name='test_task',
result_id=task_to_wait['completion_key'],
)
)
def test_apply_async_many(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task_one',
)
self.test_task_queue.purge_tasks(
task_name='test_task_two',
)
task_one = self.test_task_queue.craft_task(
task_name='test_task_one',
args=(
1,
),
kwargs={},
report_completion=False,
)
task_two = self.test_task_queue.craft_task(
task_name='test_task_one',
args=(),
kwargs={
'a': 1,
},
report_completion=True,
)
task_three = self.test_task_queue.craft_task(
task_name='test_task_two',
args=(),
kwargs={},
report_completion=True,
)
self.test_task_queue.apply_async_many(
tasks=[
task_one,
task_two,
task_three,
],
priority='NORMAL',
)
task_one_test = self.test_task_queue.queue.dequeue(
queue_name='test_task_one',
number_of_items=1,
)[0]
task_two_test = self.test_task_queue.queue.dequeue(
queue_name='test_task_one',
number_of_items=1,
)[0]
task_three_test = self.test_task_queue.queue.dequeue(
queue_name='test_task_two',
number_of_items=1,
)[0]
if self.order_matters:
self.assertEqual(
first=task_one,
second=task_one_test,
)
self.assertEqual(
first=task_two,
second=task_two_test,
)
self.assertEqual(
first=task_three,
second=task_three_test,
)
else:
self.assertIn(
member=task_one,
container=[
task_one_test,
task_two_test,
],
)
self.assertIn(
member=task_two,
container=[
task_one_test,
task_two_test,
],
)
self.assertEqual(
first=task_three,
second=task_three_test,
)
if self.order_matters:
self.assertTrue(
expr=self.test_task_queue.queue.has_result(
queue_name='test_task_one',
result_id=task_two['completion_key'],
)
)
self.assertTrue(
expr=self.test_task_queue.queue.has_result(
queue_name='test_task_two',
result_id=task_three['completion_key'],
)
)
else:
tasks_to_wait = [
task_to_wait
for task_to_wait in [
task_one_test,
task_two_test,
task_three_test,
]
if task_to_wait['completion_key'] is not None
]
self.assertEqual(
first=len(tasks_to_wait),
second=2,
)
for task_to_wait in tasks_to_wait:
self.assertTrue(
expr=self.test_task_queue.queue.has_result(
queue_name=task_to_wait['name'],
result_id=task_to_wait['completion_key'],
)
)
self.assertEqual(
first=task_one,
second=task_one_test,
)
self.assertEqual(
first=task_two,
second=task_two_test,
)
self.assertEqual(
first=task_three,
second=task_three_test,
)
self.assertTrue(
expr=self.test_task_queue.queue.has_result(
queue_name='test_task_one',
result_id=task_two['completion_key'],
)
)
self.assertTrue(
expr=self.test_task_queue.queue.has_result(
queue_name='test_task_two',
result_id=task_three['completion_key'],
)
)
def test_queue_priority(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task',
)
task_NORMAL_priority = self.test_task_queue.craft_task(
task_name='test_task',
args=(
1,
),
kwargs={
'priority': 'NORMAL',
},
report_completion=False,
)
task_HIGH_priority = self.test_task_queue.craft_task(
task_name='test_task',
args=(),
kwargs={
'priority': 'HIGH',
},
report_completion=False,
)
self.test_task_queue.apply_async_one(
task=task_NORMAL_priority,
priority='NORMAL',
)
self.test_task_queue.apply_async_one(
task=task_NORMAL_priority,
priority='NORMAL',
)
self.test_task_queue.apply_async_one(
task=task_HIGH_priority,
priority='HIGH',
)
self.test_task_queue.apply_async_one(
task=task_HIGH_priority,
priority='HIGH',
)
self.test_task_queue.apply_async_one(
task=task_NORMAL_priority,
priority='NORMAL',
)
self.test_task_queue.apply_async_one(
task=task_NORMAL_priority,
priority='NORMAL',
)
self.test_task_queue.apply_async_one(
task=task_HIGH_priority,
priority='HIGH',
)
self.test_task_queue.apply_async_one(
task=task_HIGH_priority,
priority='HIGH',
)
self.assertEqual(
first=self.test_task_queue.number_of_enqueued_tasks(
task_name='test_task',
),
second=8,
)
high_priority_tasks = self.test_task_queue.get_tasks(
task_name='test_task',
number_of_tasks=2,
)
high_priority_tasks += self.test_task_queue.get_tasks(
task_name='test_task',
number_of_tasks=2,
)
low_priority_tasks = self.test_task_queue.get_tasks(
task_name='test_task',
number_of_tasks=2,
)
low_priority_tasks += self.test_task_queue.get_tasks(
task_name='test_task',
number_of_tasks=2,
)
low_priority_tasks += self.test_task_queue.get_tasks(
task_name='test_task',
number_of_tasks=2,
)
self.assertEqual(
first=[task_HIGH_priority['kwargs']['priority']] * 4,
second=[task['kwargs']['priority'] for task in high_priority_tasks],
)
self.assertEqual(
first=[task_NORMAL_priority['kwargs']['priority']] * 4,
second=[task['kwargs']['priority'] for task in low_priority_tasks],
)
def test_get_tasks(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task_one',
)
self.test_task_queue.purge_tasks(
task_name='test_task_two',
)
task_one = self.test_task_queue.craft_task(
task_name='test_task_one',
args=(
1,
),
kwargs={},
report_completion=False,
)
task_two = self.test_task_queue.craft_task(
task_name='test_task_one',
args=(),
kwargs={
'a': 1,
},
report_completion=True,
)
task_three = self.test_task_queue.craft_task(
task_name='test_task_two',
args=(),
kwargs={},
report_completion=True,
)
self.test_task_queue.apply_async_many(
tasks=[
task_one,
task_two,
task_three,
],
priority='NORMAL',
)
tasks_one = self.test_task_queue.get_tasks(
task_name='test_task_one',
number_of_tasks=3,
)
tasks_two = self.test_task_queue.get_tasks(
task_name='test_task_two',
number_of_tasks=1,
)
self.assertIn(
member=task_one,
container=tasks_one,
)
self.assertIn(
member=task_two,
container=tasks_one,
)
self.assertIn(
member=task_three,
container=tasks_two,
)
def test_retry(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task',
)
task_one = self.test_task_queue.craft_task(
task_name='test_task',
args=(
1,
),
kwargs={},
report_completion=False,
)
self.assertEqual(task_one['run_count'], 0)
self.test_task_queue.apply_async_one(
task=task_one,
priority='NORMAL',
)
task_one = self.test_task_queue.queue.dequeue(
queue_name='test_task',
number_of_items=1,
)[0]
self.test_task_queue.retry(
task=task_one,
)
task_one = self.test_task_queue.queue.dequeue(
queue_name='test_task',
number_of_items=1,
)[0]
self.assertEqual(
first=task_one['run_count'],
second=1,
)
def test_requeue(
self,
):
self.test_task_queue.purge_tasks(
task_name='test_task',
)
task_one = self.test_task_queue.craft_task(
task_name='test_task',
args=(
1,
),
kwargs={},
report_completion=False,
)
self.assertEqual(task_one['run_count'], 0)
self.test_task_queue.apply_async_one(
task=task_one,
priority='NORMAL',
)
task_one = self.test_task_queue.queue.dequeue(
queue_name='test_task',
number_of_items=1,
)[0]
self.test_task_queue.requeue(
task=task_one,
)
task_one = self.test_task_queue.queue.dequeue(
queue_name='test_task',
number_of_items=1,
)[0]
self.assertEqual(
first=task_one['run_count'],
second=0,
)
class RedisSingleServerTaskQueueTestCase(
TaskQueueTestCase,
unittest.TestCase,
):
order_matters = True
def setUp(
self,
):
redis_cluster_connector = connector.redis_cluster.Connector(
nodes=[
{
'host': '127.0.0.1',
'port': 6379,
'password': 'e082ebf6c7fff3997c4bb1cb64d6bdecd0351fa270402d98d35acceef07c6b97',
'database': 0,
},
]
)
test_queue = queue.Queue(
connector=redis_cluster_connector,
encoder=encoder.encoder.Encoder(
compressor_name='dummy',
serializer_name='pickle',
),
)
self.test_task_queue = task_queue.TaskQueue(
queue=test_queue,
)
class RedisClusterSingleServerTaskQueueTestCase(
TaskQueueTestCase,
unittest.TestCase,
):
order_matters = True
def setUp(
self,
):
redis_cluster_connector = connector.redis_cluster.Connector(
nodes=[
{
'host': '127.0.0.1',
'port': 6379,
'password': 'e082ebf6c7fff3997c4bb1cb64d6bdecd0351fa270402d98d35acceef07c6b97',
'database': 0,
},
]
)
test_queue = queue.Queue(
connector=redis_cluster_connector,
encoder=encoder.encoder.Encoder(
compressor_name='dummy',
serializer_name='pickle',
),
)
self.test_task_queue = task_queue.TaskQueue(
queue=test_queue,
)
class RedisClusterMultipleServerTaskQueueTestCase(
TaskQueueTestCase,
unittest.TestCase,
):
order_matters = False
def setUp(
self,
):
redis_cluster_connector = connector.redis_cluster.Connector(
nodes=[
{
'host': '127.0.0.1',
'port': 6379,
'password': 'e082ebf6c7fff3997c4bb1cb64d6bdecd0351fa270402d98d35acceef07c6b97',
'database': 0,
},
{
'host': '127.0.0.1',
'port': 6380,
'password': 'e082ebf6c7fff3997c4bb1cb64d6bdecd0351fa270402d98d35acceef07c6b97',
'database': 0,
},
]
)
test_queue = queue.Queue(
connector=redis_cluster_connector,
encoder=encoder.encoder.Encoder(
compressor_name='dummy',
serializer_name='pickle',
),
)
self.test_task_queue = task_queue.TaskQueue(
queue=test_queue,
)
class MongoTaskQueueTestCase(
TaskQueueTestCase,
unittest.TestCase,
):
order_matters = True
def setUp(
self,
):
mongo_connector = connector.mongo.Connector(
mongodb_uri='mongodb://localhost:27030/',
)
test_queue = queue.Queue(
connector=mongo_connector,
encoder=encoder.encoder.Encoder(
compressor_name='dummy',
serializer_name='pickle',
),
)
self.test_task_queue = task_queue.TaskQueue(
queue=test_queue,
)
class TaskerServerTaskQueueTestCase(
TaskQueueTestCase,
unittest.TestCase,
):
order_matters = True
def setUp(
self,
):
tasker_server_connector = connector.tasker.Connector(
host='127.0.0.1',
port=50001,
)
test_queue = queue.Queue(
connector=tasker_server_connector,
encoder=encoder.encoder.Encoder(
compressor_name='dummy',
serializer_name='pickle',
),
)
self.test_task_queue = task_queue.TaskQueue(
queue=test_queue,
)
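#Note: every test case above talks to a live backend configured in its setUp()
#(Redis on 127.0.0.1:6379/6380, MongoDB on localhost:27030, a tasker server on
#127.0.0.1:50001). A minimal way to run the suite from the package root might be
#`python -m unittest <package>.tests.test_task_queue`; the exact module path is an
#assumption, since it depends on how the package is laid out.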
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from manila import context
from manila import db
from manila import exception
from manila.scheduler import driver
from manila.scheduler import manager
from manila.scheduler import simple
from manila.share import rpcapi as share_rpcapi
from manila import test
from manila import utils
CONF = cfg.CONF
class SchedulerManagerTestCase(test.TestCase):
"""Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
driver_cls_name = 'manila.scheduler.driver.Scheduler'
def setUp(self):
super(SchedulerManagerTestCase, self).setUp()
self.flags(scheduler_driver=self.driver_cls_name)
self.manager = self.manager_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.fake_args = (1, 2, 3)
self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
def test_1_correct_init(self):
# Correct scheduler driver
manager = self.manager
self.assertTrue(isinstance(manager.driver, self.driver_cls))
def test_update_service_capabilities(self):
service_name = 'fake_service'
host = 'fake_host'
with mock.patch.object(self.manager.driver,
'update_service_capabilities', mock.Mock()):
self.manager.update_service_capabilities(
self.context, service_name=service_name, host=host)
self.manager.driver.update_service_capabilities.\
assert_called_once_with(service_name, host, {})
with mock.patch.object(self.manager.driver,
'update_service_capabilities', mock.Mock()):
capabilities = {'fake_capability': 'fake_value'}
self.manager.update_service_capabilities(
self.context, service_name=service_name, host=host,
capabilities=capabilities)
self.manager.driver.update_service_capabilities.\
assert_called_once_with(service_name, host, capabilities)
@mock.patch.object(db, 'share_update', mock.Mock())
    def test_create_share_exception_puts_share_in_error_state(self):
        """Test that a NoValidHost exception from create_share
        puts the share in 'error' state and eats the exception.
"""
def raise_no_valid_host(*args, **kwargs):
raise exception.NoValidHost(reason="")
fake_share_id = 1
topic = 'fake_topic'
share_id = fake_share_id
request_spec = {'share_id': fake_share_id}
with mock.patch.object(self.manager.driver,
'schedule_create_share',
mock.Mock(side_effect=raise_no_valid_host)):
self.manager.create_share(self.context, topic, share_id,
request_spec=request_spec,
filter_properties={})
db.share_update.assert_called_once_with(
self.context, fake_share_id, {'status': 'error'})
self.manager.driver.schedule_create_share.assert_called_once_with(
self.context, request_spec, {})
def test_get_pools(self):
"""Ensure get_pools exists and calls driver.get_pools."""
mock_get_pools = self.mock_object(self.manager.driver, 'get_pools',
mock.Mock(return_value='fake_pools'))
result = self.manager.get_pools(self.context, filters='fake_filters')
mock_get_pools.assert_called_once_with(self.context, 'fake_filters')
self.assertEqual('fake_pools', result)
class SchedulerTestCase(test.TestCase):
"""Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
def setUp(self):
super(SchedulerTestCase, self).setUp()
self.driver = self.driver_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
def test_update_service_capabilities(self):
service_name = 'fake_service'
host = 'fake_host'
capabilities = {'fake_capability': 'fake_value'}
with mock.patch.object(self.driver.host_manager,
'update_service_capabilities', mock.Mock()):
self.driver.update_service_capabilities(
service_name, host, capabilities)
self.driver.host_manager.update_service_capabilities.\
assert_called_once_with(service_name, host, capabilities)
def test_hosts_up(self):
service1 = {'host': 'host1'}
service2 = {'host': 'host2'}
services = [service1, service2]
def fake_service_is_up(*args, **kwargs):
if args[0]['host'] == 'host1':
return False
return True
with mock.patch.object(db, 'service_get_all_by_topic',
mock.Mock(return_value=services)):
with mock.patch.object(utils, 'service_is_up',
mock.Mock(side_effect=fake_service_is_up)):
result = self.driver.hosts_up(self.context, self.topic)
self.assertEqual(result, ['host2'])
db.service_get_all_by_topic.assert_called_once_with(
self.context, self.topic)
class SchedulerDriverBaseTestCase(SchedulerTestCase):
"""Test cases for base scheduler driver class methods.
These can't fail if the driver is changed.
"""
def test_unimplemented_schedule(self):
fake_args = (1, 2, 3)
fake_kwargs = {'cat': 'meow'}
self.assertRaises(NotImplementedError, self.driver.schedule,
self.context, self.topic, 'schedule_something',
*fake_args, **fake_kwargs)
class SchedulerDriverModuleTestCase(test.TestCase):
"""Test case for scheduler driver module methods."""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
self.context = context.RequestContext('fake_user', 'fake_project')
@mock.patch.object(db, 'share_update', mock.Mock())
def test_share_host_update_db(self):
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value='fake-now')):
driver.share_update_db(self.context, 31337, 'fake_host')
db.share_update.assert_called_once_with(
self.context, 31337,
{'host': 'fake_host', 'scheduled_at': 'fake-now'})
class SimpleSchedulerSharesTestCase(test.TestCase):
"""Test case for simple scheduler create share method."""
def setUp(self):
super(SimpleSchedulerSharesTestCase, self).setUp()
self.mock_object(share_rpcapi, 'ShareAPI')
self.driver = simple.SimpleScheduler()
self.context = context.RequestContext('fake_user', 'fake_project')
self.admin_context = context.RequestContext('fake_admin_user',
'fake_project')
self.admin_context.is_admin = True
@mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
def test_create_share_if_two_services_up(self):
share_id = 'fake'
fake_share = {'id': share_id, 'size': 1}
fake_service_1 = {'disabled': False, 'host': 'fake_host1'}
fake_service_2 = {'disabled': False, 'host': 'fake_host2'}
fake_result = [(fake_service_1, 2), (fake_service_2, 1)]
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
}
with mock.patch.object(db, 'service_get_all_share_sorted',
mock.Mock(return_value=fake_result)):
with mock.patch.object(driver, 'share_update_db',
mock.Mock(return_value=fake_share)):
self.driver.schedule_create_share(self.context,
fake_request_spec, {})
utils.service_is_up.assert_called_once_with(
utils.IsAMatcher(dict))
db.service_get_all_share_sorted.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
driver.share_update_db.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
share_id, 'fake_host1')
def test_create_share_if_services_not_available(self):
share_id = 'fake'
fake_share = {'id': share_id, 'size': 1}
fake_result = []
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
}
with mock.patch.object(db, 'service_get_all_share_sorted',
mock.Mock(return_value=fake_result)):
self.assertRaises(exception.NoValidHost,
self.driver.schedule_create_share,
self.context, fake_request_spec, {})
db.service_get_all_share_sorted.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
def test_create_share_if_max_gigabytes_exceeded(self):
share_id = 'fake'
fake_share = {'id': share_id, 'size': 10001}
fake_service_1 = {'disabled': False, 'host': 'fake_host1'}
fake_service_2 = {'disabled': False, 'host': 'fake_host2'}
fake_result = [(fake_service_1, 5), (fake_service_2, 7)]
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
}
with mock.patch.object(db, 'service_get_all_share_sorted',
mock.Mock(return_value=fake_result)):
self.assertRaises(exception.NoValidHost,
self.driver.schedule_create_share,
self.context, fake_request_spec, {})
db.service_get_all_share_sorted.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
@mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
def test_create_share_availability_zone(self):
share_id = 'fake'
fake_share = {
'id': share_id,
'availability_zone': 'fake:fake',
'size': 1,
}
fake_service_1 = {
'disabled': False, 'host': 'fake_host1',
'availability_zone': 'fake',
}
fake_service_2 = {
'disabled': False, 'host': 'fake_host2',
'availability_zone': 'super_fake',
}
fake_result = [(fake_service_1, 0), (fake_service_2, 1)]
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
}
with mock.patch.object(db, 'service_get_all_share_sorted',
mock.Mock(return_value=fake_result)):
with mock.patch.object(driver, 'share_update_db',
mock.Mock(return_value=fake_share)):
self.driver.schedule_create_share(self.context,
fake_request_spec, {})
utils.service_is_up.assert_called_once_with(fake_service_1)
driver.share_update_db.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share_id,
fake_service_1['host'])
db.service_get_all_share_sorted.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
@mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
def test_create_share_availability_zone_on_host(self):
share_id = 'fake'
fake_share = {
'id': share_id,
'availability_zone': 'fake:fake',
'size': 1,
}
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
}
with mock.patch.object(db, 'service_get_by_args',
mock.Mock(return_value='fake_service')):
with mock.patch.object(driver, 'share_update_db',
mock.Mock(return_value=fake_share)):
self.driver.schedule_create_share(self.admin_context,
fake_request_spec, {})
utils.service_is_up.assert_called_once_with('fake_service')
db.service_get_by_args.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
'fake', 'manila-share')
driver.share_update_db.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share_id, 'fake')
@mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=False))
def test_create_share_availability_zone_if_service_down(self):
share_id = 'fake'
fake_share = {
'id': share_id,
'availability_zone': 'fake:fake',
'size': 1,
}
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
}
with mock.patch.object(db, 'service_get_by_args',
mock.Mock(return_value='fake_service')):
self.assertRaises(exception.WillNotSchedule,
self.driver.schedule_create_share,
self.admin_context, fake_request_spec, {})
utils.service_is_up.assert_called_once_with('fake_service')
db.service_get_by_args.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
'fake', 'manila-share')
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from nipype.interfaces.base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File
from nipype.utils.filemanip import split_filename
import os, os.path as op
from nipype.interfaces.traits_extension import isdefined
class FilterTracksInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-2,
desc='input tracks to be filtered')
include_xor = ['include_file', 'include_spec']
include_file = File(exists=True, argstr='-include %s', desc='inclusion file', xor = include_xor)
include_spec = traits.List(traits.Float, desc='inclusion specification in mm and radius (x y z r)', position=2,
argstr='-include %s', minlen=4, maxlen=4, sep=',', units='mm', xor = include_xor)
exclude_xor = ['exclude_file', 'exclude_spec']
exclude_file = File(exists=True, argstr='-exclude %s', desc='exclusion file', xor = exclude_xor)
exclude_spec = traits.List(traits.Float, desc='exclusion specification in mm and radius (x y z r)', position=2,
argstr='-exclude %s', minlen=4, maxlen=4, sep=',', units='mm', xor = exclude_xor)
minimum_tract_length = traits.Float(argstr='-minlength %s', units='mm',
desc="Sets the minimum length of any track in millimeters (default is 10 mm).")
out_file = File(argstr='%s', position=-1, desc='Output filtered track filename',
name_source=['in_file'], hash_files=False, name_template='%s_filt')
no_mask_interpolation = traits.Bool(argstr='-nomaskinterp', desc="Turns off trilinear interpolation of mask images.")
    invert = traits.Bool(argstr='-invert', desc="invert the matching process, so that tracks that would " \
        "otherwise have been included are now excluded and vice-versa.")
quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.")
debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.")
class FilterTracksOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='the output filtered tracks')
class FilterTracks(CommandLine):
"""
Use regions-of-interest to select a subset of tracks
from a given MRtrix track file.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> filt = mrt.FilterTracks()
>>> filt.inputs.in_file = 'tracks.tck'
>>> filt.run() # doctest: +SKIP
"""
_cmd = 'filter_tracks'
input_spec=FilterTracksInputSpec
output_spec=FilterTracksOutputSpec
class Tracks2ProbInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-2,
desc='tract file')
template_file = File(exists=True, argstr='-template %s', position=1,
                          desc='an image file to be used as a template for the output (the output image will have the same transform and field of view)')
voxel_dims = traits.List(traits.Float, argstr='-vox %s', sep=',', position=2, minlen=3, maxlen=3,
desc='Three comma-separated numbers giving the size of each voxel in mm.')
colour = traits.Bool(argstr='-colour', position=3, desc="add colour to the output image according to the direction of the tracks.")
fraction = traits.Bool(argstr='-fraction', position=3, desc="produce an image of the fraction of fibres through each voxel (as a proportion of the total number in the file), rather than the count.")
output_datatype = traits.Enum("Bit","Int8", "UInt8","Int16", "UInt16","Int32", "UInt32", "float32", "float64", argstr='-datatype %s', position=2,
                                  desc='specify the data type of the output image') #, usedefault=True)
resample = traits.Float(argstr='-resample %d', position=3,
units='mm', desc='resample the tracks at regular intervals using Hermite interpolation. If omitted, the program will select an appropriate interpolation factor automatically.')
out_filename = File(genfile=True, argstr='%s', position= -1, desc='output data file')
class Tracks2ProbOutputSpec(TraitedSpec):
tract_image = File(exists=True, desc='Output tract count or track density image')
class Tracks2Prob(CommandLine):
"""
Convert a tract file into a map of the fraction of tracks to enter
each voxel - also known as a tract density image (TDI) - in MRtrix's
image format (.mif). This can be viewed using MRview or converted to
Nifti using MRconvert.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> tdi = mrt.Tracks2Prob()
>>> tdi.inputs.in_file = 'dwi_CSD_tracked.tck'
>>> tdi.inputs.colour = True
>>> tdi.run() # doctest: +SKIP
"""
_cmd = 'tracks2prob'
input_spec=Tracks2ProbInputSpec
output_spec=Tracks2ProbOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tract_image'] = self.inputs.out_filename
if not isdefined(outputs['tract_image']):
outputs['tract_image'] = op.abspath(self._gen_outfilename())
else:
outputs['tract_image'] = os.path.abspath(outputs['tract_image'])
return outputs
def _gen_filename(self, name):
        if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + '_TDI.mif'
class StreamlineTrackInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='the image containing the source data. '
'The type of data required depends on the type of tracking as set in the preceding argument. For DT methods, '
'the base DWI are needed. For SD methods, the SH harmonic coefficients of the FOD are needed.')
seed_xor = ['seed_file', 'seed_spec']
seed_file = File(exists=True, argstr='-seed %s', desc='seed file', xor = seed_xor)
seed_spec = traits.List(traits.Float, desc='seed specification in mm and radius (x y z r)', position=2,
argstr='-seed %s', minlen=4, maxlen=4, sep=',', units='mm', xor = seed_xor)
include_xor = ['include_file', 'include_spec']
include_file = File(exists=True, argstr='-include %s', desc='inclusion file', xor = include_xor)
include_spec = traits.List(traits.Float, desc='inclusion specification in mm and radius (x y z r)', position=2,
argstr='-include %s', minlen=4, maxlen=4, sep=',', units='mm', xor = include_xor)
exclude_xor = ['exclude_file', 'exclude_spec']
exclude_file = File(exists=True, argstr='-exclude %s', desc='exclusion file', xor = exclude_xor)
exclude_spec = traits.List(traits.Float, desc='exclusion specification in mm and radius (x y z r)', position=2,
argstr='-exclude %s', minlen=4, maxlen=4, sep=',', units='mm', xor = exclude_xor)
mask_xor = ['mask_file', 'mask_spec']
mask_file = File(exists=True, argstr='-mask %s', desc='mask file. Only tracks within mask.', xor = mask_xor)
mask_spec = traits.List(traits.Float, desc='Mask specification in mm and radius (x y z r). Tracks will be terminated when they leave the ROI.', position=2,
argstr='-mask %s', minlen=4, maxlen=4, sep=',', units='mm', xor = mask_xor)
inputmodel = traits.Enum('DT_STREAM', 'SD_PROB', 'SD_STREAM',
argstr='%s', desc='input model type', usedefault=True, position=-3)
stop = traits.Bool(argstr='-stop', desc="stop track as soon as it enters any of the include regions.")
do_not_precompute = traits.Bool(argstr='-noprecomputed', desc="Turns off precomputation of the Legendre polynomial values. Warning: this will slow down the algorithm by a factor of approximately 4.")
unidirectional = traits.Bool(argstr='-unidirectional', desc="Track from the seed point in one direction only (default is to track in both directions).")
no_mask_interpolation = traits.Bool(argstr='-nomaskinterp', desc="Turns off trilinear interpolation of mask images.")
step_size = traits.Float(argstr='-step %s', units='mm',
desc="Set the step size of the algorithm in mm (default is 0.2).")
minimum_radius_of_curvature = traits.Float(argstr='-curvature %s', units='mm',
desc="Set the minimum radius of curvature (default is 2 mm for DT_STREAM, 0 for SD_STREAM, 1 mm for SD_PROB and DT_PROB)")
desired_number_of_tracks = traits.Int(argstr='-number %d', desc='Sets the desired number of tracks. '
'The program will continue to generate tracks until this number of tracks have been selected and written to the output file '
'(default is 100 for *_STREAM methods, 1000 for *_PROB methods).')
maximum_number_of_tracks = traits.Int(argstr='-maxnum %d', desc='Sets the maximum number of tracks to generate. '
"The program will not generate more tracks than this number, even if the desired number of tracks hasn't yet been reached "
'(default is 100 x number).')
minimum_tract_length = traits.Float(argstr='-minlength %s', units='mm',
desc="Sets the minimum length of any track in millimeters (default is 10 mm).")
maximum_tract_length = traits.Float(argstr='-length %s', units='mm',
desc="Sets the maximum length of any track in millimeters (default is 200 mm).")
cutoff_value = traits.Float(argstr='-cutoff %s', units='NA',
desc="Set the FA or FOD amplitude cutoff for terminating tracks (default is 0.1).")
initial_cutoff_value = traits.Float(argstr='-initcutoff %s', units='NA',
desc="Sets the minimum FA or FOD amplitude for initiating tracks (default is twice the normal cutoff).")
initial_direction = traits.List(traits.Int, desc='Specify the initial tracking direction as a vector',
argstr='-initdirection %s', minlen=2, maxlen=2, units='voxels')
out_file = File(argstr='%s', position=-1, name_source=['in_file'], name_template='%s_tracked.tck',
output_name='tracked', desc='output data file')
class StreamlineTrackOutputSpec(TraitedSpec):
tracked = File(exists=True, desc='output file containing reconstructed tracts')
class StreamlineTrack(CommandLine):
"""
Performs tractography using one of the following models:
'dt_prob', 'dt_stream', 'sd_prob', 'sd_stream',
where 'dt' stands for diffusion tensor, 'sd' stands for spherical
deconvolution, and 'prob' stands for probabilistic.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> strack = mrt.StreamlineTrack()
>>> strack.inputs.inputmodel = 'SD_PROB'
>>> strack.inputs.in_file = 'data.Bfloat'
>>> strack.inputs.seed_file = 'seed_mask.nii'
>>> strack.inputs.mask_file = 'mask.nii'
>>> strack.cmdline
'streamtrack -mask mask.nii -seed seed_mask.nii SD_PROB data.Bfloat data_tracked.tck'
>>> strack.run() # doctest: +SKIP
"""
_cmd = 'streamtrack'
input_spec = StreamlineTrackInputSpec
output_spec = StreamlineTrackOutputSpec
class DiffusionTensorStreamlineTrackInputSpec(StreamlineTrackInputSpec):
gradient_encoding_file = File(exists=True, argstr='-grad %s', mandatory=True, position=-2,
desc='Gradient encoding, supplied as a 4xN text file where each line is in the format [ X Y Z b ]; [ X Y Z ] describes the direction of the applied gradient, and b gives the b-value in units of 1000 s/mm^2. See FSL2MRTrix')
class DiffusionTensorStreamlineTrack(StreamlineTrack):
"""
Specialized interface to StreamlineTrack. This interface is used for
streamline tracking from diffusion tensor data, and calls the MRtrix
function 'streamtrack' with the option 'DT_STREAM'
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> dtstrack = mrt.DiffusionTensorStreamlineTrack()
>>> dtstrack.inputs.in_file = 'data.Bfloat'
>>> dtstrack.inputs.seed_file = 'seed_mask.nii'
>>> dtstrack.run() # doctest: +SKIP
"""
input_spec = DiffusionTensorStreamlineTrackInputSpec
def __init__(self, command=None, **inputs):
inputs["inputmodel"] = "DT_STREAM"
return super(DiffusionTensorStreamlineTrack, self).__init__(command, **inputs)
class ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec(StreamlineTrackInputSpec):
maximum_number_of_trials = traits.Int(argstr='-trials %s',
desc="Set the maximum number of sampling trials at each point (only used for probabilistic tracking).")
class ProbabilisticSphericallyDeconvolutedStreamlineTrack(StreamlineTrack):
"""
Performs probabilistic tracking using spherically deconvolved data
Specialized interface to StreamlineTrack. This interface is used for
probabilistic tracking from spherically deconvolved data, and calls
the MRtrix function 'streamtrack' with the option 'SD_PROB'
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> sdprobtrack = mrt.ProbabilisticSphericallyDeconvolutedStreamlineTrack()
>>> sdprobtrack.inputs.in_file = 'data.Bfloat'
>>> sdprobtrack.inputs.seed_file = 'seed_mask.nii'
>>> sdprobtrack.run() # doctest: +SKIP
"""
input_spec = ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec
def __init__(self, command=None, **inputs):
inputs["inputmodel"] = "SD_PROB"
return super(ProbabilisticSphericallyDeconvolutedStreamlineTrack, self).__init__(command, **inputs)
class SphericallyDeconvolutedStreamlineTrack(StreamlineTrack):
"""
Performs streamline tracking using spherically deconvolved data
Specialized interface to StreamlineTrack. This interface is used for
streamline tracking from spherically deconvolved data, and calls
the MRtrix function 'streamtrack' with the option 'SD_STREAM'
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> sdtrack = mrt.SphericallyDeconvolutedStreamlineTrack()
>>> sdtrack.inputs.in_file = 'data.Bfloat'
>>> sdtrack.inputs.seed_file = 'seed_mask.nii'
>>> sdtrack.run() # doctest: +SKIP
"""
input_spec = StreamlineTrackInputSpec
def __init__(self, command=None, **inputs):
inputs["inputmodel"] = "SD_STREAM"
return super(SphericallyDeconvolutedStreamlineTrack, self).__init__(command, **inputs)
|
|
# -*- coding: utf-8 -*-
#
# Author: Jonas Berg
# Copyright (c) 2015, Semcon Sweden AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Semcon Sweden AB nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import struct
from . import exceptions
from . import constants
from . import utilities
class CanFrame():
"""
CAN frame with data. Does not know how the signals are laid out etc.
Raises:
CanException: For wrong frame ID. See :exc:`.CanException`.
To find the DLC, use one of::
len(myframe)
len(myframe.frame_data)
"""
def __init__(self, frame_id, frame_data, frame_format=constants.CAN_FRAMEFORMAT_STANDARD):
# Properties #
self.frame_format = frame_format # Must be set before frame_id
self.frame_id = frame_id
self.frame_data = frame_data
def __repr__(self):
datastring = " ".join(["{:02X}".format(y) for y in self.frame_data]) if self.frame_data else ""
return "CAN frame ID: {0} (0x{0:03X}, {1}) data: {2} ({3} bytes)".format(
self.frame_id, self.frame_format, datastring, len(self.frame_data))
def __len__(self):
return len(self.frame_data)
@classmethod
def from_empty_bytes(cls, frame_id, number_of_bytes, frame_format=constants.CAN_FRAMEFORMAT_STANDARD):
"""
Create a :class:`.CanFrame` with empty bytes.
Args:
frame_id (int): CAN frame ID number
number_of_bytes (int): number of empty data bytes to initialize the frame with.
frame_format (str): Frame format. Should be ``'standard'`` or ``'extended'``. Defaults to standard frame format.
"""
try:
number_of_bytes = int(number_of_bytes)
except (ValueError, TypeError):
raise exceptions.CanException("number_of_bytes should be an integer. Given: {!r}".format(number_of_bytes))
if (number_of_bytes > constants.MAX_NUMBER_OF_CAN_DATA_BYTES) or (number_of_bytes < 0):
raise exceptions.CanException("Wrong number of number_of_bytes given: {!r}".format(number_of_bytes))
framedata = constants.NULL_BYTE * number_of_bytes
return cls(frame_id, framedata, frame_format)
@classmethod
def from_rawframe(cls, rawframe):
"""
Create a :class:`.CanFrame` from a raw frame from the SocketCAN interface.
Args:
rawframe (bytes): 16 bytes long, includes frame ID, frame format etc
"""
try:
first_part, dlc, framedata8bytes = struct.unpack(constants.FORMAT_CAN_RAWFRAME, rawframe)
except struct.error as err:
raise exceptions.CanException("rawframe is illegal. Given: {!r}. Error: {}".format(rawframe, err))
frame_id = first_part & constants.CAN_MASK_ID_ONLY
frame_format = constants.CAN_FRAMEFORMAT_EXTENDED if first_part & constants.CAN_MASK_EXTENDED_FRAME_BIT \
else constants.CAN_FRAMEFORMAT_STANDARD
framedata = framedata8bytes[:dlc]
# is_remote_request = bool(first_part & constants.CAN_MASK_REMOTE_REQUEST_BIT)
# is_error_frame = bool(first_part & constants.CAN_MASK_ERROR_BIT)
return cls(frame_id, framedata, frame_format)
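# A minimal, illustrative sketch of parsing a raw frame (assuming the usual
# SocketCAN layout '=IB3x8s' for constants.FORMAT_CAN_RAWFRAME; the bytes below
# are made up for this example):
#
#     raw = b'\x23\x01\x00\x00\x02\x00\x00\x00\x11\x22\x00\x00\x00\x00\x00\x00'
#     frame = CanFrame.from_rawframe(raw)
#     # frame.frame_id == 0x123, frame.frame_data == b'\x11\x22' (DLC 2)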
@property
def frame_id(self):
"""
*int* CAN frame ID number
"""
return self._frame_id
@frame_id.setter
def frame_id(self, value):
utilities.check_frame_id_and_format(value, self.frame_format)
self._frame_id = value
@property
def frame_data(self):
"""
*bytes object* 0-8 bytes of CAN data
"""
return self._frame_data
@frame_data.setter
def frame_data(self, value):
if value is None:
raise exceptions.CanException("frame_data should not be None")
try:
value = bytes(value)
except TypeError:
raise exceptions.CanException("frame_data should be bytes. Given: {!r}".format(value))
if len(value) > constants.MAX_NUMBER_OF_CAN_DATA_BYTES:
raise exceptions.CanException("The frame_data has wrong length: {!r}".format(value))
self._frame_data = value
@property
def frame_format(self):
"""
*str* Frame format. Should be ``'standard'`` or ``'extended'``. Defaults to standard frame format.
"""
return self._frame_format
@frame_format.setter
def frame_format(self, value):
if value not in [constants.CAN_FRAMEFORMAT_STANDARD, constants.CAN_FRAMEFORMAT_EXTENDED]:
raise exceptions.CanException("Wrong frame_format. Given: {!r}".format(value))
self._frame_format = value
def get_signalvalue(self, signaldefinition):
"""
Extract a signal value from the frame.
Args:
signaldefinition (:class:`.CanSignalDefinition` object): The definition of the signal
Returns:
The extracted signal physical value (numerical).
"""
if signaldefinition.signaltype == constants.CAN_SIGNALTYPE_DOUBLE:
if signaldefinition.endianness == constants.LITTLE_ENDIAN:
unpacked_value = struct.unpack(constants.FORMAT_FLOAT_DOUBLE_LITTLE_ENDIAN, self.frame_data)[0]
else:
unpacked_value = struct.unpack(constants.FORMAT_FLOAT_DOUBLE_BIG_ENDIAN, self.frame_data)[0]
else:
bus_value = utilities.get_busvalue_from_bytes(self.frame_data,
signaldefinition.endianness,
signaldefinition.numberofbits,
signaldefinition.startbit)
# Unpack from signal type
if signaldefinition.signaltype == constants.CAN_SIGNALTYPE_UNSIGNED:
unpacked_value = bus_value
elif signaldefinition.signaltype == constants.CAN_SIGNALTYPE_SIGNED:
unpacked_value = utilities.from_twos_complement(bus_value, signaldefinition.numberofbits)
else: # CAN_SIGNALTYPE_SINGLE:
useful_bytes = struct.pack(constants.FORMAT_DATA_4BYTES_INT, bus_value) # Create 'bytes' of length 4
unpacked_value = struct.unpack(constants.FORMAT_FLOAT_SINGLE_BIG_ENDIAN, useful_bytes)[0]
physical_value = (unpacked_value * signaldefinition.scalingfactor) + signaldefinition.valueoffset
# Limit to minvalue and maxvalue
if signaldefinition.minvalue is not None:
physical_value = max(signaldefinition.minvalue, physical_value)
if signaldefinition.maxvalue is not None:
physical_value = min(signaldefinition.maxvalue, physical_value)
return physical_value
def set_signalvalue(self, signaldefinition, physical_value=None):
"""
Set a signal physical_value in the frame.
Args:
signaldefinition (:class:`.CanSignalDefinition` object): The definition of the signal
physical_value (numerical): The physical_value (numerical) of the signal.
If the physical_value is not given, the default physical_value for the *signaldefinition* is used.
Raises:
CanException: For wrong startbit or values. See :exc:`.CanException`.
"""
if signaldefinition.get_minimum_dlc() > len(self):
raise exceptions.CanException('The frame is too short to hold this signal. Frame: {}, signal: {}'.
format(self, signaldefinition))
if physical_value is None:
physical_value = signaldefinition.defaultvalue
if physical_value < signaldefinition.get_minimum_possible_value() or \
physical_value > signaldefinition.get_maximum_possible_value():
raise exceptions.CanException('The physical value is out of range. Value: {}, range {} to {}'.format(
physical_value,
signaldefinition.get_minimum_possible_value(),
signaldefinition.get_maximum_possible_value()))
# Limit to minvalue and maxvalue
if signaldefinition.minvalue is not None:
physical_value = max(signaldefinition.minvalue, physical_value)
if signaldefinition.maxvalue is not None:
physical_value = min(signaldefinition.maxvalue, physical_value)
# Scale according to valueoffset and scalingfactor
scaled_value = float((physical_value - signaldefinition.valueoffset) / signaldefinition.scalingfactor)
# Shortcut for double precision floats (occupies full frame)
if signaldefinition.signaltype == constants.CAN_SIGNALTYPE_DOUBLE:
if signaldefinition.endianness == constants.LITTLE_ENDIAN:
self.frame_data = struct.pack(constants.FORMAT_FLOAT_DOUBLE_LITTLE_ENDIAN, scaled_value)
else:
self.frame_data = struct.pack(constants.FORMAT_FLOAT_DOUBLE_BIG_ENDIAN, scaled_value)
return
# Encode to correct signaltype
if signaldefinition.signaltype == constants.CAN_SIGNALTYPE_UNSIGNED:
bus_value = int(scaled_value)
elif signaldefinition.signaltype == constants.CAN_SIGNALTYPE_SIGNED:
bus_value = utilities.twos_complement(int(scaled_value), signaldefinition.numberofbits)
else: # CAN_SIGNALTYPE_SINGLE:
bus_value = utilities.can_bytes_to_int(
struct.pack(constants.FORMAT_FLOAT_SINGLE_BIG_ENDIAN, scaled_value).
rjust(constants.MAX_NUMBER_OF_CAN_DATA_BYTES, constants.NULL_BYTE))
# Limit the size of the field to be written
assert bus_value <= (2 ** signaldefinition.numberofbits - 1), "Trying to set too large signal value to frame."
assert bus_value >= 0, "Trying to set too small signal value to the frame."
bitvalues = utilities.get_shiftedvalue_from_busvalue(bus_value,
signaldefinition.endianness,
signaldefinition.numberofbits,
signaldefinition.startbit)
raw_mask = ((1 << signaldefinition.numberofbits) - 1) # Mask with ones in 'numberofbits' positions
mask = utilities.get_shiftedvalue_from_busvalue(raw_mask,
signaldefinition.endianness,
signaldefinition.numberofbits,
signaldefinition.startbit)
# Parse existing frame_data
dataint = utilities.can_bytes_to_int(self.frame_data)
dlc = len(self.frame_data)
# Modify the frame_data by writing zeros to the appropriate field (using bitwise AND),
# then writing in the relevant data (by using bitwise OR)
dataint = (dataint & ~mask) | bitvalues
self.frame_data = utilities.int_to_can_bytes(dlc, dataint)
def unpack(self, frame_definitions):
"""Unpack the CAN frame, and return all signal values.
Args:
frame_definitions (dict): The keys are frame_id (int) and
the items are :class:`.CanFrameDefinition` objects.
Raises:
CanException: For wrong DLC. See :exc:`.CanException`.
Returns:
A dictionary of signal values. The keys are the signalname (str) and the items are the values (numerical).
If the frame is not described in the 'frame_definitions', an empty dictionary is returned.
"""
try:
fr_def = frame_definitions[self.frame_id]
except KeyError:
return {}
if len(self.frame_data) != fr_def.dlc:
raise exceptions.CanException('The received frame has wrong length: {}, Def: {}'.format(self, fr_def))
outputdict = {}
for sigdef in fr_def.signaldefinitions:
val = self.get_signalvalue(sigdef)
outputdict[sigdef.signalname] = val
return outputdict
def get_rawframe(self):
"""Returns a 16 bytes long 'bytes' object."""
dlc = len(self.frame_data)
framedata8bytes = self.frame_data.ljust(constants.MAX_NUMBER_OF_CAN_DATA_BYTES, constants.NULL_BYTE)
# Set flag for extended frame format, if necessary
first_part = self.frame_id
if self.frame_format == constants.CAN_FRAMEFORMAT_EXTENDED:
first_part |= constants.CAN_MASK_EXTENDED_FRAME_BIT
return struct.pack(constants.FORMAT_CAN_RAWFRAME, first_part, dlc, framedata8bytes)
def get_descriptive_ascii_art(self):
"""Create a visual indication of the frame data
Returns:
A multi-line string.
"""
text = "{!r} \n".format(self)
text += utilities.generate_can_integer_overview(utilities.can_bytes_to_int(self.frame_data))
return text
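# A minimal usage sketch (illustrative only; the frame ID, data bytes and the
# hypothetical 'temperature_signal' definition are made-up examples):
#
#     frame = CanFrame(frame_id=0x123, frame_data=b'\x01\x02\x03\x04')
#     len(frame)                                    # DLC, here 4
#     empty = CanFrame.from_empty_bytes(0x456, number_of_bytes=8)
#     # value = frame.get_signalvalue(temperature_signal)
#     # frame.set_signalvalue(temperature_signal, physical_value=21.5)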
|
|
# -*- coding: utf-8 -*-
#
# django-codenerix
#
# Copyright 2017 Centrologic Computational Logistic Center S.L.
#
# Project URL : http://www.codenerix.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.template import Library
from django.urls import reverse
from django.utils.encoding import smart_text
from django.utils.safestring import mark_safe
from django.core.exceptions import ValidationError
from django.utils import formats
from django.conf import settings
from codenerix.djng.angular_base import TupleErrorList
from codenerix.helpers import model_inspect
register = Library()
@register.filter
def widgetize(i):
# Initialize structure
attrs = i.__dict__.get("field", {}).__dict__.get("widget", {}).__dict__.get('attrs', {})
# Select
# if 'choices' in i.field.widget.__dict__:
#
# # Set classes for select2 inputs to look properly with and without foreignkeys link button
# if foreignkey(i,""):
# addattr(attrs,"class=select_fk")
# else:
# addattr(attrs,"class=select_nofk")
# # Add a new attribute for ui-select to work
# addattr(attrs,"ui-select2")
# Return result
return attrs
@register.filter
def istype(i, kind):
# Get widget
widget = i.field.widget
# Get format type
if ('format_key' in type(widget).__dict__):
ftype = type(widget).format_key
else:
ftype = None
# Choose kind
if kind == 'datetime':
if ftype == 'DATETIME_INPUT_FORMATS':
answer = 'DATETIME_INPUT_FORMATS'
elif ftype == 'DATE_INPUT_FORMATS':
answer = 'DATE_INPUT_FORMATS'
elif ftype == 'TIME_INPUT_FORMATS':
answer = 'TIME_INPUT_FORMAT'
else:
answer = False
elif kind == 'date2time':
answer = 'DATE_INPUT_FORMATS'
elif kind == 'color':
answer = (ngmodel(i) == 'color')
else:
raise IOError("Unknown type '{0}' in 'istype' filter".format(kind))
# Return answer
return answer
@register.filter
def addextra(attrs, attr):
if attr:
for at in attr:
addattr(attrs, at)
# Return result
return attrs
@register.filter
def addattr(attrs, attr):
# Split the new attr into key/value pair
attrsp = attr.split("=")
key = attrsp[0]
if len(attrsp) >= 2:
value = "=".join(attrsp[1:])
else:
value = ""
if key in attrs:
# Key already exists in the attrs struct
if attrs[key]:
# Key has a value already inside the structure
if value:
# We got a new value to add to the struct, append it
attrs[key] += " {0}".format(value)
else:
# Key doesn't have a value inside the structure
if value:
# We got a new value to add to the struct, add it
attrs[key] += value
else:
# Add the new key
attrs[key] = value
# Return result
return attrs
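# Illustrative sketch of how addattr merges attributes (values are made up):
#
#     attrs = {}
#     attrs = addattr(attrs, "class=select_fk")      # {'class': 'select_fk'}
#     attrs = addattr(attrs, "class=form-control")   # {'class': 'select_fk form-control'}
#     attrs = addattr(attrs, "ui-select2")           # adds 'ui-select2' with an empty value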
@register.filter
def lockattr(attrs, cannot_update):
if cannot_update:
if "ui-select2" in attrs:
attrs.pop("ui-select2")
newattrs = addattr(attrs, "readonly='readonly'")
return addattr(newattrs, "disabled='disabled'")
else:
return attrs
@register.filter
def setattrs(field, attrs):
if attrs:
return field.as_widget(attrs=attrs)
else:
return field
@register.filter
def ngmodel(i):
return getattr(i.field.widget, 'field_name', i.field.widget.attrs['ng-model'])
@register.filter
def inireadonly(attrs, i):
field = ngmodel(i)
return addattr(attrs, 'ng-readonly=readonly_{0}'.format(field))
@register.filter
def date2timewidget(i, langcode):
return datewidget(i, langcode, 'date2time')
@register.filter
def datewidget(i, langcode, kindtype='datetime', kind=None):
# Initialization
final = {}
form = formats.get_format('DATETIME_INPUT_FORMATS', lang=langcode)[0].replace("%", "").replace('d', 'dd').replace('m', 'mm').replace('Y', 'yyyy').replace('H', 'hh').replace('M', 'ii').replace('S', 'ss')
if kind is None:
kind = istype(i, kindtype)
if kind == 'DATETIME_INPUT_FORMATS':
final['format'] = form
final['startview'] = 2
final['minview'] = 0
final['maxview'] = 4
final['icon'] = 'calendar'
elif (kind == 'DATE_INPUT_FORMATS') or (kind == 'date'):
final['format'] = form.split(" ")[0]
final['startview'] = 2
final['minview'] = 2
final['maxview'] = 4
final['icon'] = 'calendar'
elif kind == 'TIME_INPUT_FORMAT':
final['format'] = form.split(" ")[1]
final['startview'] = 1
final['minview'] = 0
final['maxview'] = 1
final['icon'] = 'time'
else:
raise IOError("Unknown kind '{0}' in filter 'datewidget'".format(kind))
# Return result
return final
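# Illustrative result of datewidget (assuming a locale whose first
# DATETIME_INPUT_FORMAT is '%d/%m/%Y %H:%M:%S'; actual values depend on
# Django's locale formats, and 'field' stands for any bound form field,
# which is only inspected when 'kind' is not given):
#
#     datewidget(field, 'en', kind='DATE_INPUT_FORMATS')
#     # {'format': 'dd/mm/yyyy', 'startview': 2, 'minview': 2, 'maxview': 4, 'icon': 'calendar'}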
@register.filter
def unlist(elements):
# Remake the tuple
newtuple = TupleErrorList()
# Process each error
for error in elements:
# Split errors
(f1, f2, f3, f4, f5, msg) = error
if type(msg) == ValidationError:
newmsg = ""
for error in msg:
if newmsg:
newmsg += " {0}".format(error)
else:
newmsg = error
# Save new msg
msg = newmsg
# Save error with converted text
newtuple.append((f1, f2, f3, f4, f5, msg))
# Return the newtuple
return newtuple
@register.filter
def foreignkey(element, exceptions):
'''
function to determine if each select field needs a create button or not
'''
label = element.field.__dict__['label']
try:
label = unicode(label)
except NameError:
pass
if (not label) or (label in exceptions):
return False
else:
return "_queryset" in element.field.__dict__
@register.filter
def headstyle(group):
# Initialize
style = ""
# Decide about colors
if 'color' in group and group['color']:
style += "color:{0};".format(group['color'])
if 'bgcolor' in group and group['bgcolor']:
style += "background-color:{0};".format(group['bgcolor'])
if 'textalign' in group and group['textalign']:
style += "text-align:{0};".format(group['textalign'])
# Check if we have some style
if style:
return "style={0}".format(style)
else:
return ""
class ColumnCounter:
def __init__(self):
self.__columns = 0
def add(self, columns):
# Control columns
if self.__columns == 12:
self.__columns = 0
answer = True
elif self.__columns > 12:
raise IOError("Columns max number of 12 reached, you requested to use a total of '{}'".format(self.__columns))
else:
answer = False
# Add new columns
self.__columns += columns
# Return answer
return answer
@register.filter
def column_counter(nothing):
return ColumnCounter()
@register.filter
def add_columns(obj, columns):
return obj.add(columns)
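# Illustrative sketch of the 12-column bookkeeping (plain Python, widths made up):
#
#     counter = column_counter(None)   # fresh ColumnCounter
#     counter.add(6)    # False: row not full yet (6/12)
#     counter.add(6)    # False: row is now exactly full (12/12)
#     counter.add(4)    # True: previous row was complete, so a new row starts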
@register.filter
def linkedinfo(element, info_input={}):
info = model_inspect(element.field._get_queryset().model())
info.update(info_input)
ngmodel = element.html_name # field.widget.attrs['ng-model']
return mark_safe("'{0}','{1}','{2}', '{3}s'".format(
getattr(settings, 'BASE_URL', ''),
ngmodel,
info['appname'],
info['modelname'].lower())
)
# DEPRECATED: 2017-02-14
@register.filter
def get_depa(queryset, kind):
return queryset.get(kind=kind, alternative=False)
@register.filter
def getws(form, input_name):
if 'autofill' in form.Meta.__dict__ and input_name in form.Meta.autofill:
return "'{}'".format(reverse(form.Meta.autofill[input_name][2], kwargs={'search': '__pk__'}))
else:
return 'undefined'
@register.filter
def get_field_list(forms):
inputs = []
for form in forms:
for field in form.fields:
inputs.append("'{}'".format(field))
if inputs:
inputs = "[{}]".format(','.join(inputs))
return inputs
@register.filter
def invalidator(formname, inp):
return mark_safe("{{'codenerix_invalid':{0}.{1}.$invalid}}".format(smart_text(formname), ngmodel(inp)))
@register.filter
def join_list(l, string):
if l:
return string.join(l)
else:
return ''
|
|
# Copyright (c) 2017 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from apicapi import apic_client
from aim.agent.aid.universes.aci.converters import utils
from aim.api import service_graph
def _dn(mo_type_name, *dn_attrs):
mo = apic_client.ManagedObjectClass(mo_type_name)
return mo.dn(*dn_attrs)
def _aci_obj(mo_type_name, *dn_attrs, **attrs):
obj_attrs = {'dn': _dn(mo_type_name, *dn_attrs)}
obj_attrs.update(attrs)
return {mo_type_name: {'attributes': obj_attrs}}
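# Illustrative sketch (the DN shown is a plausible example only; the real value
# is produced by apicapi's ManagedObjectClass for the given MO type):
#
#     _aci_obj('fvBD', 'tenant1', 'bd1', nameAlias='web')
#     # {'fvBD': {'attributes': {'dn': 'uni/tn-tenant1/BD-bd1', 'nameAlias': 'web'}}}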
def device_cluster_converter(object_dict, otype, helper,
source_identity_attributes,
destination_identity_attributes, to_aim=True):
if to_aim:
result = utils.default_converter(object_dict, otype, helper,
source_identity_attributes,
destination_identity_attributes,
to_aim=to_aim)
else:
result = utils.default_converter(object_dict, otype, helper,
source_identity_attributes,
destination_identity_attributes,
to_aim=to_aim)
if object_dict['encap']:
lif = service_graph.DeviceClusterInterface(
tenant_name=object_dict['tenant_name'],
device_cluster_name=object_dict['name'],
name='interface',
encap=object_dict['encap'])
result.append(lif)
else:
lif = None
nodes = []
for node in object_dict['devices']:
if 'name' in node:
cdev = service_graph.ConcreteDevice(
tenant_name=object_dict['tenant_name'],
device_cluster_name=object_dict['name'],
name=node['name'])
cdev_if = service_graph.ConcreteDeviceInterface(
tenant_name=object_dict['tenant_name'],
device_cluster_name=object_dict['name'],
device_name=node['name'],
name='interface')
if 'path' in node:
cdev_if.path = node['path']
nodes.extend([cdev, cdev_if])
if lif:
lif.concrete_interfaces.append(cdev_if.dn)
result.extend(nodes)
return result
def device_cluster_context_converter(object_dict, otype, helper,
source_identity_attributes,
destination_identity_attributes,
to_aim=True):
if to_aim:
result = utils.default_converter(object_dict, otype, helper,
source_identity_attributes,
destination_identity_attributes,
to_aim=to_aim)
else:
object_dict1 = copy.copy(object_dict)
if not object_dict1['device_cluster_tenant_name']:
object_dict1['device_cluster_tenant_name'] = (
object_dict1['tenant_name'])
result = utils.default_converter(object_dict1, otype, helper,
source_identity_attributes,
destination_identity_attributes,
to_aim=to_aim)
cons_ctx = service_graph.DeviceClusterInterfaceContext(
tenant_name=object_dict['tenant_name'],
contract_name=object_dict['contract_name'],
service_graph_name=object_dict['service_graph_name'],
node_name=object_dict['node_name'],
connector_name='consumer')
if object_dict['device_cluster_name']:
cons_ctx.device_cluster_interface_dn = _dn(
'vnsLIf',
(object_dict['device_cluster_tenant_name'] or
cons_ctx.tenant_name),
object_dict['device_cluster_name'],
'interface')
if object_dict['bridge_domain_name']:
cons_ctx.bridge_domain_dn = _dn(
'fvBD',
(object_dict['bridge_domain_tenant_name'] or
cons_ctx.tenant_name),
object_dict['bridge_domain_name'])
if object_dict['service_redirect_policy_name']:
cons_ctx.service_redirect_policy_dn = _dn(
'vnsSvcRedirectPol',
(object_dict['service_redirect_policy_tenant_name'] or
cons_ctx.tenant_name),
object_dict['service_redirect_policy_name'])
if (cons_ctx.device_cluster_interface_dn and
cons_ctx.bridge_domain_dn and
cons_ctx.service_redirect_policy_dn):
prov_ctx = copy.copy(cons_ctx)
prov_ctx.connector_name = 'provider'
result.extend([cons_ctx, prov_ctx])
return result
def service_graph_converter(object_dict, otype, helper,
source_identity_attributes,
destination_identity_attributes, to_aim=True):
if to_aim:
result = utils.default_converter(object_dict, otype, helper,
source_identity_attributes,
destination_identity_attributes,
to_aim=to_aim)
else:
result = utils.default_converter(object_dict, otype, helper,
source_identity_attributes,
destination_identity_attributes,
to_aim=to_aim)
tn = object_dict['tenant_name']
gr = object_dict['name']
term_cons = _aci_obj('vnsAbsTermConn__Con', tn, gr, 'T1')
term_prov = _aci_obj('vnsAbsTermConn__Prov', tn, gr, 'T2')
result.extend([
_aci_obj('vnsAbsTermNodeCon', tn, gr, 'T1'),
term_cons,
_aci_obj('vnsInTerm__Con', tn, gr, 'T1'),
_aci_obj('vnsOutTerm__Con', tn, gr, 'T1'),
_aci_obj('vnsAbsTermNodeProv', tn, gr, 'T2'),
term_prov,
_aci_obj('vnsInTerm__Prov', tn, gr, 'T2'),
_aci_obj('vnsOutTerm__Prov', tn, gr, 'T2')
])
lc_nodes = [n for n in object_dict['linear_chain_nodes']
if n.get('name')]
prev_conn = list(term_cons.values())[0]['attributes']['dn']
cntr = 0
for fn in lc_nodes:
cntr = cntr + 1
node = service_graph.ServiceGraphNode(
tenant_name=tn, service_graph_name=gr, name=fn['name'],
managed=False, routing_mode='Redirect',
sequence_number=str(cntr - 1),
connectors=['consumer', 'provider'])
if fn.get('device_cluster_name'):
node.device_cluster_name = fn['device_cluster_name']
node.device_cluster_tenant_name = (
fn.get('device_cluster_tenant_name', tn))
node_con = _dn('vnsAbsFuncConn', tn, gr, node.name, 'consumer')
node_prov = _dn('vnsAbsFuncConn', tn, gr, node.name, 'provider')
cxn = service_graph.ServiceGraphConnection(
tenant_name=tn, service_graph_name=gr, name='C%s' % cntr,
unicast_route=True,
connector_dns=[prev_conn, node_con])
prev_conn = node_prov
result.extend([node, cxn])
if cntr:
cxn = service_graph.ServiceGraphConnection(
tenant_name=tn, service_graph_name=gr, name='C%s' % (cntr + 1),
unicast_route=True,
connector_dns=[
prev_conn,
list(term_prov.values())[0]['attributes']['dn']])
result.append(cxn)
return result
def vnsRsRedirectHealthGroup_ip_converter(input_dict, input_attr, to_aim=True):
if to_aim:
return utils.default_identity_converter(
input_dict, 'vnsRsRedirectHealthGroup', {})[-1]
else:
return {utils.IGNORE: utils.default_attribute_converter(
input_dict, input_attr, to_aim=to_aim)}
vnsRsALDevToPhysDomP_converter = utils.dn_decomposer(
['physical_domain_name'], 'physDomP')
vnsRsALDevToDomP_converter = utils.dn_decomposer(
['vmm_domain_type', 'vmm_domain_name'], 'vmmDomP')
vnsRsCIfAttN_converter = utils.child_list('concrete_interfaces', 'tDn')
vnsRsCIfPathAtt_converter = utils.child_list('path', 'tDn')
vnsAbsFuncConn_converter = utils.child_list('connectors', 'name')
vnsLDevVip_dn_decomposer = utils.dn_decomposer(
['device_cluster_tenant_name', 'device_cluster_name'],
'vnsLDevVip')
vnsRsAbsConnectionConns_converter = utils.child_list('connector_dns', 'tDn')
vnsRedirectDest_converter = utils.list_dict(
'destinations',
{'ip': {'other': 'ip'},
'mac': {'other': 'mac',
'converter': utils.upper},
'name': {'other': 'destName'}},
['ip'])
vnsRsRedirectHealthGroup_converter = utils.list_dict(
'destinations',
{'redirect_health_group_dn': {'other': 'tDn'},
'ip': {'other': 'dn',
'converter': vnsRsRedirectHealthGroup_ip_converter}},
['ip'], requires=['redirect_health_group_dn'])
vnsRsIPSLAMonitoringPol_converter = utils.tdn_rs_converter(
['monitoring_policy_tenant_name', 'monitoring_policy_name'],
'fvIPSLAMonitoringPol')
resource_map = {
'vnsLDevVip': [{
'resource': service_graph.DeviceCluster,
'skip': ['physical_domain_name', 'encap', 'devices',
'vmm_domain_name', 'vmm_domain_type'],
'exceptions': {
'managed': {'converter': utils.boolean},
'devtype': {'other': 'device_type'},
'svcType': {'other': 'service_type'}
},
'converter': device_cluster_converter,
}],
'vnsRsALDevToPhysDomP': [{
'resource': service_graph.DeviceCluster,
'exceptions': {'tDn': {'other': 'physical_domain_name',
'converter': vnsRsALDevToPhysDomP_converter}},
'to_resource': utils.default_to_resource_strict,
}],
'vnsRsALDevToDomP': [{
'resource': service_graph.DeviceCluster,
'exceptions': {'tDn': {'other': 'vmm_domain_name',
'converter': vnsRsALDevToDomP_converter,
'skip_if_empty': True}},
'to_resource': utils.default_to_resource_strict,
}],
'vnsLIf': [{
'resource': service_graph.DeviceClusterInterface,
'skip': ['concrete_interfaces'],
'alt_resource': service_graph.DeviceCluster
}],
'vnsRsCIfAttN': [{
'resource': service_graph.DeviceClusterInterface,
'converter': vnsRsCIfAttN_converter,
'alt_resource': service_graph.DeviceCluster
}],
'vnsCDev': [{
'resource': service_graph.ConcreteDevice,
'alt_resource': service_graph.DeviceCluster
}],
'vnsCIf': [{
'resource': service_graph.ConcreteDeviceInterface,
'skip': ['path', 'host'],
'alt_resource': service_graph.DeviceCluster
}],
'vnsRsCIfPathAtt': [{
'resource': service_graph.ConcreteDeviceInterface,
'exceptions': {'tDn': {'other': 'path'}},
'skip': ['host'],
'to_resource': utils.default_to_resource_strict,
'alt_resource': service_graph.DeviceCluster
}],
'vnsAbsGraph': [{
'resource': service_graph.ServiceGraph,
'converter': service_graph_converter,
'skip': ['linear_chain_nodes']
}],
'vnsAbsNode': [{
'resource': service_graph.ServiceGraphNode,
'exceptions': {
'funcType': {'other': 'function_type'},
'managed': {'converter': utils.boolean},
},
'skip': ['connectors', 'device_cluster_name',
'device_cluster_tenant_name'],
'alt_resource': service_graph.ServiceGraph,
}],
'vnsAbsFuncConn': [{
'resource': service_graph.ServiceGraphNode,
'converter': vnsAbsFuncConn_converter,
'alt_resource': service_graph.ServiceGraph,
}],
'vnsRsNodeToLDev': [{
'resource': service_graph.ServiceGraphNode,
'exceptions': {
'tDn': {'other': 'device_cluster_name',
'converter': vnsLDevVip_dn_decomposer},
},
'to_resource': utils.default_to_resource_strict,
'alt_resource': service_graph.ServiceGraph,
}],
'vnsAbsConnection': [{
'resource': service_graph.ServiceGraphConnection,
'exceptions': {
'adjType': {'other': 'adjacency_type'},
'connDir': {'other': 'connector_direction'},
'connType': {'other': 'connector_type'},
'directConnect': {'converter': utils.boolean},
'unicastRoute': {'converter': utils.boolean},
},
'skip': ['connector_dns'],
'alt_resource': service_graph.ServiceGraph,
}],
'vnsRsAbsConnectionConns': [{
'resource': service_graph.ServiceGraphConnection,
'converter': vnsRsAbsConnectionConns_converter,
'alt_resource': service_graph.ServiceGraph,
}],
'vnsSvcRedirectPol': [{
'resource': service_graph.ServiceRedirectPolicy,
'skip': ['destinations', 'monitoring_policy_tenant_name',
'monitoring_policy_name'],
}],
'vnsRedirectDest': [{
'resource': service_graph.ServiceRedirectPolicy,
'converter': vnsRedirectDest_converter,
}],
'vnsRsIPSLAMonitoringPol': [{
'resource': service_graph.ServiceRedirectPolicy,
'converter': vnsRsIPSLAMonitoringPol_converter,
'to_resource': utils.default_to_resource_strict
}],
'vnsRsRedirectHealthGroup': [{
'resource': service_graph.ServiceRedirectPolicy,
'converter': vnsRsRedirectHealthGroup_converter,
}],
'vnsLDevCtx': [{
'resource': service_graph.DeviceClusterContext,
'converter': device_cluster_context_converter,
'skip': ['device_cluster_name', 'device_cluster_tenant_name',
'service_redirect_policy_name',
'service_redirect_policy_tenant_name',
'bridge_domain_name', 'bridge_domain_tenant_name'],
}],
'vnsRsLDevCtxToLDev': [{
'resource': service_graph.DeviceClusterContext,
'exceptions': {
'tDn': {'other': 'device_cluster_name',
'converter': vnsLDevVip_dn_decomposer},
},
'to_resource': utils.default_to_resource_strict,
'converter': device_cluster_context_converter,
}],
'vnsLIfCtx': [{
'resource': service_graph.DeviceClusterInterfaceContext,
'skip': ['device_cluster_interface_dn',
'service_redirect_policy_dn',
'bridge_domain_dn'],
'alt_resource': service_graph.DeviceClusterContext
}],
'vnsRsLIfCtxToSvcRedirectPol': [{
'resource': service_graph.DeviceClusterInterfaceContext,
'exceptions': {
'tDn': {'other': 'service_redirect_policy_dn'},
},
'to_resource': utils.default_to_resource_strict,
'alt_resource': service_graph.DeviceClusterContext
}],
'vnsRsLIfCtxToBD': [{
'resource': service_graph.DeviceClusterInterfaceContext,
'exceptions': {
'tDn': {'other': 'bridge_domain_dn'},
},
'to_resource': utils.default_to_resource_strict,
'alt_resource': service_graph.DeviceClusterContext
}],
'vnsRsLIfCtxToLIf': [{
'resource': service_graph.DeviceClusterInterfaceContext,
'exceptions': {
'tDn': {'other': 'device_cluster_interface_dn'},
},
'to_resource': utils.default_to_resource_strict,
'alt_resource': service_graph.DeviceClusterContext
}],
'fvIPSLAMonitoringPol': [{
'resource': service_graph.ServiceRedirectMonitoringPolicy,
'exceptions': {
'slaPort': {'other': 'tcp_port'},
'slaType': {'other': 'type'},
'slaFrequency': {'other': 'frequency'}
},
}],
'vnsRedirectHealthGroup': [{
'resource': service_graph.ServiceRedirectHealthGroup,
}]
}
resource_map_post_reverse = {
'vnsAbsTermNodeCon': [{
'resource': service_graph.ServiceGraph,
'skip': ['display_name', 'name_alias'],
'to_resource': utils.default_to_resource_strict,
}],
'vnsAbsTermConn__Con': [{
'resource': service_graph.ServiceGraph,
'skip': ['display_name', 'name_alias'],
'to_resource': utils.default_to_resource_strict
}],
'vnsInTerm__Con': [{
'resource': service_graph.ServiceGraph,
'skip': ['display_name', 'name_alias'],
'to_resource': utils.default_to_resource_strict
}],
'vnsOutTerm__Con': [{
'resource': service_graph.ServiceGraph,
'skip': ['display_name', 'name_alias'],
'to_resource': utils.default_to_resource_strict
}],
'vnsAbsTermNodeProv': [{
'resource': service_graph.ServiceGraph,
'skip': ['display_name', 'name_alias'],
'to_resource': utils.default_to_resource_strict,
}],
'vnsAbsTermConn__Prov': [{
'resource': service_graph.ServiceGraph,
'skip': ['display_name', 'name_alias'],
'to_resource': utils.default_to_resource_strict
}],
'vnsInTerm__Prov': [{
'resource': service_graph.ServiceGraph,
'skip': ['display_name', 'name_alias'],
'to_resource': utils.default_to_resource_strict
}],
'vnsOutTerm__Prov': [{
'resource': service_graph.ServiceGraph,
'skip': ['display_name', 'name_alias'],
'to_resource': utils.default_to_resource_strict
}],
}
|
|
import json
import pytest
import responses
from koordinates import Set, Client, Group, Publish
from .response_data.responses_3 import (
sets_single_good_simulated_response,
sets_new_draft_good_simulated_response,
sets_single_draft_good_simulated_response,
sets_multi_version_good_simulated_response,
sets_single_version_good_simulated_response,
sets_publish_version_good_simulated_response,
)
from .response_data.responses_4 import sets_multiple_good_simulated_response
@pytest.fixture
def client():
return Client("test.koordinates.com", token="test")
@responses.activate
def test_get_set_by_id(client):
the_response = sets_single_good_simulated_response
responses.add(
responses.GET,
client.get_url("SET", "GET", "single", {"id": 1474}),
body=the_response,
status=200,
content_type="application/json",
)
obj = client.sets.get(1474)
assert isinstance(obj, Set)
assert obj.title == "Ultra Fast Broadband Initiative Coverage"
assert obj.group.name == "New Zealand Broadband Map"
assert (
obj.url_html
== "https://test.koordinates.com/set/933-ultra-fast-broadband-initiative-coverage/"
)
@responses.activate
def test_get_set_set_returns_all_rows(client):
the_response = sets_multiple_good_simulated_response
responses.add(
responses.GET,
client.get_url("SET", "GET", "multi"),
body=the_response,
status=200,
content_type="application/json",
)
cnt_of_sets_returned = 0
for layer in client.sets.list():
cnt_of_sets_returned += 1
assert cnt_of_sets_returned == 2
@responses.activate
def test_set_list_drafts(client):
# create a set, then check that it returns as a draft
responses.add(
responses.POST,
client.get_url("SET", "POST", "create"),
body=sets_new_draft_good_simulated_response,
status=201,
adding_headers={
"Location": "https://test.koordinates.com/services/api/v1/sets/1/"
},
)
responses.add(
responses.GET,
client.get_url("SET", "GET", "single", {"id": 1}),
body=sets_new_draft_good_simulated_response,
status=200,
)
responses.add(
responses.GET,
client.get_url("SET", "GET", "multidraft"),
body=sets_single_draft_good_simulated_response,
status=200,
)
s = Set()
s.title = "New Set"
rs = client.sets.create(s)
sets_amount = 0
for _set in client.sets.list_drafts():
sets_amount += 1
assert sets_amount == 1
assert rs is s
assert rs.publish_to_catalog_services == False
assert isinstance(s.group, Group)
assert len(responses.calls) == 3
@responses.activate
def test_set_create(client):
responses.add(
responses.POST,
client.get_url("SET", "POST", "create"),
body=sets_single_good_simulated_response,
status=201,
adding_headers={
"Location": "https://test.koordinates.com/services/api/v1/sets/933/"
},
)
responses.add(
responses.GET,
client.get_url("SET", "GET", "single", {"id": 933}),
body=sets_single_good_simulated_response,
status=200,
)
s = Set()
s.title = "test title"
s.description = "description"
s.group = 141
s.items = [
"https://test.koordinates.com/services/api/v1/layers/4226/",
"https://test.koordinates.com/services/api/v1/layers/4228/",
"https://test.koordinates.com/services/api/v1/layers/4227/",
"https://test.koordinates.com/services/api/v1/layers/4061/",
"https://test.koordinates.com/services/api/v1/layers/4147/",
"https://test.koordinates.com/services/api/v1/layers/4148/",
]
rs = client.sets.create(s)
assert rs is s
assert isinstance(s.group, Group)
assert s.group.id == 141
assert len(responses.calls) == 2
req = json.loads(responses.calls[0].request.body.decode("utf-8"))
assert len(req["items"]) == 6
assert req["group"] == 141
@responses.activate
def test_set_list_versions(client):
responses.add(
responses.GET,
client.get_url("SET_VERSION", "GET", "multi", {"id": 1}),
body=sets_multi_version_good_simulated_response,
status=200,
)
versions_amount = 0
for _version in client.sets.list_versions(1):
versions_amount += 1
assert versions_amount == 2
@responses.activate
def test_set_get_version(client):
responses.add(
responses.GET,
client.get_url("SET_VERSION", "GET", "single", {"id": 1, "version_id": 1}),
body=sets_new_draft_good_simulated_response,
status=200,
)
rs = client.sets.get_version(1, 1)
assert rs.version.id == 1
@responses.activate
def test_set_get_draft(client):
# should redirect to the draft versions
responses.add(
responses.GET,
client.get_url("SET_VERSION", "GET", "draft", {"id": 1}),
body=sets_new_draft_good_simulated_response,
status=201,
adding_headers={
"Location": "https://test.koordinates.com/services/api/v1/sets/1/"
},
)
rs = client.sets.get_draft(1)
assert rs.version.id == 1
@responses.activate
def test_set_get_published(client):
# should redirect to the published version
responses.add(
responses.GET,
client.get_url("SET_VERSION", "GET", "published", {"id": 1}),
body=sets_new_draft_good_simulated_response,
status=201,
adding_headers={
"Location": "https://test.koordinates.com/services/api/v1/sets/1/"
},
)
rs = client.sets.get_published(1)
assert rs.version.id == 1
@responses.activate
def test_set_get_create_draft(client):
responses.add(
responses.POST,
client.get_url("SET_VERSION", "POST", "create", {"id": 1}),
body=sets_new_draft_good_simulated_response,
status=200,
)
rs = client.sets.create_draft(1)
assert rs.version.id == 1
assert len(responses.calls) == 1
@responses.activate
def test_publish_single_set_version(client):
responses.add(
responses.GET,
client.get_url("SET_VERSION", "GET", "single", {"id": 5, "version_id": 10}),
body=sets_single_version_good_simulated_response,
status=200,
content_type="application/json",
)
lv = client.sets.get_version(5, 10)
assert lv.id == 5
assert lv.version.id == 10
responses.add(
responses.POST,
client.get_url("SET_VERSION", "POST", "publish", {"id": 5, "version_id": 10}),
body="",
status=201,
adding_headers={
"Location": "https://test.koordinates.com/services/api/v1/publish/10/"
},
content_type="application/json",
)
responses.add(
responses.GET,
"https://test.koordinates.com/services/api/v1/publish/10/",
body=sets_publish_version_good_simulated_response,
status=200,
content_type="application/json",
)
p = lv.publish()
assert isinstance(p, Publish)
assert p.id == 10
|
|
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import subprocess
import sys
class BaseOSUtils(object):
PROTOCOL_TCP = "TCP"
PROTOCOL_UDP = "UDP"
def reboot(self):
raise NotImplementedError()
def user_exists(self, username):
raise NotImplementedError()
def generate_random_password(self, length):
# On Windows os.urandom() uses CryptGenRandom, which is a
# cryptographically secure pseudorandom number generator
b64_password = base64.b64encode(os.urandom(256))
return b64_password.replace(
b'/', b'').replace(b'+', b'')[:length].decode()
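# Illustrative sketch (output is random; '/' and '+' never appear because they
# are stripped before truncating to the requested length):
#
#     password = BaseOSUtils().generate_random_password(20)
#     # len(password) == 20, value differs on every call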
def execute_process(self, args, shell=True, decode_output=False):
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=shell)
(out, err) = p.communicate()
if decode_output and sys.version_info < (3, 0):
out = out.decode(sys.stdout.encoding)
err = err.decode(sys.stdout.encoding)
return out, err, p.returncode
def sanitize_shell_input(self, value):
raise NotImplementedError()
def create_user(self, username, password, password_expires=False):
raise NotImplementedError()
def rename_user(self, username, new_username):
raise NotImplementedError()
def enum_users(self):
raise NotImplementedError()
def is_builtin_admin(self, username):
raise NotImplementedError()
def set_user_password(self, username, password, password_expires=False):
raise NotImplementedError()
def add_user_to_local_group(self, username, groupname):
raise NotImplementedError()
def set_host_name(self, new_host_name):
raise NotImplementedError()
def get_user_home(self, username):
raise NotImplementedError()
def get_network_adapters(self):
raise NotImplementedError()
def get_network_adapter_name_by_mac_address(self, mac_address):
raise NotImplementedError()
def set_network_adapter_mtu(self, name, mtu):
raise NotImplementedError()
def rename_network_adapter(self, old_name, new_name):
raise NotImplementedError()
def enable_network_adapter(self, name, enabled):
raise NotImplementedError()
def set_static_network_config(self, name, address, prefix_len_or_netmask,
gateway, dnsnameservers):
raise NotImplementedError()
def create_network_team(self, team_name, mode, load_balancing_algorithm,
members, mac_address, primary_nic_name=None,
primary_nic_vlan_id=None, lacp_timer=None):
raise NotImplementedError()
def add_network_team_nic(self, team_name, nic_name, vlan_id):
raise NotImplementedError()
def set_config_value(self, name, value, section=None):
raise NotImplementedError()
def get_config_value(self, name, section=None):
raise NotImplementedError()
def wait_for_boot_completion(self):
pass
def reset_service_password(self):
return False
def terminate(self):
pass
def get_default_gateway(self):
raise NotImplementedError()
def check_static_route_exists(self, destination):
raise NotImplementedError()
def add_static_route(self, destination, mask, next_hop, interface_index,
metric):
raise NotImplementedError()
def get_os_version(self):
raise NotImplementedError()
def check_os_version(self, major, minor, build=0):
raise NotImplementedError()
def get_volume_label(self, drive):
raise NotImplementedError()
def firewall_create_rule(self, name, port, protocol, allow=True):
raise NotImplementedError()
def firewall_remove_rule(self, name, port, protocol, allow=True):
raise NotImplementedError()
def get_maximum_password_length(self):
"""Obtain the maximum password length tailored for each OS."""
raise NotImplementedError()
def set_timezone(self, timezone):
"""Set the timezone for this instance."""
raise NotImplementedError()
def change_password_next_logon(self, username):
"""Force the given user to change his password at the next login."""
raise NotImplementedError()
def set_service_credentials(self, service_name, username, password):
"""Set the username and password for a given service."""
raise NotImplementedError()
def create_service(self, service_name, display_name, path, start_mode,
username=None, password=None):
raise NotImplementedError()
def delete_service(self, service_name):
raise NotImplementedError()
def get_service_status(self, service_name):
raise NotImplementedError()
def check_service_exists(self, service_name):
raise NotImplementedError()
def get_service_start_mode(self, service_name):
raise NotImplementedError()
def set_service_start_mode(self, service_name, start_mode):
raise NotImplementedError()
def start_service(self, service_name):
raise NotImplementedError()
def stop_service(self, service_name, wait=False):
raise NotImplementedError()
def get_service_username(self, service_name):
"""Retrieve the username under which a service runs."""
raise NotImplementedError()
def get_current_user(self):
"""Retrieve the username under which the current thread runs."""
raise NotImplementedError()
def is_real_time_clock_utc(self):
raise NotImplementedError()
def set_real_time_clock_utc(self, utc):
raise NotImplementedError()
def enable_trim(self, enable):
"""Enables or disables TRIM delete notifications."""
raise NotImplementedError()
def get_file_version(self, path):
raise NotImplementedError()
def set_path_admin_acls(self, path):
raise NotImplementedError()
def take_path_ownership(self, path, username=None):
raise NotImplementedError()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import errno
from numpy import *
import os
import re
import string
import subprocess
import sys
from getpass import getuser
QUEUE_RUN = 'o14d'
QUEUE_ANALYZE = 'o12h'
# Paths:
ARRAY_PBS_GOJA = "array_goja.pbs"
ARRAY_PBS_MISSING = "array.pbs.missing"
def run_simulation(type_of_run):
command_run = ''
if type_of_run == "locally":
command_run += 'Gate main.mac'
print('\t' + command_run)
p = subprocess.Popen(command_run, stdout=subprocess.PIPE, shell=True)
p.wait()
else:
command_run += './Gate_parallel.sh'
print('\t' + command_run)
os.system(command_run)
def run_missing_simulations_on_cluster():
with open("./missing_gate_results.txt", 'r') as missing_gate_results_txt:
missing_gate_results = missing_gate_results_txt.readlines()
if len(missing_gate_results) > 0:
for m in missing_gate_results:
m = m.replace('\n', '')
with open("array.pbs", "r") as array_pbs:
with open(ARRAY_PBS_MISSING, "w") as array_pbs_missing:
for line in array_pbs:
array_pbs_missing.write(line.replace('${PBS_ARRAYID}', m))
os.system('qsub ' + ARRAY_PBS_MISSING)
def get_goja_command(gate_result, goja_result, eth, eth0, tw, N0):
goja_command = "goja --root " + gate_result \
+ " --eth " + str(eth) \
+ " --eth0 " + str(eth0) \
+ " --tw " + str(tw) \
+ " --N0 " + str(N0) \
+ " --save-real-time-to " + goja_result + "_realtime" \
+ " --save-statistics-to " + goja_result + "_statistics" \
+ " > " + goja_result + "_coincidences"
return goja_command
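# Illustrative sketch of the assembled command line (paths and thresholds are made up):
#
#     get_goja_command("gate/output1.root", "goja/output1", 0.2, 0.0, 3, 1000)
#     # 'goja --root gate/output1.root --eth 0.2 --eth0 0.0 --tw 3 --N0 1000'
#     # ' --save-real-time-to goja/output1_realtime --save-statistics-to goja/output1_statistics'
#     # ' > goja/output1_coincidences'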
def analyze_simulations_on_cluster(path_gate_output, single_file_prefix, path_goja_output, splits, eth, eth0, tw, N0):
for s in splits:
ss = str(int(s))
gate_result = path_gate_output + single_file_prefix + ss + ".root"
goja_result = path_goja_output + single_file_prefix + ss
goja_command = get_goja_command(gate_result, goja_result, eth, eth0, tw, N0)
# generate ARRAY_PBS_GOJA:
with open(ARRAY_PBS_GOJA, 'w') as array_pbs:
array_pbs.write('#!/bin/sh\n')
array_pbs.write('#PBS -q ' + QUEUE_ANALYZE + '\n')
array_pbs.write('#PBS -l nodes=1:ppn=1\n')
array_pbs.write('#PBS -N GOJA' + ss + '\n')
array_pbs.write('#PBS -V\n')
array_pbs.write('cd ${PBS_O_WORKDIR}\n')
array_pbs.write(goja_command + '\n')
array_pbs.write('exit 0;\n')
# push into queue:
qsub_command = 'qsub ' + ARRAY_PBS_GOJA
os.system(qsub_command)
def get_nr_of_splits(simulation_path):
nr_of_splits = 0
with open('./Gate_parallel.sh', 'r') as gate_parallel_sh:
for line in gate_parallel_sh:
if line.split('=')[0] == 'NR_OF_SPLITS':
nr_of_splits = int((line.split('=')[1]).replace('\"', '').replace('\'', '').replace('\n', ''))
return nr_of_splits
def verify_gate_output(path_gate_output, type_of_run):
nr_of_missing_files = 0
if type_of_run == "locally":
missing_files = []
output_root = path_gate_output + 'output.root'
if not os.path.isfile(output_root):
print("\tFile ", output_root, " is missing.")
missing_files.append(output_root)
nr_of_missing_files = len(missing_files)
elif type_of_run == "on-cluster":
VERIFY_GATE_RESULTS_PBS = "verify_gate_results.pbs"
with open(VERIFY_GATE_RESULTS_PBS, 'w') as file_pbs:
file_pbs.write('#!/bin/sh\n')
file_pbs.write('#PBS -q o12h\n')
file_pbs.write('#PBS -l nodes=1:ppn=1\n')
file_pbs.write('#PBS -N verify_gate_results.py\n')
file_pbs.write('#PBS -V\n')
file_pbs.write('cd ${PBS_O_WORKDIR}\n')
file_pbs.write('verify_gate_results.py\n')
file_pbs.write('exit 0;\n')
# push into queue:
qsub_command = 'qsub ' + VERIFY_GATE_RESULTS_PBS
os.system(qsub_command)
return nr_of_missing_files
def verify_goja_output(path_gate_output, path_goja_output, type_of_run):
nr_of_missing_files = 0
missing_files = []
if type_of_run == "locally":
for fname in os.listdir(path_gate_output):
if ".root" in fname:
path_coincidences = path_goja_output + fname[:-5] + "_coincidences"
if not os.path.isfile(path_coincidences):
print("\tFile ", path_coincidences, " is missing.")
missing_files.append(path_coincidences)
path_realtime = path_goja_output + fname[:-5] + "_realtime"
if not os.path.isfile(path_realtime):
print("\tFile ", path_realtime, " is missing.")
missing_files.append(path_realtime)
path_statistics = path_goja_output + fname[:-5] + "_statistics"
if not os.path.isfile(path_statistics):
print("\tFile ", path_statistics, " is missing.")
missing_files.append(path_statistics)
nr_of_missing_files = len(missing_files)
elif type_of_run == "on-cluster":
VERIFY_GOJA_RESULTS_PBS = "verify_goja_results.pbs"
with open(VERIFY_GOJA_RESULTS_PBS, 'w') as file_pbs:
file_pbs.write('#!/bin/sh\n')
file_pbs.write('#PBS -q o12h\n')
file_pbs.write('#PBS -l nodes=1:ppn=1\n')
file_pbs.write('#PBS -N verify_goja_results.py\n')
file_pbs.write('#PBS -V\n')
file_pbs.write('cd ${PBS_O_WORKDIR}\n')
file_pbs.write('verify_goja_results.py\n')
file_pbs.write('exit 0;\n')
# push into queue:
qsub_command = 'qsub ' + VERIFY_GOJA_RESULTS_PBS
os.system(qsub_command)
return nr_of_missing_files, missing_files
def concatenate_files(fnames):
path_coincidences = path_goja_output + args.simulation_name + "_COINCIDENCES"
try:
os.unlink(path_coincidences)
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise e
path_realtime = path_goja_output + args.simulation_name + "_REALTIME"
try:
os.unlink(path_realtime)
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise e
path_statistics = path_goja_output + args.simulation_name + "_STATISTICS"
try:
os.unlink(path_statistics)
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise e
realtime = 0.
counter_all_compton_hits = 0
counter_compton_hits_over_the_ETH0 = 0
counter_compton_hits_over_the_ETH = 0
with open(path_coincidences, 'w') as outfile:
for fname in fnames:
basename = fname[0:-13]
basepath_goja = path_goja_output + basename
realtime += loadtxt(basepath_goja + "_realtime")
with open(basepath_goja + "_coincidences") as infile:
for line in infile:
outfile.write(line)
counters = genfromtxt(basepath_goja + "_statistics", usecols=(0))
counter_all_compton_hits += counters[0]
counter_compton_hits_over_the_ETH0 += counters[1]
counter_compton_hits_over_the_ETH += counters[2]
savetxt(path_realtime, [realtime])
with open(path_statistics, 'w') as stats:
stats.write(str(int(counter_all_compton_hits)) + " # all Compton hits\n")
        stats.write(str(int(counter_compton_hits_over_the_ETH0)) + " # Compton hits with edep over the ETH0\n")
        stats.write(str(int(counter_compton_hits_over_the_ETH)) + " # Compton hits with edep over the ETH\n")
if args.clean:
for fname in fnames:
basename = fname[0:-13]
basepath_goja = path_goja_output + basename
os.unlink(basepath_goja + "_coincidences")
os.unlink(basepath_goja + "_realtime")
os.unlink(basepath_goja + "_statistics")
print("Goja output succesfully concatenated.")
if __name__ == "__main__":
help_message = "Type goja_manager.py --help."
parser = argparse.ArgumentParser(description='Analyze, verify and concatenate the GOJA results.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-gt', '--gate-output',
dest='path_gate_output',
type=str,
default="",
                        help='path to the directory with the split GATE results')
parser.add_argument('-sfp', '--single-file-prefix',
dest='single_file_prefix',
type=str,
default="output",
help='single file prefix')
parser.add_argument('-gj', '--goja-output',
dest='path_goja_output',
type=str,
default="",
help='path to dir with the GOJA results')
parser.add_argument('-sp', '--simulation-path',
dest='simulation_path',
type=str,
default=".",
help='path to dir with the simulation (for the purpose of the Simulations Manager)')
parser.add_argument('-m', '--mode',
dest='mode',
type=str,
default="analyze",
help='mode of the script: run, run-missing, analyze, analyze-missing, verify, verify-gate, verify-goja, concatenate, clear-gate, clear-goja, clear-cluster-artifacts')
parser.add_argument('--eth',
dest='eth',
type=float,
default=0.2,
help='fixed energy threshold in MeV [for mode \'analyze\']')
parser.add_argument('--eth0',
dest='eth0',
type=float,
default=0.0,
help='noise energy threshold in MeV [for mode \'analyze\']')
parser.add_argument('-tw', '--time-window',
dest='tw',
type=float,
default=3,
help='time window in ns [for mode \'analyze\']')
parser.add_argument('--N0',
dest='N0',
type=int,
default=1000,
help='maximum number of events above the noise energy threshold in the coincidence window [for mode \'analyze\']')
parser.add_argument('-nos', '--nr-of-splits',
dest='nr_of_splits',
type=int,
default=0,
help='number of splits')
parser.add_argument('-r', '--run',
dest='type_of_run',
type=str,
default='on-cluster',
help='run \'locally\' or \'on-cluster\' [for modes: run, run-missing, analyze, analyze-missing]')
parser.add_argument('-sn', '--simulation-name',
dest='simulation_name',
type=str,
default="simulation",
                        help='name of the simulation [for mode \'concatenate\']')
parser.add_argument('-c', '--clean',
action='store_true',
                        help='remove partial files after concatenation [for mode \'concatenate\']')
parser.add_argument('--lustre',
action='store_true',
                        help='use the Lustre file system (otherwise NFS is used)')
args = parser.parse_args()
current_path = os.getcwd()
path_gate_output = ""
if args.path_gate_output == "":
if args.type_of_run == "locally":
path_gate_output = current_path + "/output/"
elif args.type_of_run == "on-cluster":
if args.lustre:
path_gate_output = '/mnt/lustre/home/' + getuser() + '/' + args.simulation_path + "/output/"
else:
path_gate_output = current_path + '/output/'
else:
path_gate_output = args.path_gate_output
path_goja_output = ""
if args.path_goja_output == "":
path_goja_output = current_path + "/goja/"
else:
path_goja_output = args.path_goja_output
if args.type_of_run=="locally" and not ('run' in args.mode) and not os.path.isdir(path_gate_output):
print("Directory " + path_gate_output + " does not exist. " + help_message)
sys.exit()
if not os.path.isdir(path_goja_output):
try:
os.system('mkdir -p ' + path_goja_output)
except:
pass
if not os.path.isdir(path_goja_output):
print("Directory " + path_goja_output + " does not exist. " + help_message)
sys.exit()
if args.mode == "run":
print("Run:")
run_simulation(args.type_of_run)
elif args.mode == "run-missing":
print("Run missing:")
if args.type_of_run == "locally":
if verify_gate_output(path_gate_output, args.type_of_run)>0:
run_simulation(args.type_of_run)
else:
if os.path.isfile("./missing_gate_results.txt"):
run_missing_simulations_on_cluster()
elif args.mode == "analyze":
print("Analyze:")
if args.type_of_run == 'locally':
gate_result = path_gate_output + "output.root"
goja_result = path_goja_output + args.single_file_prefix
goja_command = get_goja_command(gate_result, goja_result, args.eth, args.eth0, args.tw, args.N0)
print(goja_command)
p = subprocess.Popen(goja_command, shell=True)
p.wait()
elif args.type_of_run == 'on-cluster':
if args.nr_of_splits == 0:
nr_of_splits = get_nr_of_splits(args.simulation_path)
else:
nr_of_splits = args.nr_of_splits
splits = linspace(1, nr_of_splits, nr_of_splits)
analyze_simulations_on_cluster(path_gate_output, args.single_file_prefix, path_goja_output, splits, args.eth, args.eth0, args.tw, args.N0)
elif args.mode == "analyze-missing":
print("Analyze missing:")
if args.type_of_run == 'locally':
gate_result = path_gate_output + "output.root"
goja_result = path_goja_output + args.single_file_prefix
            if not os.path.isfile(goja_result + "_coincidences") or \
               not os.path.isfile(goja_result + "_realtime") or \
               not os.path.isfile(goja_result + "_statistics"):
goja_command = get_goja_command(gate_result, goja_result, args.eth, args.eth0, args.tw, args.N0)
print(goja_command)
p = subprocess.Popen(goja_command, shell=True)
p.wait()
elif args.type_of_run == 'on-cluster':
if os.path.isfile("./missing_goja_results.txt"):
missing_goja_results = loadtxt("./missing_goja_results.txt")
if len(missing_goja_results)>0:
                    analyze_simulations_on_cluster(path_gate_output, args.single_file_prefix, path_goja_output, missing_goja_results, args.eth, args.eth0, args.tw, args.N0)
elif args.mode == "verify":
print("Verify:")
verify_gate_output(path_gate_output, args.type_of_run)
verify_goja_output(path_gate_output, path_goja_output, args.type_of_run)
elif args.mode == "verify-gate":
print("Verify (GATE):")
verify_gate_output(path_gate_output, args.type_of_run)
elif args.mode == "verify-goja":
print("Verify (GOJA):")
verify_goja_output(path_gate_output, path_goja_output, args.type_of_run)
elif args.mode == "concatenate":
print("Concatenate:")
fnames_tmp = os.listdir(path_goja_output)
fnames_tmp = [fname for fname in fnames_tmp if "_coincidences" in fname]
fnames = []
for fname in fnames_tmp:
path_coincidences = path_goja_output + fname
path_realtime = path_goja_output + fname.replace("_coincidences", "") + "_realtime"
path_statistics = path_goja_output + fname.replace("_coincidences", "") + "_statistics"
if os.path.isfile(path_coincidences) and os.path.isfile(path_realtime) and os.path.isfile(path_statistics):
fnames.append(fname)
if len(fnames)>1:
            fnames = sorted(fnames, key=lambda x: (int(re.sub(r'\D', '', x)), x))
concatenate_files(fnames)
elif args.mode == "clear-gate":
print("Clear (GATE):")
command = 'rm -f ' + path_gate_output + '*'
print('\t' + command)
os.system(command)
elif args.mode == "clear-goja":
print("Clear (GOJA):")
command = 'rm -f ' + path_goja_output + '*'
print('\t' + command)
os.system(command)
elif args.mode == "clear-cluster-artifacts":
print("Clear (cluster artifacts):")
command = 'rm -f *.o* *.e* ' + ARRAY_PBS_GOJA + ' ' + ARRAY_PBS_MISSING
print('\t' + command)
os.system(command)
else:
print('Improper mode. ' + help_message)
|
|
# Copyright (C) 2014 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
"""Utility functions.
These functions are shared between reprozip and reprounzip but are not specific
to this software (general-purpose helpers).
"""
import codecs
import contextlib
from datetime import datetime
import email.utils
import io
import itertools
import locale
import logging
import operator
import os
from pathlib import Path, PurePosixPath
import requests
import shutil
import stat
import subprocess
import time
import yaml
logger = logging.getLogger('reprozip_core')
class StreamWriter(object):
def __init__(self, stream):
writer = codecs.getwriter(locale.getpreferredencoding())
self._writer = writer(stream, 'replace')
self.buffer = stream
def writelines(self, lines):
self.write(''.join(lines))
def write(self, obj):
if isinstance(obj, bytes):
self.buffer.write(obj)
else:
self._writer.write(obj)
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self._writer, name)
def flatten(n, iterable):
"""Flattens an iterable by repeatedly calling chain.from_iterable() on it.
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> b = [[7, 8], [9, 10, 11, 12, 13, 14, 15, 16]]
>>> l = [a, b]
>>> list(flatten(0, a))
[[1, 2, 3], [4, 5, 6]]
>>> list(flatten(1, a))
[1, 2, 3, 4, 5, 6]
>>> list(flatten(1, l))
[[1, 2, 3], [4, 5, 6], [7, 8], [9, 10, 11, 12, 13, 14, 15, 16]]
>>> list(flatten(2, l))
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
"""
for _ in range(n):
iterable = itertools.chain.from_iterable(iterable)
return iterable
class UniqueNames(object):
"""Makes names unique amongst the ones it's already seen.
"""
def __init__(self):
self.names = set()
def insert(self, name):
assert name not in self.names
self.names.add(name)
def __call__(self, name):
nb = 1
attempt = name
while attempt in self.names:
nb += 1
attempt = '%s_%d' % (name, nb)
self.names.add(attempt)
return attempt
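# Minimal usage sketch (not part of the module's API surface): repeated names get
# a numeric suffix starting at _2, while insert() pre-registers a name verbatim.
def _unique_names_example():
    names = UniqueNames()
    names.insert('data')
    assert names('data') == 'data_2'
    assert names('data') == 'data_3'
    assert names('other') == 'other'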
def escape(s):
"""Escapes backslashes and double quotes in strings.
This does NOT add quotes around the string.
"""
return s.replace('\\', '\\\\').replace('"', '\\"')
class YamlIndentedListDumper(yaml.SafeDumper):
def __init__(self, *args, initial_indent=0, **kwargs):
super(YamlIndentedListDumper, self).__init__(*args, **kwargs)
for _ in range(initial_indent):
self.increase_indent()
# https://stackoverflow.com/a/39681672
def increase_indent(self, flow=False, indentless=False):
return super(YamlIndentedListDumper, self).increase_indent(flow, False)
def yaml_dumps(x, *, initial_indent=0):
"""Version of yaml.safe_dump() that indents lists.
Indenting items of a list plays better with comments. In addition, this
allows setting the initial indentation before dumping.
"""
stream = io.StringIO()
dumper = YamlIndentedListDumper(
stream,
allow_unicode=True,
initial_indent=initial_indent,
)
try:
dumper.open()
dumper.represent(x)
dumper.close()
finally:
dumper.dispose()
return stream.getvalue().rstrip(' ')
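# Quick illustration of the list indentation yaml_dumps() produces (the output
# shown in the comment is approximate and assumes the default initial_indent
# of 0):
def _yaml_dumps_example():
    # Expected roughly:
    #   inputs:
    #     - one
    #     - two
    return yaml_dumps({'inputs': ['one', 'two']})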
def optional_return_type(req_args, other_args):
"""Sort of namedtuple but with name-only fields.
When deconstructing a namedtuple, you have to get all the fields:
>>> o = namedtuple('T', ['a', 'b', 'c'])(1, 2, 3)
>>> a, b = o
ValueError: too many values to unpack
You thus cannot easily add new return values. This class allows it:
>>> o2 = optional_return_type(['a', 'b'], ['c'])(1, 2, 3)
>>> a, b = o2
>>> c = o2.c
"""
if len(set(req_args) | set(other_args)) != len(req_args) + len(other_args):
raise ValueError
# Maps argument name to position in each list
req_args_pos = dict((n, i) for i, n in enumerate(req_args))
other_args_pos = dict((n, i) for i, n in enumerate(other_args))
def cstr(cls, *args, **kwargs):
if len(args) > len(req_args) + len(other_args):
raise TypeError(
"Too many arguments (expected at least %d and no more than "
"%d)" % (len(req_args),
len(req_args) + len(other_args)))
args1, args2 = args[:len(req_args)], args[len(req_args):]
req = dict((i, v) for i, v in enumerate(args1))
other = dict(zip(other_args, args2))
for k, v in kwargs.items():
if k in req_args_pos:
pos = req_args_pos[k]
if pos in req:
raise TypeError("Multiple values for field %s" % k)
req[pos] = v
elif k in other_args_pos:
if k in other:
raise TypeError("Multiple values for field %s" % k)
other[k] = v
else:
raise TypeError("Unknown field name %s" % k)
args = []
for i, k in enumerate(req_args):
if i not in req:
raise TypeError("Missing value for field %s" % k)
args.append(req[i])
inst = tuple.__new__(cls, args)
inst.__dict__.update(other)
return inst
dct = {'__new__': cstr}
for i, n in enumerate(req_args):
dct[n] = property(operator.itemgetter(i))
return type('OptionalReturnType', (tuple,), dct)
def tz_offset():
offset = time.timezone if time.localtime().tm_isdst == 0 else time.altzone
return -offset
def isodatetime():
offset = tz_offset()
sign = '+'
if offset < 0:
sign = '-'
offset = -offset
if offset % 60 == 0:
offset = '%02d:%02d' % (offset // 3600, (offset // 60) % 60)
else:
offset = '%02d:%02d:%02d' % (offset // 3600, (offset // 60) % 60,
offset % 60)
# Remove microsecond
now = datetime.now()
now = datetime(year=now.year, month=now.month, day=now.day,
hour=now.hour, minute=now.minute, second=now.second)
return '%s%s%s' % (now.isoformat(),
sign,
offset)
def hsize(nbytes):
"""Readable size.
"""
if nbytes is None:
return "unknown"
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
PB = 1 << 50
nbytes = float(nbytes)
if nbytes < KB:
return "{0} bytes".format(int(nbytes))
elif nbytes < MB:
return "{0:.2f} KB".format(nbytes / KB)
elif nbytes < GB:
return "{0:.2f} MB".format(nbytes / MB)
elif nbytes < TB:
return "{0:.2f} GB".format(nbytes / GB)
elif nbytes < PB:
return "{0:.2f} TB".format(nbytes / TB)
else:
return "{0:.2f} PB".format(nbytes / PB)
def normalize_path(path):
"""Normalize a path obtained from the database.
"""
# For some reason, os.path.normpath() keeps multiple leading slashes
# We don't want this since it has no meaning on Linux
path = PurePosixPath(path)
if str(path).startswith('//'):
path = PurePosixPath(str(path)[1:])
return path
def find_all_links_recursive(filename, files):
path = Path('/')
for c in filename.parts[1:]:
# At this point, path is a canonical path, and all links in it have
# been resolved
# We add the next path component
path = path / c
# That component is possibly a link
if path.is_symlink():
# Adds the link itself
files.add(path)
target = Path(os.readlink(path))
target = (path.parent / target).absolute()
# Here, target might contain a number of symlinks
if target not in files:
# Recurse on this new path
find_all_links_recursive(target, files)
# Restores the invariant; realpath might resolve several links here
path = path.resolve()
return path
def find_all_links(filename, include_target=False):
"""Dereferences symlinks from a path.
If include_target is True, this also returns the real path of the final
target.
Example:
/
a -> b
b
g -> c
c -> ../a/d
d
e -> /f
f
>>> find_all_links('/a/g/e', True)
['/a', '/b/c', '/b/g', '/b/d/e', '/f']
"""
files = set()
filename = Path(filename)
    assert filename.is_absolute()
path = find_all_links_recursive(filename, files)
files = list(files)
if include_target:
files.append(path)
return files
def join_root(root, path):
"""Prepends `root` to the absolute path `path`.
"""
path = str(path)
assert path.startswith('/')
path = path[1:]
if path.startswith('/'):
path = path[1:]
return root / path
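# Usage sketch with made-up paths: the absolute path is re-rooted under the
# unpacked experiment directory.
def _join_root_example():
    assert join_root(Path('/tmp/unpacked'), '/usr/bin/python') == \
        Path('/tmp/unpacked/usr/bin/python')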
@contextlib.contextmanager
def make_dir_writable(directory):
"""Context-manager that sets write permission on a directory.
This assumes that the directory belongs to you. If the u+w permission
wasn't set, it gets set in the context, and restored to what it was when
leaving the context. u+x also gets set on all the directories leading to
that path.
"""
uid = os.getuid()
try:
sb = directory.stat()
except OSError:
pass
else:
if sb.st_uid != uid or sb.st_mode & 0o700 == 0o700:
yield
return
# These are the permissions to be restored, in reverse order
restore_perms = []
try:
# Add u+x to all directories up to the target
path = Path('/')
for c in directory.parts[1:-1]:
path = path / c
sb = path.stat()
if sb.st_uid == uid and not sb.st_mode & 0o100:
logger.debug("Temporarily setting u+x on %s", path)
restore_perms.append((path, sb.st_mode))
path.chmod(sb.st_mode | 0o700)
# Add u+wx to the target
sb = directory.stat()
if sb.st_uid == uid and sb.st_mode & 0o700 != 0o700:
logger.debug("Temporarily setting u+wx on %s", directory)
restore_perms.append((directory, sb.st_mode))
directory.chmod(sb.st_mode | 0o700)
yield
finally:
for path, mod in reversed(restore_perms):
path.chmod(mod)
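# Typical use (sketch; 'stale.lock' is a hypothetical file): temporarily gain
# write access to a directory before removing a file inside it, with the original
# permissions restored on exit even if the body raises.
def _make_dir_writable_example(directory):
    with make_dir_writable(directory):
        (directory / 'stale.lock').unlink()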
def rmtree_fixed(path):
"""Like :func:`shutil.rmtree` but doesn't choke on annoying permissions.
If a directory with -w or -x is encountered, it gets fixed and deletion
continues.
"""
if path.is_symlink():
raise OSError("Cannot call rmtree on a symbolic link")
uid = os.getuid()
st = path.lstat()
if st.st_uid == uid and st.st_mode & 0o700 != 0o700:
path.chmod(st.st_mode | 0o700)
for entry in path.iterdir():
if stat.S_ISDIR(entry.lstat().st_mode):
rmtree_fixed(entry)
else:
entry.unlink()
path.rmdir()
# Compatibility with ReproZip <= 1.0.3
check_output = subprocess.check_output
def download_file(url, dest, cachename=None, ssl_verify=None):
"""Downloads a file using a local cache.
If the file cannot be downloaded or if it wasn't modified, the cached
version will be used instead.
The cache lives in ``~/.cache/reprozip/``.
"""
if cachename is None:
if dest is None:
raise ValueError("One of 'dest' or 'cachename' must be specified")
cachename = dest.name
headers = {}
if 'XDG_CACHE_HOME' in os.environ:
cache = Path(os.environ['XDG_CACHE_HOME'])
else:
cache = Path('~/.cache').expanduser()
cache = cache / 'reprozip' / cachename
if cache.exists():
mtime = email.utils.formatdate(cache.stat().st_mtime, usegmt=True)
headers['If-Modified-Since'] = mtime
cache.parent.mkdir(parents=True, exist_ok=True)
try:
response = requests.get(url, headers=headers,
timeout=2 if cache.exists() else 10,
stream=True, verify=ssl_verify)
response.raise_for_status()
if response.status_code == 304:
raise requests.HTTPError(
'304 File is up to date, no data returned',
response=response)
except requests.RequestException as e:
if cache.exists():
if e.response and e.response.status_code == 304:
logger.info("Download %s: cache is up to date", cachename)
else:
logger.warning("Download %s: error downloading %s: %s",
cachename, url, e)
if dest is not None:
shutil.copy(cache, dest)
return dest
else:
return cache
else:
raise
logger.info("Download %s: downloading %s", cachename, url)
try:
with cache.open('wb') as f:
for chunk in response.iter_content(4096):
f.write(chunk)
response.close()
except Exception as e: # pragma: no cover
try:
cache.unlink()
except OSError:
pass
raise e
logger.info("Downloaded %s successfully", cachename)
if dest is not None:
shutil.copy(cache, dest)
return dest
else:
return cache
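# Usage sketch (hypothetical URL and destination): the file is fetched once and
# the copy under ~/.cache/reprozip/ is reused on later calls unless the server
# reports a newer version.
def _download_file_example():
    return download_file(
        'https://example.org/busybox',  # hypothetical URL
        Path('/tmp/busybox'),           # local destination path
        cachename='busybox',            # cache entry name under ~/.cache/reprozip/
    )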
|
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
from .. import ivi
from .. import scpi
Source = set(['internal', 'external'])
ALCSourceMapping = {'internal': 'int',
'external': 'diode'}
PowerMode = set(['fixed', 'sweep'])
FrequencyModeMapping = {'cw': 'cw',
'sweep': 'sweep'}
TrackingHost = set(['hp8560', 'hp8561', 'hp8562', 'hp8562old', 'hp8563', 'hp8563e', 'hp8566',
'hp8593', 'hp8594', 'hp8595', 'hp8596', 'hp8340_5', 'hp8340_1', 'hp8341_5',
'hp8341_1', 'hp70909', 'hp70910', 'hp83590_5', 'hp83590_1', 'hp83592_5',
'hp83592_1', 'hp83594_5', 'hp83594_1', 'hp83595_5', 'hp83595_1'])
SweeptuneSetting = set(['default', 'custom'])
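# The *Mapping dicts translate IVI-level names into instrument keywords; when a
# value is read back, the driver inverts them with a list comprehension. A small
# stand-alone illustration of that reverse lookup (not used by the driver itself):
def _reverse_alc_source(instrument_value):
    # e.g. 'diode' -> 'external', 'int' -> 'internal'
    return [k for k, v in ALCSourceMapping.items() if v == instrument_value][0]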
class agilent85644A(ivi.Driver, scpi.common.Memory):
"Agilent 85644A IVI tracking source driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '85644A')
super(agilent85644A, self).__init__(*args, **kwargs)
self._memory_size = 10
self._rf_frequency = 3e9
self._rf_frequency_offset = 0.0
self._rf_frequency_mode = 'cw'
self._rf_level = 0.0
self._rf_attenuation = 0.0
self._rf_attenuation_auto = True
self._rf_output_enabled = False
self._rf_power_mode = 'fixed'
self._rf_power_slope = 0.0
self._rf_power_center = 0.0
self._rf_power_span = 0.0
self._rf_tracking_adjust = 0
self._rf_tracking_host = 'hp8560'
self._rf_tracking_sweeptune = 'default'
self._alc_enabled = True
self._alc_source = 'internal'
self._reference_oscillator_source = 'internal'
self._identity_description = "Agilent 85644/5A IVI tracking source driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Agilent Technologies"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 2
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = list(['85644A', '85645A'])
self._frequency_low = 300e3
self._frequency_high = 6.5e9
self._add_property('rf.frequency',
self._get_rf_frequency,
self._set_rf_frequency)
self._add_property('rf.frequency_offset',
self._get_rf_frequency_offset,
self._set_rf_frequency_offset)
self._add_property('rf.frequency_mode',
self._get_rf_frequency_mode,
self._set_rf_frequency_mode)
self._add_property('rf.level',
self._get_rf_level,
self._set_rf_level)
self._add_property('rf.attenuation',
self._get_rf_attenuation,
self._set_rf_attenuation)
self._add_property('rf.attenuation_auto',
self._get_rf_attenuation_auto,
self._set_rf_attenuation_auto)
self._add_property('rf.output_enabled',
self._get_rf_output_enabled,
self._set_rf_output_enabled)
self._add_property('rf.power_mode',
self._get_rf_power_mode,
self._set_rf_power_mode)
self._add_property('rf.power_slope',
self._get_rf_power_slope,
self._set_rf_power_slope)
self._add_property('rf.power_center',
self._get_rf_power_center,
self._set_rf_power_center)
self._add_property('rf.power_span',
self._get_rf_power_span,
self._set_rf_power_span)
self._add_property('rf.tracking_adjust',
self._get_rf_tracking_adjust,
self._set_rf_tracking_adjust)
self._add_property('rf.tracking_host',
self._get_rf_tracking_host,
self._set_rf_tracking_host)
self._add_property('rf.tracking_sweeptune',
self._get_rf_tracking_sweeptune,
self._set_rf_tracking_sweeptune)
self._add_method('rf.configure',
self._rf_configure)
self._add_method('rf.is_unleveled',
self._rf_is_unleveled)
self._add_property('alc.enabled',
self._get_alc_enabled,
self._set_alc_enabled)
self._add_property('alc.source',
self._get_alc_source,
self._set_alc_source)
self._add_property('reference_oscillator.source',
self._get_reference_oscillator_source)
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(agilent85644A, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
#if not self._driver_operation_simulate:
# self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility_reset()
def _load_id_string(self):
if self._driver_operation_simulate:
self._identity_instrument_manufacturer = "Not available while simulating"
self._identity_instrument_model = "Not available while simulating"
self._identity_instrument_firmware_revision = "Not available while simulating"
else:
lst = self._ask("*IDN?").split(",")
self._identity_instrument_manufacturer = lst[0]
self._identity_instrument_model = lst[1]
self._identity_instrument_firmware_revision = lst[3]
self._set_cache_valid(True, 'identity_instrument_manufacturer')
self._set_cache_valid(True, 'identity_instrument_model')
self._set_cache_valid(True, 'identity_instrument_firmware_revision')
def _get_identity_instrument_manufacturer(self):
if self._get_cache_valid():
return self._identity_instrument_manufacturer
self._load_id_string()
return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
if self._get_cache_valid():
return self._identity_instrument_model
self._load_id_string()
return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
if self._get_cache_valid():
return self._identity_instrument_firmware_revision
self._load_id_string()
return self._identity_instrument_firmware_revision
def _utility_disable(self):
pass
def _utility_error_query(self):
error_code = 0
error_message = "No error"
if not self._driver_operation_simulate:
error_code, error_message = self._ask(":system:error?").split(',')
error_code = int(error_code)
error_message = error_message.strip(' "')
return (error_code, error_message)
def _utility_lock_object(self):
pass
def _utility_reset(self):
if not self._driver_operation_simulate:
self._write("*RST")
self.driver_operation.invalidate_all_attributes()
def _utility_reset_with_defaults(self):
self._utility_reset()
def _utility_self_test(self):
code = 0
message = "Self test passed"
if not self._driver_operation_simulate:
self._write("*TST?")
# wait for test to complete
time.sleep(30)
code = int(self._read())
if code != 0:
message = "Self test failed"
return (code, message)
def _utility_unlock_object(self):
pass
def _get_rf_frequency(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_frequency = float(self._ask("source:frequency?"))
self._set_cache_valid()
return self._rf_frequency
def _set_rf_frequency(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("source:frequency %e" % value)
self._rf_frequency = value
self._set_cache_valid()
def _get_rf_frequency_offset(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_frequency_offset = float(self._ask("source:frequency:offset?"))
self._set_cache_valid()
return self._rf_frequency_offset
def _set_rf_frequency_offset(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("source:frequency:offset %e" % value)
self._rf_frequency_offset = value
self._set_cache_valid()
def _get_rf_frequency_mode(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask("source:frequency:mode?").lower()
self._rf_frequency_mode = [k for k,v in FrequencyModeMapping.items() if v==value][0]
self._set_cache_valid()
return self._rf_frequency_mode
def _set_rf_frequency_mode(self, value):
if value not in FrequencyModeMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write("source:frequency:mode %s" % FrequencyModeMapping[value])
self._rf_frequency_mode = value
self._set_cache_valid()
def _get_rf_level(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_level = float(self._ask("source:power:level?"))
self._set_cache_valid()
return self._rf_level
def _set_rf_level(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("source:power:level %e" % value)
self._rf_level = value
self._set_cache_valid(False, 'rf_power_center')
self._set_cache_valid()
def _get_rf_attenuation(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_attenuation = float(self._ask("source:power:attenuation?"))
self._set_cache_valid()
return self._rf_attenuation
def _set_rf_attenuation(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("source:power:attenuation %e" % value)
self._rf_attenuation = value
self._rf_attenuation_auto = False
self._set_cache_valid()
def _get_rf_attenuation_auto(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_attenuation_auto = bool(int(self._ask("source:power:attenuation:auto?")))
self._set_cache_valid()
return self._rf_attenuation_auto
def _set_rf_attenuation_auto(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("source:power:attenuation:auto %d" % int(value))
self._rf_attenuation_auto = value
self._set_cache_valid()
def _get_rf_output_enabled(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_output_enabled = bool(int(self._ask("output:state?")))
return self._rf_output_enabled
def _set_rf_output_enabled(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("output:state %d" % int(value))
self._rf_output_enabled = value
self._set_cache_valid()
def _get_rf_power_mode(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_power_mode = self._ask("source:power:mode?").lower()
self._set_cache_valid()
return self._rf_power_mode
def _set_rf_power_mode(self, value):
if value not in PowerMode:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write("source:power:mode %s" % value)
self._rf_power_mode = value
self._set_cache_valid(False, 'rf_power_span')
self._set_cache_valid()
def _get_rf_power_slope(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_power_slope = float(self._ask("source:power:slope?"))
self._set_cache_valid()
return self._rf_power_slope
def _set_rf_power_slope(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("source:power:slope %e" % value)
self._rf_power_slope = value
self._set_cache_valid()
def _get_rf_power_center(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_power_center = float(self._ask("source:power:center?"))
self._set_cache_valid()
return self._rf_power_center
def _set_rf_power_center(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("source:power:center %e" % value)
self._rf_power_center = value
self._set_cache_valid(False, 'rf_level')
self._set_cache_valid()
def _get_rf_power_span(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_power_span = float(self._ask("source:power:span?"))
self._set_cache_valid()
return self._rf_power_span
def _set_rf_power_span(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("source:power:span %e" % value)
self._rf_power_span = value
self._set_cache_valid(False, 'rf_power_mode')
self._set_cache_valid()
def _get_rf_tracking_adjust(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_tracking_adjust = int(self._ask("calibration:track:adj?"))
self._set_cache_valid()
return self._rf_tracking_adjust
def _set_rf_tracking_adjust(self, value):
value = int(value)
if not self._driver_operation_simulate:
self._write("calibration:track:adj %d" % value)
self._rf_tracking_adjust = value
self._set_cache_valid()
def _get_rf_tracking_host(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._rf_tracking_host = self._ask("source:sweep:rselect?").lower()
self._set_cache_valid()
return self._rf_tracking_host
def _set_rf_tracking_host(self, value):
if value not in TrackingHost:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write("source:sweep:rselect %s" % value)
self._rf_tracking_host = value
self._set_cache_valid()
def _get_rf_tracking_sweeptune(self):
# read not implemented
#if not self._driver_operation_simulate and not self._get_cache_valid():
# self._rf_tracking_sweeptune = self._ask("calibration:sweeptune:setting?").lower()
# self._set_cache_valid()
return self._rf_tracking_sweeptune
def _set_rf_tracking_sweeptune(self, value):
if value not in SweeptuneSetting:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
# appears that the options are swapped (firmware bug)
self._write("calibration:sweeptune:setting %s" % ('default' if value == 'custom' else 'custom'))
#self._write("calibration:sweeptune:setting %s" % value)
self._rf_tracking_sweeptune = value
self._set_cache_valid()
def _rf_configure(self, frequency, level):
self._set_rf_frequency(frequency)
self._set_rf_level(level)
def _rf_is_unleveled(self):
if not self._driver_operation_simulate:
return bool(int(self._ask("diagnostic:unleveled?")))
return False
def _get_alc_enabled(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._alc_enabled = bool(int(self._ask("source:power:alc:state?")))
self._set_cache_valid()
return self._alc_enabled
def _set_alc_enabled(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("source:power:alc:state %d" % int(value))
self._set_cache_valid()
self._alc_enabled = value
def _get_alc_source(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask("source:power:alc:source?").lower()
self._alc_source = [k for k,v in ALCSourceMapping.items() if v==value][0]
self._set_cache_valid()
return self._alc_source
def _set_alc_source(self, value):
if value not in ALCSourceMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write("source:power:alc:source %s" % ALCSourceMapping[value])
self._set_cache_valid()
self._alc_source = value
def _get_reference_oscillator_source(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._reference_oscillator_source = 'external' if int(self._ask("source:roscillator:source?")) else 'internal'
return self._reference_oscillator_source
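# Connection sketch (commented out because it needs real hardware; the VISA
# resource string and levels below are only examples, assuming the usual
# python-ivi namespace layout):
#
#   import ivi
#   src = ivi.agilent.agilent85644A("TCPIP0::192.168.1.100::gpib0,19::INSTR")
#   src.rf.configure(1e9, -10.0)   # 1 GHz at -10 dBm
#   src.rf.output_enabled = True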
|
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from operator import attrgetter
from unittest import mock
from oslo_log import log as logging
from oslo_serialization import jsonutils
import oslotest.base
import testtools
from designate import exceptions
from designate import objects
from designate.objects import base
from designate.objects import fields
LOG = logging.getLogger(__name__)
@base.DesignateRegistry.register
class TestObject(objects.DesignateObject):
fields = {
'id': fields.AnyField(nullable=True),
'name': fields.AnyField(nullable=True),
'nested': fields.ObjectFields('TestObject', nullable=True),
'nested_list': fields.ObjectFields('TestObjectList', nullable=True),
}
@base.DesignateRegistry.register
class TestObjectDict(TestObject, objects.DictObjectMixin):
pass
@base.DesignateRegistry.register
class TestObjectList(objects.ListObjectMixin, objects.DesignateObject):
LIST_ITEM_TYPE = TestObject
fields = {
'objects': fields.ListOfObjectsField('TestObject'),
}
@base.DesignateRegistry.register
class TestValidatableObject(objects.DesignateObject):
fields = {
'id': fields.UUIDFields(),
'nested': fields.ObjectFields('TestValidatableObject',
nullable=True),
}
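# Round-trip sketch of the serialization exercised by the tests below (for
# illustration only): an object is flattened to its primitive wire format and
# rebuilt from it.
def _primitive_round_trip_example():
    obj = TestObject(id=1, name='example')
    primitive = obj.to_primitive()
    restored = objects.DesignateObject.from_primitive(primitive)
    assert restored.id == 1
    assert restored.name == 'example'
    return restored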
class DesignateObjectTest(oslotest.base.BaseTestCase):
def test_obj_cls_from_name(self):
cls = objects.DesignateObject.obj_cls_from_name('TestObject')
self.assertEqual(TestObject, cls)
cls = objects.DesignateObject.obj_cls_from_name('TestObjectDict')
self.assertEqual(TestObjectDict, cls)
cls = objects.DesignateObject.obj_cls_from_name('TestObjectList')
self.assertEqual(TestObjectList, cls)
def test_from_primitive(self):
primitive = {
'designate_object.name': 'TestObject',
'designate_object.data': {
'id': 1,
},
'designate_object.changes': [],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0',
}
obj = objects.DesignateObject.from_primitive(primitive)
# Validate it has been thawed correctly
self.assertEqual(1, obj.id)
# Ensure the ID field has a value
self.assertTrue(obj.obj_attr_is_set('id'))
# Ensure the name field has no value
self.assertFalse(obj.obj_attr_is_set('name'))
# Ensure the changes list is empty
self.assertEqual(0, len(obj.obj_what_changed()))
def test_from_primitive_recursive(self):
primitive = {
'designate_object.name': 'TestObject',
'designate_object.data': {
'id': 1,
'nested': {
'designate_object.name': 'TestObject',
'designate_object.data': {
'id': 2,
},
'designate_object.changes': [],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0',
}
},
'designate_object.changes': [],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0',
}
obj = objects.DesignateObject.from_primitive(primitive)
# Validate it has been thawed correctly
self.assertEqual(1, obj.id)
self.assertEqual(2, obj.nested.id)
def test_from_dict(self):
obj = TestObject.from_dict({
'id': 1,
})
# Validate it has been thawed correctly
self.assertEqual(1, obj.id)
# Ensure the ID field has a value
self.assertTrue(obj.obj_attr_is_set('id'))
# Ensure the name field has no value
self.assertFalse(obj.obj_attr_is_set('name'))
# Ensure the changes list has one entry for the id field
self.assertEqual({'id'}, obj.obj_what_changed())
def test_from_dict_recursive(self):
obj = TestObject.from_dict({
'id': 1,
'nested': {
'id': 2,
},
})
# Validate it has been thawed correctly
self.assertEqual(1, obj.id)
self.assertEqual(2, obj.nested.id)
# Ensure the changes list has two entries, one for the id field and the
# other for the nested field
self.assertEqual({'id', 'nested'}, obj.obj_what_changed())
# Ensure the changes list has one entry for the id field
self.assertEqual({'id'}, obj.nested.obj_what_changed())
def test_from_dict_nested_list(self):
obj = TestObject.from_dict({
'id': 1,
'nested_list': [{
'id': 2,
}, {
'id': 3,
}],
})
# Validate it has been thawed correctly
self.assertEqual(1, obj.id)
self.assertEqual(2, obj.nested_list[0].id)
self.assertEqual(3, obj.nested_list[1].id)
# Ensure the changes list has two entries, one for the id field and the
# other for the nested field
self.assertEqual({'id', 'nested_list'}, obj.obj_what_changed())
def test_from_list(self):
with testtools.ExpectedException(NotImplementedError):
TestObject.from_list([])
def test_init_invalid(self):
with testtools.ExpectedException(TypeError):
TestObject(extra_field='Fail')
def test_hasattr(self):
obj = TestObject()
# Success Cases
self.assertTrue(hasattr(obj, 'id'),
"Should have id attribute")
self.assertTrue(hasattr(obj, 'name'),
"Should have name attribute")
# Failure Cases
self.assertFalse(hasattr(obj, 'email'),
"Should not have email attribute")
self.assertFalse(hasattr(obj, 'names'),
"Should not have names attribute")
def test_setattr(self):
obj = TestObject()
obj.id = 1
self.assertEqual(1, obj.id)
self.assertEqual(1, len(obj.obj_what_changed()))
obj.name = 'MyName'
self.assertEqual('MyName', obj.name)
self.assertEqual(2, len(obj.obj_what_changed()))
def test_setattr_neg(self):
obj = TestObject()
with testtools.ExpectedException(AttributeError):
obj.badthing = 'demons'
def test_to_primitive(self):
obj = TestObject(id=1)
# Ensure only the id attribute is returned
primitive = obj.to_primitive()
expected = {
'designate_object.name': 'TestObject',
'designate_object.data': {
'id': 1,
},
'designate_object.changes': ['id'],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0',
}
self.assertEqual(expected, primitive)
# Set the name attribute to a None value
obj.name = None
# Ensure both the id and name attributes are returned
primitive = obj.to_primitive()
expected = {
'designate_object.name': 'TestObject',
'designate_object.data': {
'id': 1,
'name': None,
},
'designate_object.changes': ['id', 'name'],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0',
}
self.assertEqual(expected, primitive)
def test_to_primitive_recursive(self):
obj = TestObject(id=1, nested=TestObject(id=2))
# Ensure only the id attribute is returned
primitive = obj.to_primitive()
expected = {
'designate_object.name': 'TestObject',
'designate_object.data': {
'id': 1,
'nested': {
'designate_object.name': 'TestObject',
'designate_object.data': {
'id': 2,
},
'designate_object.changes': ['id'],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0',
}
},
'designate_object.changes': ['id', 'nested'],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0',
}
self.assertEqual(expected, primitive)
def test_to_dict(self):
obj = TestObject(id=1)
# Ensure only the id attribute is returned
dict_ = obj.to_dict()
expected = {
'id': 1,
}
self.assertEqual(expected, dict_)
# Set the name attribute to a None value
obj.name = None
# Ensure both the id and name attributes are returned
dict_ = obj.to_dict()
expected = {
'id': 1,
'name': None,
}
self.assertEqual(expected, dict_)
def test_to_dict_recursive(self):
obj = TestObject(id=1, nested=TestObject(id=2))
# Ensure only the id attribute is returned
dict_ = obj.to_dict()
expected = {
'id': 1,
'nested': {
'id': 2,
},
}
self.assertEqual(expected, dict_)
def test_update(self):
obj = TestObject(id=1, name='test')
obj.update({'id': 'new_id', 'name': 'new_name'})
self.assertEqual('new_id', obj.id)
self.assertEqual('new_name', obj.name)
def test_update_unexpected_attribute(self):
obj = TestObject(id=1, name='test')
with testtools.ExpectedException(AttributeError):
obj.update({'id': 'new_id', 'new_key': 3})
def test_validate(self):
obj = TestValidatableObject()
# ID is required, so the object is not valid
with testtools.ExpectedException(exceptions.InvalidObject):
obj.validate()
with testtools.ExpectedException(ValueError):
obj.id = 'MyID'
# Set the ID field to a valid value
obj.id = 'ffded5c4-e4f6-4e02-a175-48e13c5c12a0'
obj.validate()
def test_validate_recursive(self):
with testtools.ExpectedException(ValueError):
TestValidatableObject(
id='MyID',
nested=TestValidatableObject(id='MyID'))
with testtools.ExpectedException(ValueError):
TestValidatableObject(
id='ffded5c4-e4f6-4e02-a175-48e13c5c12a0',
nested=TestValidatableObject(
id='MyID'))
obj = TestValidatableObject(
id='ffded5c4-e4f6-4e02-a175-48e13c5c12a0',
nested=TestValidatableObject(
id='ffded5c4-e4f6-4e02-a175-48e13c5c12a0'))
obj.validate()
def test_obj_attr_is_set(self):
obj = TestObject()
self.assertFalse(obj.obj_attr_is_set('name'))
obj.name = "My Name"
self.assertTrue(obj.obj_attr_is_set('name'))
def test_obj_what_changed(self):
obj = TestObject()
self.assertEqual(set([]), obj.obj_what_changed())
obj.name = "My Name"
self.assertEqual({'name'}, obj.obj_what_changed())
def test_obj_get_changes(self):
obj = TestObject()
self.assertEqual({}, obj.obj_get_changes())
obj.name = "My Name"
self.assertEqual({'name': "My Name"}, obj.obj_get_changes())
def test_obj_reset_changes(self):
obj = TestObject()
obj.name = "My Name"
self.assertEqual(1, len(obj.obj_what_changed()))
obj.obj_reset_changes()
self.assertEqual(0, len(obj.obj_what_changed()))
def test_obj_reset_changes_subset(self):
obj = TestObject()
obj.id = "My ID"
obj.name = "My Name"
self.assertEqual(2, len(obj.obj_what_changed()))
obj.obj_reset_changes(['id'])
self.assertEqual(1, len(obj.obj_what_changed()))
self.assertEqual({'name': "My Name"}, obj.obj_get_changes())
def test_obj_reset_changes_recursive(self):
obj = TestObject()
obj.id = "My ID"
obj.name = "My Name"
obj.nested = TestObject()
obj.nested.id = "My ID"
self.assertEqual(3, len(obj.obj_what_changed()))
obj.obj_reset_changes()
self.assertEqual(1, len(obj.obj_what_changed()))
obj.obj_reset_changes(recursive=True)
self.assertEqual(0, len(obj.obj_what_changed()))
def test_obj_get_original_value(self):
# Create an object
obj = TestObject()
obj.id = "My ID"
obj.name = "My Name"
        # Reset one of the changes
obj.obj_reset_changes(['id'])
# Update the reset field
obj.id = "My New ID"
# Ensure the "current" value is correct
self.assertEqual("My New ID", obj.id)
# Ensure the "original" value is correct
self.assertEqual("My ID", obj.obj_get_original_value('id'))
self.assertEqual("My Name", obj.obj_get_original_value('name'))
# Update the reset field again
obj.id = "My New New ID"
# Ensure the "current" value is correct
self.assertEqual("My New New ID", obj.id)
# Ensure the "original" value is still correct
self.assertEqual("My ID", obj.obj_get_original_value('id'))
self.assertEqual("My Name", obj.obj_get_original_value('name'))
        # Ensure a KeyError is raised when no original value exists
with testtools.ExpectedException(KeyError):
obj.obj_get_original_value('nested')
def test_deepcopy(self):
# Create the Original object
o_obj = TestObject()
o_obj.id = "My ID"
o_obj.name = "My Name"
# Clear the "changed" flag for one of the two fields we set
o_obj.obj_reset_changes(['name'])
# Deepcopy the object
c_obj = copy.deepcopy(o_obj)
# Ensure the copy was successful
self.assertEqual(o_obj.id, c_obj.id)
self.assertEqual(o_obj.name, c_obj.name)
self.assertEqual(o_obj.obj_attr_is_set('nested'),
c_obj.obj_attr_is_set('nested'))
self.assertEqual(o_obj.obj_get_changes(), c_obj.obj_get_changes())
self.assertEqual(o_obj.to_primitive(), c_obj.to_primitive())
def test_eq(self):
# Create two equal objects
obj_one = TestObject(id="My ID", name="My Name")
obj_two = TestObject(id="My ID", name="My Name")
# Ensure they evaluate to equal
self.assertEqual(obj_one, obj_two)
# Change a value on one object
obj_two.name = 'Other Name'
# Ensure they do not evaluate to equal
self.assertNotEqual(obj_one, obj_two)
def test_eq_false(self):
obj = TestObject(id="My ID", name="My Name")
self.assertFalse(obj == tuple())
self.assertNotEqual(obj, tuple())
def test_ne(self):
# Create two equal objects
obj_one = TestObject(id="My ID", name="My Name")
obj_two = TestObject(id="My ID", name="My Name")
# Ensure they evaluate to equal
self.assertEqual(obj_one, obj_two)
# Change a value on one object
obj_two.name = 'Other Name'
# Ensure they do not evaluate to equal
self.assertNotEqual(obj_one, obj_two)
class DictObjectMixinTest(oslotest.base.BaseTestCase):
def test_cast_to_dict(self):
# Create an object
obj = TestObjectDict()
obj.id = 1
obj.name = "My Name"
expected = {
'id': 1,
'name': 'My Name',
}
self.assertEqual(expected, dict(obj))
    def test_getitem(self):
obj = TestObjectDict(name=1)
self.assertEqual(1, obj['name'])
def test_setitem(self):
obj = TestObjectDict()
obj['name'] = 1
self.assertEqual(1, obj.name)
def test_contains(self):
obj = TestObjectDict(name=1)
self.assertIn('name', obj)
def test_get(self):
obj = TestObjectDict(name=1)
v = obj.get('name')
self.assertEqual(1, v)
def test_get_missing(self):
obj = TestObjectDict(name=1)
self.assertFalse(obj.obj_attr_is_set('foo'))
with testtools.ExpectedException(AttributeError):
obj.get('foo')
def test_get_default(self):
obj = TestObjectDict(name='n')
v = obj.get('name', value='default')
self.assertEqual('n', v)
def test_get_default_with_patch(self):
obj = TestObjectDict(name='v')
fname = 'designate.objects.base.DesignateObject.obj_attr_is_set'
with mock.patch(fname) as attr_is_set:
attr_is_set.return_value = False
v = obj.get('name', value='default')
self.assertEqual('default', v)
def test_iteritems(self):
obj = TestObjectDict(name=None, id=1)
items = tuple(obj.items())
self.assertEqual(
[('id', 1), ('name', None)],
sorted(items)
)
def test_jsonutils_to_primitive(self):
obj = TestObjectDict(name="foo")
dumped = jsonutils.to_primitive(obj, convert_instances=True)
self.assertIsInstance(dumped, dict)
self.assertEqual('foo', dumped['name'])
class ListObjectMixinTest(oslotest.base.BaseTestCase):
def test_from_primitive(self):
primitive = {
'designate_object.name': 'TestObjectList',
'designate_object.data': {
'objects': [
{'designate_object.changes': ['id'],
'designate_object.data': {'id': 'One'},
'designate_object.name': 'TestObject',
'designate_object.namespace': 'designate',
'designate_object.version': '1.0'},
{'designate_object.changes': ['id'],
'designate_object.data': {'id': 'Two'},
'designate_object.name': 'TestObject',
'designate_object.namespace': 'designate',
'designate_object.version': '1.0'},
],
},
'designate_object.changes': ['objects'],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0',
}
obj = objects.DesignateObject.from_primitive(primitive)
self.assertEqual(2, len(obj))
self.assertEqual(2, len(obj.objects))
self.assertIsInstance(obj[0], TestObject)
self.assertIsInstance(obj[1], TestObject)
self.assertEqual('One', obj[0].id)
self.assertEqual('Two', obj[1].id)
def test_from_primitive_with_changes(self):
primitive = {
'designate_object.name': 'TestObjectList',
'designate_object.data': {
'objects': [
{'designate_object.changes': ['id'],
'designate_object.data': {'id': 'One'},
'designate_object.name': 'TestObject',
'designate_object.namespace': 'designate',
'designate_object.version': '1.0'},
{'designate_object.changes': ['id'],
'designate_object.data': {'id': 'Two'},
'designate_object.name': 'TestObject',
'designate_object.namespace': 'designate',
'designate_object.version': '1.0'},
],
},
'designate_object.changes': ['objects'],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0',
}
obj = objects.DesignateObject.from_primitive(primitive)
self.assertEqual(2, len(obj))
self.assertEqual(2, len(obj.objects))
self.assertIsInstance(obj[0], TestObject)
self.assertIsInstance(obj[1], TestObject)
self.assertEqual('One', obj[0].id)
self.assertEqual('Two', obj[1].id)
self.assertEqual(1, len(obj.obj_what_changed()))
def test_from_primitive_no_changes(self):
primitive = {
'designate_object.name': 'TestObjectList',
'designate_object.data': {
'objects': [
{'designate_object.changes': [],
'designate_object.data': {'id': 'One'},
'designate_object.name': 'TestObject',
'designate_object.namespace': 'designate',
'designate_object.version': '1.0'},
{'designate_object.changes': [],
'designate_object.data': {'id': 'Two'},
'designate_object.name': 'TestObject',
'designate_object.namespace': 'designate',
'designate_object.version': '1.0'},
],
},
'designate_object.changes': [],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0',
}
obj = objects.DesignateObject.from_primitive(primitive)
self.assertEqual(2, len(obj))
self.assertEqual(2, len(obj.objects))
self.assertIsInstance(obj[0], TestObject)
self.assertIsInstance(obj[1], TestObject)
self.assertEqual('One', obj[0].id)
self.assertEqual('Two', obj[1].id)
self.assertEqual(0, len(obj.obj_what_changed()))
def test_cast_to_list(self):
# Create a few objects
obj_one = TestObject()
obj_one.id = "One"
obj_two = TestObject()
obj_two.id = "Two"
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
expected = [obj_one, obj_two]
self.assertEqual(expected, list(obj))
def test_to_primitive(self):
# Create a few objects
obj_one = TestObject()
obj_one.id = "One"
obj_two = TestObject()
obj_two.id = "Two"
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
primitive = obj.to_primitive()
expected = {
'designate_object.name': 'TestObjectList',
'designate_object.data': {
'objects': [
{'designate_object.changes': ['id'],
'designate_object.data': {'id': 'One'},
'designate_object.name': 'TestObject',
'designate_object.namespace': 'designate',
'designate_object.version': '1.0'},
{'designate_object.changes': ['id'],
'designate_object.data': {'id': 'Two'},
'designate_object.name': 'TestObject',
'designate_object.namespace': 'designate',
'designate_object.version': '1.0'},
],
},
'designate_object.changes': ['objects'],
'designate_object.namespace': 'designate',
'designate_object.version': '1.0'
}
self.assertEqual(expected, primitive)
def test_to_primitive_nested_obj(self):
# Create a few objects
obj_one = TestObject()
obj_two = TestObject()
obj_two.id = "Two"
obj_one.nested = obj_two
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
primitive = obj.to_primitive()
        expected = {
            'designate_object.name': 'TestObjectList',
            'designate_object.changes': ['objects'],
            'designate_object.data': {
                'objects': [
                    {'designate_object.changes': ['nested'],
                     'designate_object.data': {
                         'nested': {
                             'designate_object.changes': ['id'],
                             'designate_object.data': {'id': 'Two'},
                             'designate_object.name': 'TestObject',
                             'designate_object.namespace': 'designate',
                             'designate_object.version': '1.0',
                         },
                     },
                     'designate_object.name': 'TestObject',
                     'designate_object.namespace': 'designate',
                     'designate_object.version': '1.0'},
                    {'designate_object.changes': ['id'],
                     'designate_object.data': {'id': 'Two'},
                     'designate_object.name': 'TestObject',
                     'designate_object.namespace': 'designate',
                     'designate_object.version': '1.0'},
                ],
            },
            'designate_object.namespace': 'designate',
            'designate_object.version': '1.0',
        }
self.assertEqual(expected, primitive)
def test_obj_what_changed(self):
# Create a few objects
obj_one = TestObject()
obj_two = TestObject()
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
# Make sure there are no changes
obj.obj_reset_changes()
changes = obj.obj_what_changed()
expected = set([])
self.assertEqual(expected, changes)
# Make some changes
obj_one.id = "One"
obj_two.id = "Two"
changes = obj.obj_what_changed()
expected = {'objects'}
self.assertEqual(expected, changes)
def test_get_slice(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
theslice = obj[1:]
expected = TestObjectList(objects=[obj_two])
self.assertEqual(expected.objects, theslice.objects)
self.assertNotEqual(obj.objects, theslice.objects)
def test_setitem(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
obj[1] = obj_one
self.assertEqual(obj.objects, [obj_one, obj_one])
def test_contains(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
obj_three = TestObject(id="Three")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
self.assertIn(obj_one, obj)
self.assertIn(obj_two, obj)
self.assertNotIn(obj_three, obj)
def test_extend(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
obj_three = TestObject(id="Three")
# Create a ListObject
ext_obj = TestObjectList(objects=[obj_one])
obj = TestObjectList(objects=[obj_one, obj_two, obj_three])
ext_obj.extend([obj_two, obj_three])
self.assertEqual(obj.objects, ext_obj.objects)
def test_insert(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
obj_three = TestObject(id="Three")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_three])
obj.insert(1, obj_two)
self.assertEqual([obj_one, obj_two, obj_three], obj.objects)
def test_remove(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
obj.remove(obj_one)
self.assertEqual([obj_two], obj.objects)
def test_index(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
obj_three = TestObject(id="Three")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two, obj_three])
self.assertEqual(1, obj.index(obj_two))
def test_count(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two, obj_two])
self.assertEqual(2, obj.count(obj_two))
def test_sort(self):
# Create a few objects
obj_one = TestObject(id=1)
obj_two = TestObject(id=2)
obj_three = TestObject(id=3)
# Create a ListObject
obj = TestObjectList(objects=[obj_two, obj_three, obj_one])
obj.sort(key=attrgetter('id'))
self.assertEqual([obj_one, obj_two, obj_three], obj.objects)
def test_to_dict_list_mixin(self):
# Create a ListObject containing an ObjectList
obj = TestObjectList(objects=[TestObject()])
dict_ = obj.to_dict()
expected = {'objects': [{}]}
self.assertEqual(expected, dict_)
def test_to_list(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_three = TestObject(id="Three")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_three])
li = obj.to_list()
self.assertEqual([{'id': 'One'}, {'id': 'Three'}], li)
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.tools import assert_true, assert_equal, assert_false
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access
from hadoop import pseudo_hdfs4
from liboozie.oozie_api_tests import OozieServerProvider
from oozie.tests import OozieBase
from pig.models import create_or_update_script, PigScript
from pig.api import OozieApi, get
class TestPigBase(object):
SCRIPT_ATTRS = {
'id': 1000,
'name': 'Test',
'script': 'A = LOAD "$data"; STORE A INTO "$output";',
'parameters': [],
'resources': [],
'hadoopProperties': []
}
def setUp(self):
self.c = make_logged_in_client(is_superuser=False)
grant_access("test", "test", "pig")
self.user = User.objects.get(username='test')
def create_script(self):
return create_script(self.user)
def create_script(user, xattrs=None):
attrs = {'user': user}
attrs.update(TestPigBase.SCRIPT_ATTRS)
if xattrs is not None:
attrs.update(xattrs)
return create_or_update_script(**attrs)
def test_make_log_links():
# FileBrowser
assert_equal(
"""<a href="/filebrowser/view/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a> <dir>""",
OozieApi._make_links('hdfs://localhost:8020/user/romain/tmp <dir>')
)
assert_equal(
"""<a href="/filebrowser/view/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a><dir>""",
OozieApi._make_links('hdfs://localhost:8020/user/romain/tmp<dir>')
)
assert_equal(
"""output: <a href="/filebrowser/view/user/romain/tmp" target="_blank">/user/romain/tmp</a> <dir>""",
OozieApi._make_links('output: /user/romain/tmp <dir>')
)
assert_equal(
'Successfully read 3760 records (112648 bytes) from: "<a href="/filebrowser/view/user/hue/pig/examples/data/midsummer.txt" target="_blank">/user/hue/pig/examples/data/midsummer.txt</a>"',
OozieApi._make_links('Successfully read 3760 records (112648 bytes) from: "/user/hue/pig/examples/data/midsummer.txt"')
)
assert_equal(
'data,upper_case MAP_ONLY <a href="/filebrowser/view/user/romain/out/fffff" target="_blank">hdfs://localhost:8020/user/romain/out/fffff</a>,',
OozieApi._make_links('data,upper_case MAP_ONLY hdfs://localhost:8020/user/romain/out/fffff,')
)
assert_equal(
'MAP_ONLY <a href="/filebrowser/view/user/romain/out/fffff" target="_blank">hdfs://localhost:8020/user/romain/out/fffff</a>\n2013',
OozieApi._make_links('MAP_ONLY hdfs://localhost:8020/user/romain/out/fffff\n2013')
)
assert_equal(
' <a href="/filebrowser/view/jobs.tsv" target="_blank">/jobs.tsv</a> ',
OozieApi._make_links(' /jobs.tsv ')
)
assert_equal(
'<a href="/filebrowser/view/user/romain/job_pos_2012.tsv" target="_blank">hdfs://localhost:8020/user/romain/job_pos_2012.tsv</a>',
OozieApi._make_links('hdfs://localhost:8020/user/romain/job_pos_2012.tsv')
)
# JobBrowser
assert_equal(
"""<a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
OozieApi._make_links('job_201306261521_0058')
)
assert_equal(
"""Hadoop Job IDs executed by Pig: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
OozieApi._make_links('Hadoop Job IDs executed by Pig: job_201306261521_0058')
)
assert_equal(
"""MapReduceLauncher - HadoopJobId: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
OozieApi._make_links('MapReduceLauncher - HadoopJobId: job_201306261521_0058')
)
assert_equal(
"""- More information at: http://localhost:50030/jobdetails.jsp?jobid=<a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
OozieApi._make_links('- More information at: http://localhost:50030/jobdetails.jsp?jobid=job_201306261521_0058')
)
assert_equal(
""" Logging error messages to: job_201307091553_0028/attempt_201307091553_002""",
OozieApi._make_links(' Logging error messages to: job_201307091553_0028/attempt_201307091553_002')
)
assert_equal(
""" pig-job_201307091553_0028.log""",
OozieApi._make_links(' pig-job_201307091553_0028.log')
)
class TestMock(TestPigBase):
def test_create_script(self):
pig_script = self.create_script()
assert_equal('Test', pig_script.dict['name'])
def test_check_hcatalogs_sharelib(self):
api = get(None, None, self.user)
pig_script = self.create_script()
# Regular
wf = api._create_workflow(pig_script, '[]')
assert_false({'name': u'oozie.action.sharelib.for.pig', 'value': u'pig,hcatalog'} in wf.find_all_parameters(), wf.find_all_parameters())
# With HCat
pig_script.update_from_dict({
'script':"""
a = LOAD 'sample_07' USING org.apache.hcatalog.pig.HCatLoader();
dump a;
"""})
pig_script.save()
wf = api._create_workflow(pig_script, '[]')
assert_true({'name': u'oozie.action.sharelib.for.pig', 'value': u'pig,hcatalog'} in wf.find_all_parameters(), wf.find_all_parameters())
def test_editor_view(self):
response = self.c.get(reverse('pig:app'))
assert_true('Unsaved script' in response.content)
def test_save(self):
attrs = {'user': self.user,}
attrs.update(TestPigBase.SCRIPT_ATTRS)
attrs['parameters'] = json.dumps(TestPigBase.SCRIPT_ATTRS['parameters'])
attrs['resources'] = json.dumps(TestPigBase.SCRIPT_ATTRS['resources'])
attrs['hadoopProperties'] = json.dumps(TestPigBase.SCRIPT_ATTRS['hadoopProperties'])
# Save
self.c.post(reverse('pig:save'), data=attrs, follow=True)
# Update
self.c.post(reverse('pig:save'), data=attrs, follow=True)
def parse_oozie_logs(self):
api = get(None, None, self.user)
assert_equal(
'''Run pig script using PigRunner.run() for Pig version 0.8+
Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported)
compiled Jun 30 2013, 03:40:22
Run pig script using PigRunner.run() for Pig version 0.8+
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - dfs.df.interval is deprecated. Instead, use fs.df.interval
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - mapred.task.tracker.http.address is deprecated. Instead, use mapreduce.tasktracker.http.address
2013-10-09 17:30:39,833 [main] INFO org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: localhost:8032
hdfs://localhost:8020/user/romain/.Trash <dir>
hdfs://localhost:8020/user/romain/examples <dir>
hdfs://localhost:8020/user/romain/tweets <dir>
hdfs://localhost:8020/user/romain/wordcount.jar<r 1> 3165
hdfs://localhost:8020/user/romain/words <dir>
hdfs://localhost:8020/user/romain/yelp <dir>''', api._match_logs({'logs': [None, OOZIE_LOGS]}))
class TestWithHadoop(OozieBase):
def setUp(self):
super(TestWithHadoop, self).setUp()
self.c = make_logged_in_client(is_superuser=False)
grant_access("test", "test", "pig")
self.user = User.objects.get(username='test')
self.c.post(reverse('pig:install_examples'))
def test_create_workflow(self):
cluster = pseudo_hdfs4.shared_cluster()
api = OozieApi(cluster.fs, cluster.jt, self.user)
xattrs = {
'parameters': [
{'name': 'output', 'value': '/tmp'},
{'name': '-param', 'value': 'input=/data'}, # Alternative way for params
{'name': '-optimizer_off', 'value': 'SplitFilter'},
{'name': '-v', 'value': ''},
],
'resources': [
{'type': 'file', 'value': '/tmp/file'},
{'type': 'archive', 'value': '/tmp/file.zip'},
],
'hadoopProperties': [
{'name': 'mapred.map.tasks.speculative.execution', 'value': 'false'},
{'name': 'mapred.job.queue', 'value': 'fast'},
]
}
pig_script = create_script(self.user, xattrs)
params = json.dumps([
{'name': 'output', 'value': '/tmp2'},
])
workflow = api._create_workflow(pig_script, params)
pig_action = workflow.start.get_child('to').get_full_node()
assert_equal([
{u'type': u'argument', u'value': u'-param'}, {u'type': u'argument', u'value': u'output=/tmp2'},
{u'type': u'argument', u'value': u'-param'}, {u'type': u'argument', u'value': u'input=/data'},
{u'type': u'argument', u'value': u'-optimizer_off'}, {u'type': u'argument', u'value': u'SplitFilter'},
{u'type': u'argument', u'value': u'-v'},
], pig_action.get_params())
assert_equal([
{u'name': u'mapred.map.tasks.speculative.execution', u'value': u'false'},
{u'name': u'mapred.job.queue', u'value': u'fast'},
], pig_action.get_properties())
assert_equal(['/tmp/file'], pig_action.get_files())
assert_equal([
{u'dummy': u'', u'name': u'/tmp/file.zip'},
], pig_action.get_archives())
def wait_until_completion(self, pig_script_id, timeout=300.0, step=5, expected_status='SUCCEEDED'):
script = PigScript.objects.get(id=pig_script_id)
job_id = script.dict['job_id']
response = self.c.get(reverse('pig:watch', args=[job_id]))
response = json.loads(response.content)
start = time.time()
while response['workflow']['status'] in ['PREP', 'RUNNING'] and time.time() - start < timeout:
time.sleep(step)
response = self.c.get(reverse('pig:watch', args=[job_id]))
response = json.loads(response.content)
logs = OozieServerProvider.oozie.get_job_log(job_id)
if response['workflow']['status'] != expected_status:
msg = "[%d] %s took more than %d to complete or %s: %s" % (time.time(), job_id, timeout, response['workflow']['status'], logs)
raise Exception(msg)
return pig_script_id
def test_submit(self):
script = PigScript.objects.get(id=1100713)
script_dict = script.dict
post_data = {
'id': script.id,
'name': script_dict['name'],
'script': script_dict['script'],
'user': script.owner,
'parameters': json.dumps(script_dict['parameters']),
'resources': json.dumps(script_dict['resources']),
'hadoopProperties': json.dumps(script_dict['hadoopProperties']),
'submissionVariables': json.dumps([{"name": "output", "value": '/tmp/test_pig'}]),
}
response = self.c.post(reverse('pig:run'), data=post_data, follow=True)
job_id = json.loads(response.content)['id']
self.wait_until_completion(job_id)
def test_stop(self):
script = PigScript.objects.get(id=1100713)
script_dict = script.dict
post_data = {
'id': script.id,
'name': script_dict['name'],
'script': script_dict['script'],
'user': script.owner,
'parameters': json.dumps(script_dict['parameters']),
'resources': json.dumps(script_dict['resources']),
'hadoopProperties': json.dumps(script_dict['hadoopProperties']),
'submissionVariables': json.dumps([{"name": "output", "value": '/tmp/test_pig'}]),
}
submit_response = self.c.post(reverse('pig:run'), data=post_data, follow=True)
script = PigScript.objects.get(id=json.loads(submit_response.content)['id'])
assert_true(script.dict['job_id'], script.dict)
self.c.post(reverse('pig:stop'), data={'id': script.id}, follow=True)
self.wait_until_completion(json.loads(submit_response.content)['id'], expected_status='KILLED')
OOZIE_LOGS =""" Log Type: stdout
Log Length: 117627
Oozie Launcher starts
Heart beat
Starting the execution of prepare actions
Completed the execution of prepare actions successfully
Files in current dir:/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/.
======================
File: commons-cli-1.2.jar
File: antlr-runtime-3.4.jar
File: stringtemplate-3.2.1.jar
File: script.pig
File: jyson-1.0.2.jar
Oozie Java/Map-Reduce/Pig action launcher-job configuration
=================================================================
Workflow job id : 0000000-131009162028638-oozie-oozi-W
Workflow action id: 0000000-131009162028638-oozie-oozi-W@pig
Classpath :
------------------------
/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002
/etc/hadoop/conf
/usr/lib/hadoop/hadoop-nfs-2.1.0-cdh5.0.0-SNAPSHOT.jar
/usr/lib/hadoop/hadoop-common-2.1.0-cdh5.0.0-SNAPSHOT.jar
/usr/lib/hadoop/hadoop-auth-2.1.0-cdh5.0.0-SNAPSHOT.jar
/usr/lib/hadoop/hadoop-common.jar
/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/jyson-1.0.2.jar
------------------------
Main class : org.apache.oozie.action.hadoop.PigMain
Maximum output : 2048
Arguments :
Java System Properties:
------------------------
#
#Wed Oct 09 17:30:39 PDT 2013
java.runtime.name=Java(TM) SE Runtime Environment
awt.toolkit=sun.awt.X11.XToolkit
java.vm.info=mixed mode
java.version=1.7.0_40
java.ext.dirs=/usr/lib/jvm/java-7-oracle/jre/lib/ext\:/usr/java/packages/lib/ext
sun.boot.class.path=/usr/lib/jvm/java-7-oracle/jre/lib/resources.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/rt.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/sunrsasign.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/jsse.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/jce.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/charsets.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/jfr.jar\:/usr/lib/jvm/java-7-oracle/jre/classes
java.vendor=Oracle Corporation
file.separator=/
oozie.launcher.job.id=job_1381360805876_0001
oozie.action.stats.properties=/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/stats.properties
java.vendor.url.bug=http\://bugreport.sun.com/bugreport/
sun.io.unicode.encoding=UnicodeLittle
sun.cpu.endian=little
sun.cpu.isalist=
------------------------
=================================================================
>>> Invoking Main class now >>>
Oozie Pig action configuration
=================================================================
------------------------
Setting env property for mapreduce.job.credentials.binary to:/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/container_tokens
------------------------
pig.properties:
--------------------
mapreduce.job.ubertask.enable : false
yarn.resourcemanager.max-completed-applications : 10000
yarn.resourcemanager.delayed.delegation-token.removal-interval-ms : 30000
yarn.nodemanager.delete.debug-delay-sec : 0
hadoop.ssl.require.client.cert : false
dfs.datanode.max.transfer.threads : 4096
--------------------
Pig script [script.pig] content:
------------------------
ls
------------------------
Current (local) dir = /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002
Pig command arguments :
-file
script.pig
-log4jconf
/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/piglog4j.properties
-logfile
pig-job_1381360805876_0001.log
=================================================================
>>> Invoking Pig command line now >>>
Run pig script using PigRunner.run() for Pig version 0.8+
Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported)
compiled Jun 30 2013, 03:40:22
Run pig script using PigRunner.run() for Pig version 0.8+
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - dfs.df.interval is deprecated. Instead, use fs.df.interval
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - mapred.task.tracker.http.address is deprecated. Instead, use mapreduce.tasktracker.http.address
2013-10-09 17:30:39,833 [main] INFO org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: localhost:8032
hdfs://localhost:8020/user/romain/.Trash <dir>
hdfs://localhost:8020/user/romain/examples <dir>
hdfs://localhost:8020/user/romain/tweets <dir>
hdfs://localhost:8020/user/romain/wordcount.jar<r 1> 3165
hdfs://localhost:8020/user/romain/words <dir>
hdfs://localhost:8020/user/romain/yelp <dir>
<<< Invocation of Pig command completed <<<
Hadoop Job IDs executed by Pig:
<<< Invocation of Main class completed <<<
Oozie Launcher ends
2013-10-09 17:30:40,009 [main] INFO org.apache.hadoop.mapred.Task - Task:attempt_1381360805876_0001_m_000000_0 is done. And is in the process of committing
2013-10-09 17:30:40,087 [main] INFO org.apache.hadoop.mapred.Task - Task attempt_1381360805876_0001_m_000000_0 is allowed to commit now
2013-10-09 17:30:40,094 [main] INFO org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter - Saved output of task 'attempt_1381360805876_0001_m_000000_0' to hdfs://localhost:8020/user/romain/oozie-oozi/0000000-131009162028638-oozie-oozi-W/pig--pig/output/_temporary/1/task_1381360805876_0001_m_000000
2013-10-09 17:30:40,153 [main] INFO org.apache.hadoop.mapred.Task - Task 'attempt_1381360805876_0001_m_000000_0' done.
2013-10-09 17:30:40,254 [main] INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl - Stopping MapTask metrics system...
2013-10-09 17:30:40,257 [main] INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl - MapTask metrics system stopped.
2013-10-09 17:30:40,257 [main] INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl - MapTask metrics system shutdown complete.
"""
|
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import inspect
from collections import namedtuple
from thrift import Thrift
from tornado import gen
from tchannel.errors import OneWayNotSupportedError
from ..serializer.thrift import ThriftSerializer
from .reflection import get_service_methods
# Generated clients will use this base class.
_ClientBase = namedtuple(
'_ClientBase',
'tchannel hostport service trace protocol_headers'
)
def client_for(service, service_module, thrift_service_name=None):
"""Build a client class for the given Thrift service.
The generated class accepts a TChannel and an optional hostport as
initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered with
Hyperbahn under the name "comment", here's how this may be used:
.. code-block:: python
from comment import CommentService
CommentServiceClient = client_for("comment", CommentService)
@gen.coroutine
def post_comment(articleId, msg, hostport=None):
client = CommentServiceClient(tchannel, hostport)
yield client.postComment(articleId, CommentService.Comment(msg))
:param service:
Name of the Hyperbahn service being called. This is the name with
which the service registered with Hyperbahn.
:param service_module:
The Thrift-generated module for that service. This usually has the
same name as defined for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use this
parameter to specify it.
:returns:
An object with the same interface as the service that uses the given
TChannel to call the service.
"""
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def new(cls, tchannel, hostport=None, trace=False, protocol_headers=None):
"""
:param tchannel:
TChannel through which the requests will be sent.
:param hostport:
Address of the machine to which the requests will be sent, or None
if the TChannel will do peer selection on a per-request basis.
:param trace:
Whether Zipkin tracing is enabled.
:param protocol_headers:
Protocol-level headers to send with the request.
"""
protocol_headers = protocol_headers or {}
protocol_headers['as'] = 'thrift'
return _ClientBase.__new__(
cls, tchannel, hostport, service, trace, protocol_headers
)
new.__name__ = '__new__'
methods = {'__new__': new}
for method_name in method_names:
methods[method_name] = generate_method(
service_module, thrift_service_name, method_name
)
return type(thrift_service_name + 'Client', (_ClientBase,), methods)
def generate_method(service_module, service_name, method_name):
"""Generate a method for the given Thrift service.
:param service_module:
Thrift-generated service module
:param service_name:
Name of the Thrift service
:param method_name:
Method being called
"""
assert service_module
assert service_name
assert method_name
args_type = getattr(service_module, method_name + '_args')
result_type = getattr(service_module, method_name + '_result', None)
serializer = ThriftSerializer(result_type)
# oneway not currently supported
# TODO - write test for this
if result_type is None:
        def not_supported(self, *args, **kwargs):
            raise OneWayNotSupportedError(
                'TChannel+Thrift does not currently support oneway procedures'
)
return not_supported
result_spec = result_type.thrift_spec
# result_spec is a tuple of tuples in the form:
#
# (fieldId, fieldType, fieldName, ...)
#
# Where "..." is other information we don't care about right now.
#
# result_spec will be empty if there is no return value or exception for
# the method.
#
# Its first element, with field ID 0, contains the spec for the return
# value. It is None if the result type is void but the method may still
# throw exceptions.
#
# Elements after the first one are specs for the exceptions.
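    #
    # As a purely illustrative sketch (Comment, NotFound and the method name
    # are hypothetical, not taken from any real generated module), a result
    # struct for a method that returns a Comment and declares a NotFound
    # exception would carry a spec shaped roughly like:
    #
    #     ((0, TType.STRUCT, 'success', (Comment, Comment.thrift_spec), None),
    #      (1, TType.STRUCT, 'notFound', (NotFound, NotFound.thrift_spec), None))
    #
    # The send() coroutine below only looks at whether an entry is present at
    # index 0 (the return value) and at each entry's field name (element 2).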
endpoint = '%s::%s' % (service_name, method_name)
@gen.coroutine
def send(self, *args, **kwargs):
params = inspect.getcallargs(
getattr(service_module.Iface, method_name), self, *args, **kwargs
)
params.pop('self') # self is already known
# $methodName_args is the implicit struct containing the various
# method parameters.
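        # For instance, for a hypothetical postComment(articleId, comment)
        # method this builds a postComment_args instance and sets its
        # 'articleId' and 'comment' attributes from the keyword mapping
        # returned by inspect.getcallargs above.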
call_args = args_type()
for name, value in params.items():
setattr(call_args, name, value)
body = serializer.serialize_body(call_args)
header = serializer.serialize_header({})
response = yield self.tchannel.request(
hostport=self.hostport, service=self.service
).send(
arg1=endpoint,
arg2=header,
arg3=body, # body
headers=self.protocol_headers,
traceflag=self.trace
)
body = yield response.get_body()
call_result = serializer.deserialize_body(body)
if not result_spec:
# void return type and no exceptions allowed
raise gen.Return(None)
for exc_spec in result_spec[1:]:
# May have failed with an exception
exc = getattr(call_result, exc_spec[2])
if exc is not None:
raise exc
if result_spec[0]:
# Non-void return type. Return the result.
success = getattr(call_result, result_spec[0][2])
if success is not None:
raise gen.Return(success)
else:
# No return type specified and no exceptions raised.
raise gen.Return(None)
# Expected a result but nothing was present in the object. Something
# went wrong.
raise Thrift.TApplicationException(
Thrift.TApplicationException.MISSING_RESULT,
'%s failed: did not receive a result as expected' % method_name
)
# TODO: We should probably throw a custom exception instead.
send.__name__ = method_name
return send
|
|
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from datetime import timedelta
import uuid
import keystone.logic.types.auth as auth
import keystone.logic.types.atom as atom
import keystone.db.sqlalchemy.api as db_api
import keystone.db.sqlalchemy.models as db_models
import keystone.logic.types.fault as fault
import keystone.logic.types.tenant as tenants
import keystone.logic.types.user as users
class IDMService(object):
"This is the logical implemenation of the IDM service"
#
# Token Operations
#
def authenticate(self, credentials):
if not isinstance(credentials, auth.PasswordCredentials):
raise fault.BadRequestFault("Expecting Password Credentials!")
duser = db_api.user_get(credentials.username)
if duser == None:
raise fault.UnauthorizedFault("Unauthorized")
if not duser.enabled:
raise fault.UserDisabledFault("Your account has been disabled")
if duser.password != credentials.password:
raise fault.UnauthorizedFault("Unauthorized")
#
# Look for an existing token, or create one,
# TODO: Handle tenant/token search
#
# removing following code for multi-token
"""if not credentials.tenant_id:
dtoken = db_api.token_for_user(duser.id)
else:
dtoken = db_api.token_for_user_tenant(duser.id,
credentials.tenant_id)
"""
# added following code
dtoken = db_api.token_for_user_tenant(duser.id, credentials.tenant_id)
#---
if not dtoken or dtoken.expires < datetime.now():
dtoken = db_models.Token()
dtoken.token_id = str(uuid.uuid4())
dtoken.user_id = duser.id
if not duser.tenants:
raise fault.IDMFault("Strange: user %s is not associated "
"with a tenant!" % duser.id)
user = db_api.user_get_by_tenant(duser.id, credentials.tenant_id)
if not credentials.tenant_id or not user:
raise fault.ForbiddenFault("Error: user %s is "
"not associated "
"with a tenant! %s" % (duser.id,
credentials.tenant_id))
dtoken.tenant_id = credentials.tenant_id
#removing following code for multi token
"""else:
dtoken.tenant_id = duser.tenants[0].tenant_id"""
dtoken.expires = datetime.now() + timedelta(days=1)
db_api.token_create(dtoken)
return self.__get_auth_data(dtoken, duser)
def validate_token(self, admin_token, token_id, belongs_to=None):
self.__validate_token(admin_token)
if not token_id:
raise fault.UnauthorizedFault("Missing token")
(token, user) = self.__get_dauth_data(token_id)
if not token:
raise fault.UnauthorizedFault("Bad token, please reauthenticate")
if token.expires < datetime.now():
raise fault.ForbiddenFault("Token expired, please renew")
if not user.enabled:
raise fault.UserDisabledFault("The user %s has been disabled!"
% user.id)
return self.__get_auth_data(token, user)
def revoke_token(self, admin_token, token_id):
self.__validate_token(admin_token)
dtoken = db_api.token_get(token_id)
if not dtoken:
raise fault.ItemNotFoundFault("Token not found")
db_api.token_delete(token_id)
#
# Tenant Operations
#
def create_tenant(self, admin_token, tenant):
self.__validate_token(admin_token)
if not isinstance(tenant, tenants.Tenant):
raise fault.BadRequestFault("Expecting a Tenant")
if tenant.tenant_id == None:
raise fault.BadRequestFault("Expecting a unique Tenant Id")
if db_api.tenant_get(tenant.tenant_id) != None:
raise fault.TenantConflictFault(
"A tenant with that id already exists")
dtenant = db_models.Tenant()
dtenant.id = tenant.tenant_id
dtenant.desc = tenant.description
dtenant.enabled = tenant.enabled
db_api.tenant_create(dtenant)
return tenant
##
## GET Tenants with Pagination
##
def get_tenants(self, admin_token, marker, limit, url):
self.__validate_token(admin_token)
ts = []
dtenants = db_api.tenant_get_page(marker, limit)
for dtenant in dtenants:
ts.append(tenants.Tenant(dtenant.id,
dtenant.desc, dtenant.enabled))
prev, next = db_api.tenant_get_page_markers(marker, limit)
links = []
if prev:
links.append(atom.Link('prev', "%s?'marker=%s&limit=%s'" \
% (url, prev, limit)))
if next:
links.append(atom.Link('next', "%s?'marker=%s&limit=%s'" \
% (url, next, limit)))
return tenants.Tenants(ts, links)
def get_tenant(self, admin_token, tenant_id):
self.__validate_token(admin_token)
dtenant = db_api.tenant_get(tenant_id)
if not dtenant:
raise fault.ItemNotFoundFault("The tenant could not be found")
return tenants.Tenant(dtenant.id, dtenant.desc, dtenant.enabled)
def update_tenant(self, admin_token, tenant_id, tenant):
self.__validate_token(admin_token)
if not isinstance(tenant, tenants.Tenant):
raise fault.BadRequestFault("Expecting a Tenant")
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.ItemNotFoundFault("The tenant cloud not be found")
values = {'desc': tenant.description, 'enabled': tenant.enabled}
db_api.tenant_update(tenant_id, values)
return tenants.Tenant(dtenant.id, tenant.description, tenant.enabled)
def delete_tenant(self, admin_token, tenant_id):
self.__validate_token(admin_token)
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.ItemNotFoundFault("The tenant cloud not be found")
if not db_api.tenant_is_empty(tenant_id):
raise fault.ForbiddenFault("You may not delete a tenant that "
"contains users or groups")
db_api.tenant_delete(dtenant.id)
return None
#
# Tenant Group Operations
#
def create_tenant_group(self, admin_token, tenant, group):
self.__validate_token(admin_token)
if not isinstance(group, tenants.Group):
raise fault.BadRequestFault("Expecting a Group")
if tenant == None:
raise fault.BadRequestFault("Expecting a Tenant Id")
dtenant = db_api.tenant_get(tenant)
if dtenant == None:
raise fault.ItemNotFoundFault("The tenant not found")
if group.group_id == None:
raise fault.BadRequestFault("Expecting a Group Id")
if db_api.group_get(group.group_id) != None:
raise fault.TenantGroupConflictFault(
"A tenant group with that id already exists")
dtenant = db_models.Group()
dtenant.id = group.group_id
dtenant.desc = group.description
dtenant.tenant_id = tenant
db_api.tenant_group_create(dtenant)
return tenants.Group(dtenant.id, dtenant.desc, dtenant.tenant_id)
def get_tenant_groups(self, admin_token, tenant_id, marker, limit, url):
self.__validate_token(admin_token)
if tenant_id == None:
raise fault.BadRequestFault("Expecting a Tenant Id")
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.ItemNotFoundFault("The tenant not found")
ts = []
dtenantgroups = db_api.tenant_group_get_page(tenant_id, marker, limit)
for dtenantgroup in dtenantgroups:
ts.append(tenants.Group(dtenantgroup.id,
dtenantgroup.desc,
dtenantgroup.tenant_id))
prev, next = db_api.tenant_group_get_page_markers(tenant_id, marker,
limit)
links = []
if prev:
links.append(atom.Link('prev', "%s?'marker=%s&limit=%s'" \
% (url, prev, limit)))
if next:
links.append(atom.Link('next', "%s?'marker=%s&limit=%s'"\
% (url, next, limit)))
return tenants.Groups(ts, links)
def get_tenant_group(self, admin_token, tenant_id, group_id):
self.__validate_token(admin_token)
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.ItemNotFoundFault("The tenant not found")
dtenant = db_api.tenant_group_get(group_id, tenant_id)
if not dtenant:
raise fault.ItemNotFoundFault("The tenant group not found")
return tenants.Group(dtenant.id, dtenant.desc, dtenant.tenant_id)
def update_tenant_group(self, admin_token, tenant_id, group_id, group):
self.__validate_token(admin_token)
if not isinstance(group, tenants.Group):
raise fault.BadRequestFault("Expecting a Group")
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.ItemNotFoundFault("The tenant not found")
dtenant = db_api.tenant_group_get(group_id, tenant_id)
if not dtenant:
raise fault.ItemNotFoundFault("The tenant group not found")
if group_id != group.group_id:
raise fault.BadRequestFault("Wrong Data Provided,\
Group id not matching")
if str(tenant_id) != str(group.tenant_id):
raise fault.BadRequestFault("Wrong Data Provided,\
Tenant id not matching ")
values = {'desc': group.description}
db_api.tenant_group_update(group_id, tenant_id, values)
return tenants.Group(group_id, group.description, tenant_id)
def delete_tenant_group(self, admin_token, tenant_id, group_id):
self.__validate_token(admin_token)
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.ItemNotFoundFault("The tenant not found")
dtenant = db_api.tenant_group_get(group_id, tenant_id)
if not dtenant:
raise fault.ItemNotFoundFault("The tenant group not found")
if not db_api.tenant_group_is_empty(group_id):
raise fault.ForbiddenFault("You may not delete a tenant that "
"contains users or groups")
db_api.tenant_group_delete(group_id, tenant_id)
return None
def get_users_tenant_group(self, admin_token, tenantId, groupId, marker,
limit, url):
self.__validate_token(admin_token)
if tenantId == None:
raise fault.BadRequestFault("Expecting a Tenant Id")
if db_api.tenant_get(tenantId) == None:
raise fault.ItemNotFoundFault("The tenant not found")
if db_api.tenant_group_get(groupId, tenantId) == None:
raise fault.ItemNotFoundFault(
"A tenant group with that id not found")
ts = []
dgroupusers = db_api.users_tenant_group_get_page(groupId, marker,
limit)
for dgroupuser, dgroupuserAsso in dgroupusers:
ts.append(tenants.User(dgroupuser.id,
dgroupuser.email, dgroupuser.enabled,
tenantId, None))
links = []
if ts.__len__():
prev, next = db_api.users_tenant_group_get_page_markers(groupId,
marker, limit)
if prev:
links.append(atom.Link('prev', "%s?'marker=%s&limit=%s'" %
(url, prev, limit)))
if next:
links.append(atom.Link('next', "%s?'marker=%s&limit=%s'" %
(url, next, limit)))
return tenants.Users(ts, links)
def add_user_tenant_group(self, admin_token, tenant, group, user):
self.__validate_token(admin_token)
if db_api.tenant_get(tenant) == None:
raise fault.ItemNotFoundFault("The Tenant not found")
if db_api.group_get(group) == None:
raise fault.ItemNotFoundFault("The Group not found")
duser = db_api.user_get(user)
if duser == None:
raise fault.ItemNotFoundFault("The User not found")
if db_api.tenant_group_get(group, tenant) == None:
raise fault.ItemNotFoundFault("A tenant group with"
" that id not found")
if db_api.get_user_by_group(user, group) != None:
raise fault.UserGroupConflictFault(
"A user with that id already exists in group")
dusergroup = db_models.UserGroupAssociation()
dusergroup.user_id = user
dusergroup.group_id = group
db_api.user_tenant_group(dusergroup)
return tenants.User(duser.id, duser.email, duser.enabled,
tenant, group)
def delete_user_tenant_group(self, admin_token, tenant, group, user):
self.__validate_token(admin_token)
if db_api.tenant_get(tenant) == None:
raise fault.ItemNotFoundFault("The Tenant not found")
if db_api.group_get(group) == None:
raise fault.ItemNotFoundFault("The Group not found")
duser = db_api.user_get(user)
if duser == None:
raise fault.ItemNotFoundFault("The User not found")
if db_api.tenant_group_get(group, tenant) == None:
raise fault.ItemNotFoundFault("A tenant group with"
" that id not found")
if db_api.get_user_by_group(user, group) == None:
raise fault.ItemNotFoundFault("A user with that id "
"in a group not found")
db_api.user_tenant_group_delete(user, group)
return None
#
# Private Operations
#
def __get_dauth_data(self, token_id):
"""return token and user object for a token_id"""
token = None
user = None
if token_id:
token = db_api.token_get(token_id)
if token:
user = db_api.user_get(token.user_id)
return (token, user)
#
# User Operations
#
def create_user(self, admin_token, tenant_id, user):
self.__validate_token(admin_token)
print "@" * 80
print tenant_id
print user
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.UnauthorizedFault("Unauthorized")
if not dtenant.enabled:
raise fault.TenantDisabledFault("Your account has been disabled")
if not isinstance(user, users.User):
raise fault.BadRequestFault("Expecting a User")
if user.user_id == None:
raise fault.BadRequestFault("Expecting a unique User Id")
        if db_api.user_get_by_tenant(user.user_id, tenant_id) != None:
            raise fault.UserConflictFault(
                "A user with that id already exists in the given tenant")
if db_api.user_get(user.user_id) != None:
raise fault.UserConflictFault(
"An user with that id already exists")
if db_api.user_get_email(user.email) != None:
raise fault.EmailConflictFault(
"Email already exists")
duser = db_models.User()
duser.id = user.user_id
duser.password = user.password
duser.email = user.email
duser.enabled = user.enabled
db_api.user_create(duser)
duser_tenant = db_models.UserTenantAssociation()
duser_tenant.user_id = user.user_id
duser_tenant.tenant_id = tenant_id
db_api.user_tenant_create(duser_tenant)
return user
def get_tenant_users(self, admin_token, tenant_id, marker, limit, url):
self.__validate_token(admin_token)
if tenant_id == None:
raise fault.BadRequestFault("Expecting a Tenant Id")
dtenant = db_api.tenant_get(tenant_id)
if dtenant is None:
raise fault.ItemNotFoundFault("The tenant not found")
if not dtenant.enabled:
raise fault.TenantDisabledFault("Your account has been disabled")
ts = []
dtenantusers = db_api.users_get_by_tenant_get_page(tenant_id, marker,
limit)
for dtenantuser, dtenantuserAsso in dtenantusers:
ts.append(users.User(None, dtenantuser.id, tenant_id,
dtenantuser.email, dtenantuser.enabled))
links = []
if ts.__len__():
prev, next = db_api.users_get_by_tenant_get_page_markers(tenant_id,
marker, limit)
if prev:
links.append(atom.Link('prev', "%s?'marker=%s&limit=%s'" %
(url, prev, limit)))
if next:
links.append(atom.Link('next', "%s?'marker=%s&limit=%s'" %
(url, next, limit)))
return users.Users(ts, links)
def get_user(self, admin_token, tenant_id, user_id):
self.__validate_token(admin_token)
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.UnauthorizedFault("Unauthorized")
if not dtenant.enabled:
raise fault.TenantDisabledFault("Your account has been disabled")
duser = db_api.user_get(user_id)
if not duser:
raise fault.ItemNotFoundFault("The user could not be found")
if not duser.enabled:
raise fault.UserDisabledFault("User has been disabled")
if len(duser.tenants) > 0:
tenant_user = duser.tenants[0].tenant_id
else:
tenant_user = tenant_id
ts = []
dusergroups = db_api.user_groups_get_all(user_id)
for dusergroup, dusergroupAsso in dusergroups:
ts.append(tenants.Group(dusergroup.id, dusergroup.tenant_id, None))
return users.User_Update(None, duser.id, tenant_user, duser.email,
duser.enabled, ts)
def update_user(self, admin_token, user_id, user, tenant_id):
self.__validate_token(admin_token)
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.UnauthorizedFault("Unauthorized")
if not dtenant.enabled:
raise fault.TenantDisabledFault("Your account has been disabled")
duser = db_api.user_get(user_id)
if not duser:
raise fault.ItemNotFoundFault("The user could not be found")
if not duser.enabled:
raise fault.UserDisabledFault("User has been disabled")
if not isinstance(user, users.User):
raise fault.BadRequestFault("Expecting a User")
if db_api.user_get_email(user.email) is not None:
raise fault.EmailConflictFault(
"Email already exists")
values = {'email': user.email}
db_api.user_update(user_id, values)
duser = db_api.user_get_update(user_id)
return users.User(duser.password, duser.id, tenant_id, duser.email,
duser.enabled)
def set_user_password(self, admin_token, user_id, user, tenant_id):
self.__validate_token(admin_token)
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.UnauthorizedFault("Unauthorized")
if not dtenant.enabled:
raise fault.TenantDisabledFault("Your account has been disabled")
duser = db_api.user_get(user_id)
if not duser:
raise fault.ItemNotFoundFault("The user could not be found")
if not duser.enabled:
raise fault.UserDisabledFault("User has been disabled")
if not isinstance(user, users.User):
raise fault.BadRequestFault("Expecting a User")
duser = db_api.user_get(user_id)
if duser == None:
raise fault.ItemNotFoundFault("The user could not be found")
values = {'password': user.password}
db_api.user_update(user_id, values)
return users.User_Update(user.password, None, None, None, None, None)
def enable_disable_user(self, admin_token, user_id, user, tenant_id):
self.__validate_token(admin_token)
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.UnauthorizedFault("Unauthorized")
if not dtenant.enabled:
raise fault.TenantDisabledFault("Your account has been disabled")
duser = db_api.user_get(user_id)
if not duser:
raise fault.ItemNotFoundFault("The user could not be found")
if not isinstance(user, users.User):
raise fault.BadRequestFault("Expecting a User")
duser = db_api.user_get(user_id)
if duser == None:
raise fault.ItemNotFoundFault("The user could not be found")
values = {'enabled': user.enabled}
db_api.user_update(user_id, values)
return users.User_Update(None, None, None, None, user.enabled, None)
def delete_user(self, admin_token, user_id, tenant_id):
self.__validate_token(admin_token)
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.UnauthorizedFault("Unauthorized")
if not dtenant.enabled:
raise fault.TenantDisabledFault("Your account has been disabled")
duser = db_api.user_get(user_id)
if not duser:
raise fault.ItemNotFoundFault("The user could not be found")
duser = db_api.user_get_by_tenant(user_id, tenant_id)
if not duser:
raise fault.ItemNotFoundFault("The user could not be "
"found under given tenant")
db_api.user_delete_tenant(user_id, tenant_id)
return None
def get_user_groups(self, admin_token, tenant_id, user_id, marker, limit,
url):
self.__validate_token(admin_token)
if tenant_id == None:
raise fault.BadRequestFault("Expecting a Tenant Id")
if db_api.tenant_get(tenant_id) == None:
raise fault.ItemNotFoundFault("The tenant not found")
if not db_api.tenant_get(tenant_id).enabled:
raise fault.TenantDisabledFault("Your account has been disabled")
ts = []
dusergroups = db_api.groups_get_by_user_get_page(user_id, marker,
limit)
for dusergroup, dusergroupAsso in dusergroups:
ts.append(tenants.Group(dusergroup.id, dusergroup.desc,
dusergroup.tenant_id))
links = []
if ts.__len__():
prev, next = db_api.groups_get_by_user_get_page_markers(user_id,
marker, limit)
if prev:
links.append(atom.Link('prev', "%s?'marker=%s&limit=%s'" %
(url, prev, limit)))
if next:
links.append(atom.Link('next', "%s?'marker=%s&limit=%s'" %
(url, next, limit)))
return tenants.Groups(ts, links)
def add_user_tenant(self, admin_token, user_id, tenant_id):
self.__validate_token(admin_token)
dtenant = db_api.tenant_get(tenant_id)
if dtenant == None:
raise fault.UnauthorizedFault("Unauthorized")
if not dtenant.enabled:
raise fault.TenantDisabledFault("Your account has been disabled")
if user_id == None:
raise fault.BadRequestFault("Expecting a unique User Id")
if db_api.user_get(user_id) is None:
raise fault.ItemNotFoundFault(
"user does not exists")
        if db_api.user_get_by_tenant(user_id, tenant_id) != None:
            raise fault.UserConflictFault(
                "A user with that id already exists in the given tenant")
duser_tenant = db_models.UserTenantAssociation()
duser_tenant.user_id = user_id
duser_tenant.tenant_id = tenant_id
db_api.user_tenant_create(duser_tenant)
return None
#
# Global Group Operations
# TODO:(India Team) Rename functions
# and to maintain consistency
# with server.py
def __check_create_global_tenant(self):
dtenant = db_api.tenant_get('GlobalTenant')
if dtenant is None:
dtenant = db_models.Tenant()
dtenant.id = 'GlobalTenant'
dtenant.desc = 'GlobalTenant is Default tenant for global groups'
dtenant.enabled = True
db_api.tenant_create(dtenant)
return dtenant
def create_global_group(self, admin_token, group):
self.__validate_token(admin_token)
if not isinstance(group, tenants.GlobalGroup):
raise fault.BadRequestFault("Expecting a Group")
if group.group_id == None:
raise fault.BadRequestFault("Expecting a Group Id")
if db_api.group_get(group.group_id) != None:
raise fault.TenantGroupConflictFault(
"A tenant group with that id already exists")
gtenant = self.__check_create_global_tenant()
dtenant = db_models.Group()
dtenant.id = group.group_id
dtenant.desc = group.description
dtenant.tenant_id = gtenant.id
db_api.tenant_group_create(dtenant)
return tenants.GlobalGroup(dtenant.id, dtenant.desc, None)
def get_global_groups(self, admin_token, marker, limit, url):
self.__validate_token(admin_token)
gtenant = self.__check_create_global_tenant()
ts = []
dtenantgroups = db_api.tenant_group_get_page(gtenant.id, \
marker, limit)
for dtenantgroup in dtenantgroups:
ts.append(tenants.GlobalGroup(dtenantgroup.id,
dtenantgroup.desc))
prev, next = db_api.tenant_group_get_page_markers(gtenant.id,
marker, limit)
links = []
if prev:
links.append(atom.Link('prev', "%s?'marker=%s&limit=%s'" %
(url, prev, limit)))
if next:
links.append(atom.Link('next', "%s?'marker=%s&limit=%s'" %
(url, next, limit)))
return tenants.GlobalGroups(ts, links)
def get_global_group(self, admin_token, group_id):
self.__validate_token(admin_token)
gtenant = self.__check_create_global_tenant()
dtenant = db_api.tenant_get(gtenant.id)
if dtenant == None:
raise fault.ItemNotFoundFault("The Global tenant not found")
dtenant = db_api.tenant_group_get(group_id, gtenant.id)
if not dtenant:
raise fault.ItemNotFoundFault("The Global tenant group not found")
return tenants.GlobalGroup(dtenant.id, dtenant.desc)
def update_global_group(self, admin_token, group_id, group):
self.__validate_token(admin_token)
gtenant = self.__check_create_global_tenant()
if not isinstance(group, tenants.GlobalGroup):
raise fault.BadRequestFault("Expecting a Group")
dtenant = db_api.tenant_get(gtenant.id)
if dtenant == None:
raise fault.ItemNotFoundFault("The global tenant not found")
dtenant = db_api.tenant_group_get(group_id, gtenant.id)
if not dtenant:
raise fault.ItemNotFoundFault("The Global tenant group not found")
if group_id != group.group_id:
raise fault.BadRequestFault("Wrong Data Provided,"
"Group id not matching")
values = {'desc': group.description}
db_api.tenant_group_update(group_id, gtenant.id, values)
return tenants.GlobalGroup(group_id, group.description, gtenant.id)
def delete_global_group(self, admin_token, group_id):
self.__validate_token(admin_token)
gtenant = self.__check_create_global_tenant()
dtenant = db_api.tenant_get(gtenant.id)
if dtenant == None:
raise fault.ItemNotFoundFault("The global tenant not found")
dtenant = db_api.tenant_group_get(group_id, dtenant.id)
if not dtenant:
raise fault.ItemNotFoundFault("The global tenant group not found")
if not db_api.tenant_group_is_empty(group_id):
raise fault.ForbiddenFault("You may not delete a group that "
"contains users")
db_api.tenant_group_delete(group_id, gtenant.id)
return None
def get_users_global_group(self, admin_token, groupId, marker, limit, url):
self.__validate_token(admin_token)
gtenant = self.__check_create_global_tenant()
if gtenant.id == None:
raise fault.BadRequestFault("Expecting a global Tenant")
if db_api.tenant_get(gtenant.id) == None:
raise fault.ItemNotFoundFault("The global tenant not found")
if db_api.tenant_group_get(groupId, gtenant.id) == None:
raise fault.ItemNotFoundFault(
"A global tenant group with that id not found")
ts = []
dgroupusers = db_api.users_tenant_group_get_page(groupId, marker,
limit)
for dgroupuser, dgroupuserassoc in dgroupusers:
ts.append(tenants.User(dgroupuser.id, dgroupuser.email,
dgroupuser.enabled))
links = []
if ts.__len__():
prev, next = db_api.users_tenant_group_get_page_markers(groupId,
marker, limit)
if prev:
links.append(atom.Link('prev', "%s?'marker=%s&limit=%s'"
% (url, prev, limit)))
if next:
links.append(atom.Link('next', "%s?'marker=%s&limit=%s'"
% (url, next, limit)))
return tenants.Users(ts, links)
def add_user_global_group(self, admin_token, group, user):
self.__validate_token(admin_token)
gtenant = self.__check_create_global_tenant()
if db_api.tenant_get(gtenant.id) == None:
raise fault.ItemNotFoundFault("The Global Tenant not found")
if db_api.group_get(group) == None:
raise fault.ItemNotFoundFault("The Group not found")
duser = db_api.user_get(user)
if duser == None:
raise fault.ItemNotFoundFault("The User not found")
if db_api.tenant_group_get(group, gtenant.id) == None:
raise fault.ItemNotFoundFault("A global tenant group with"
" that id not found")
if db_api.get_user_by_group(user, group) != None:
raise fault.UserGroupConflictFault(
"A user with that id already exists in group")
dusergroup = db_models.UserGroupAssociation()
dusergroup.user_id = user
dusergroup.group_id = group
db_api.user_tenant_group(dusergroup)
return tenants.User(duser.id, duser.email, duser.enabled,
group_id=group)
def delete_user_global_group(self, admin_token, group, user):
self.__validate_token(admin_token)
gtenant = self.__check_create_global_tenant()
if db_api.tenant_get(gtenant.id) == None:
raise fault.ItemNotFoundFault("The Global Tenant not found")
if db_api.group_get(group) == None:
raise fault.ItemNotFoundFault("The Group not found")
duser = db_api.user_get(user)
if duser == None:
raise fault.ItemNotFoundFault("The User not found")
if db_api.tenant_group_get(group, gtenant.id) == None:
raise fault.ItemNotFoundFault("A global tenant group with "
"that id not found")
if db_api.get_user_by_group(user, group) == None:
raise fault.ItemNotFoundFault("A user with that id in a "
"group not found")
db_api.user_tenant_group_delete(user, group)
return None
#
def __get_auth_data(self, dtoken, duser):
"""return AuthData object for a token/user pair"""
token = auth.Token(dtoken.expires, dtoken.token_id)
gs = []
for ug in duser.groups:
dgroup = db_api.group_get(ug.group_id)
gs.append(auth.Group(dgroup.id, dgroup.tenant_id))
groups = auth.Groups(gs, [])
if len(duser.tenants) == 0:
raise fault.IDMFault("Strange: user %s is not associated "
"with a tenant!" % duser.id)
if not dtoken.tenant_id and \
db_api.user_get_by_tenant(duser.id, dtoken.tenant_id):
raise fault.IDMFault("Error: user %s is not associated "
"with a tenant! %s" % (duser.id,
dtoken.tenant_id))
user = auth.User(duser.id, dtoken.tenant_id, groups)
return auth.AuthData(token, user)
def __validate_token(self, token_id, admin=True):
if not token_id:
raise fault.UnauthorizedFault("Missing token")
(token, user) = self.__get_dauth_data(token_id)
if not token:
raise fault.ItemNotFoundFault("Bad token, please reauthenticate")
if token.expires < datetime.now():
raise fault.ForbiddenFault("Token expired, please renew")
if not user.enabled:
raise fault.UserDisabledFault("The user %s has been disabled!"
% user.id)
if admin:
for ug in user.groups:
if ug.group_id == "Admin":
return (token, user)
raise fault.UnauthorizedFault("You are not authorized "
"to make this call")
return (token, user)
|
|
import json
from datetime import datetime, timedelta
from test.factories import ProjectFactory, OrganizationFactory, IssueFactory
from test.harness import IntegrationTest
from app import db, Issue
class TestProjects(IntegrationTest):
def test_all_projects_order(self):
"""
        Test that projects are returned in order of last_updated
"""
ProjectFactory(name=u'Project 1', last_updated='Mon, 01 Jan 2010 00:00:00 GMT')
ProjectFactory(name=u'Project 2', last_updated='Tue, 01 Jan 2011 00:00:00 GMT')
ProjectFactory(name=u'Non Github Project', last_updated='Wed, 01 Jan 2013 00:00:00', github_details=None)
ProjectFactory(name=u'Project 3', last_updated='Thu, 01 Jan 2014 00:00:00 GMT')
db.session.commit()
response = self.app.get('/api/projects')
response = json.loads(response.data)
self.assertEqual(response['objects'][0]['name'], u'Project 3')
self.assertEqual(response['objects'][1]['name'], u'Non Github Project')
self.assertEqual(response['objects'][2]['name'], u'Project 2')
self.assertEqual(response['objects'][3]['name'], u'Project 1')
def test_projects(self):
ProjectFactory()
db.session.commit()
response = self.app.get('/api/projects')
response = json.loads(response.data)
assert isinstance(response, dict)
assert isinstance(response['pages'], dict)
assert isinstance(response['total'], int)
assert isinstance(response['objects'], list)
assert isinstance(response['objects'][0]['categories'], unicode)
assert isinstance(response['objects'][0]['tags'], unicode)
assert isinstance(response['objects'][0]['code_url'], unicode)
assert isinstance(response['objects'][0]['description'], unicode)
assert isinstance(response['objects'][0]['github_details'], dict)
assert isinstance(response['objects'][0]['id'], int)
assert isinstance(response['objects'][0]['api_url'], unicode)
assert isinstance(response['objects'][0]['link_url'], unicode)
assert isinstance(response['objects'][0]['name'], unicode)
assert isinstance(response['objects'][0]['organization'], dict)
assert isinstance(response['objects'][0]['organization_name'], unicode)
assert isinstance(response['objects'][0]['type'], unicode)
assert isinstance(response['objects'][0]['status'], unicode)
assert isinstance(response['objects'][0]['languages'], list)
def test_project_search_nonexisting_text(self):
''' Searching for non-existing text in the project and org/project
endpoints returns no results
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'Coder')
db.session.commit()
project_response = self.app.get('/api/projects?q=ruby')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(project_response['total'], 0)
self.assertEqual(len(project_response['objects']), 0)
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(org_project_response['total'], 0)
self.assertEqual(len(org_project_response['objects']), 0)
def test_project_search_existing_text(self):
''' Searching for existing text in the project and org/project endpoints
returns expected results
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby')
ProjectFactory(organization_name=organization.name, description=u'python')
db.session.commit()
project_response = self.app.get('/api/projects?q=ruby')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(project_response['total'], 1)
self.assertEqual(len(project_response['objects']), 1)
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(org_project_response['total'], 1)
self.assertEqual(len(org_project_response['objects']), 1)
def test_project_search_existing_phrase(self):
''' Searching for an existing phrase in the project and org/project endpoints
returns expected results
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby on rails')
ProjectFactory(organization_name=organization.name, description=u'i love lamp')
db.session.commit()
project_response = self.app.get('/api/projects?q=ruby on rails')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(project_response['total'], 1)
self.assertEqual(len(project_response['objects']), 1)
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby on rails')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(org_project_response['total'], 1)
self.assertEqual(len(org_project_response['objects']), 1)
def test_project_search_existing_part_of_phrase(self):
''' Searching for a partial phrase in the project and org/project endpoints
returns expected results
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby on rails')
ProjectFactory(organization_name=organization.name, description=u'i love lamp')
db.session.commit()
project_response = self.app.get('/api/projects?q=ruby')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(project_response['total'], 1)
self.assertEqual(len(project_response['objects']), 1)
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(org_project_response['total'], 1)
self.assertEqual(len(org_project_response['objects']), 1)
def test_project_search_nonexisting_phrase(self):
''' Searching for a term that is not part of an existing phrase in the project and
org/project endpoints returns no results
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby on rails')
db.session.commit()
project_response = self.app.get('/api/projects?q=joomla')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(project_response['total'], 0)
self.assertEqual(len(project_response['objects']), 0)
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=joomla')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(org_project_response['total'], 0)
self.assertEqual(len(org_project_response['objects']), 0)
def test_project_search_order_by_relevance(self):
''' Search results from the project and org/project endpoints are returned
in order of relevance
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
project_response = self.app.get('/api/projects?q=ruby')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
self.assertEqual(project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
self.assertEqual(org_project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
def test_project_search_order_by_relevance_requested(self):
''' Search results from the project and org/project endpoints are returned
in order of relevance when explicitly requested
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
project_response = self.app.get('/api/projects?q=ruby&sort_by=relevance')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
self.assertEqual(project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby&sort_by=relevance')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
self.assertEqual(org_project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
def test_project_search_order_by_last_updated(self):
''' Search results from the project and org/project endpoints are returned
in order of last_updated, if requested
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
project_response = self.app.get('/api/projects?q=ruby&sort_by=last_updated')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
self.assertEqual(project_response['objects'][0]['description'], 'ruby')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby&sort_by=last_updated')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
self.assertEqual(org_project_response['objects'][0]['description'], 'ruby')
def test_project_search_order_by_last_updated_sort_desc(self):
''' Search results from the project and org/project endpoints are returned
in descending order of last_updated, if requested
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
project_response = self.app.get('/api/projects?q=ruby&sort_by=last_updated&sort_dir=desc')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
self.assertEqual(project_response['objects'][0]['description'], 'ruby')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby&sort_by=last_updated&sort_dir=desc')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
self.assertEqual(org_project_response['objects'][0]['description'], 'ruby')
def test_project_search_order_by_last_updated_sort_asc(self):
''' Search results from the project and org/project endpoints are returned
in ascending order of last_updated, if requested
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
project_response = self.app.get('/api/projects?q=ruby&sort_by=last_updated&sort_dir=asc')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
self.assertEqual(project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby&sort_by=last_updated&sort_dir=asc')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
self.assertEqual(org_project_response['objects'][0]['description'], 'ruby ruby ruby ruby ruby')
def test_project_search_ranked_order(self):
''' Search results from the project and org/project endpoints are returned
with correct ranking values
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, status='TEST', last_updated=datetime.now() - timedelta(10000))
ProjectFactory(organization_name=organization.name, description='testing a new thing', last_updated=datetime.now() - timedelta(1))
ProjectFactory(organization_name=organization.name, tags='test,tags,what,ever', last_updated=datetime.now() - timedelta(100))
ProjectFactory(organization_name=organization.name, last_updated=datetime.now())
db.session.commit()
project_response = self.app.get('/api/projects?q=TEST')
project_response = json.loads(project_response.data)
self.assertEqual(project_response['total'], 3)
self.assertEqual(project_response['objects'][0]['status'], 'TEST')
self.assertEqual(project_response['objects'][1]['tags'], 'test,tags,what,ever')
self.assertEqual(project_response['objects'][2]['description'], 'testing a new thing')
def test_project_return_only_ids(self):
''' Search results from the project and org/project endpoints are returned
as only IDs if requested
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
project_one = ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
project_two = ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
project_one_id = project_one.id
project_two_id = project_two.id
project_response = self.app.get('/api/projects?q=ruby&only_ids=true')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(len(project_response["objects"]), 2)
assert isinstance(project_response['objects'][0], int)
assert isinstance(project_response['objects'][1], int)
self.assertEqual(project_response['objects'][0], project_one_id)
self.assertEqual(project_response['objects'][1], project_two_id)
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=ruby&only_ids=true')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(len(org_project_response["objects"]), 2)
assert isinstance(org_project_response['objects'][0], int)
assert isinstance(org_project_response['objects'][1], int)
self.assertEqual(org_project_response['objects'][0], project_one_id)
self.assertEqual(org_project_response['objects'][1], project_two_id)
def test_project_search_empty_string(self):
''' Searching an empty string on the project and org/project endpoints returns all projects
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
project_response = self.app.get('/api/projects?q=')
project_response = json.loads(project_response.data)
assert isinstance(project_response['total'], int)
assert isinstance(project_response['objects'], list)
self.assertEqual(project_response['total'], 2)
self.assertEqual(len(project_response['objects']), 2)
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=')
org_project_response = json.loads(org_project_response.data)
assert isinstance(org_project_response['total'], int)
assert isinstance(org_project_response['objects'], list)
self.assertEqual(org_project_response['total'], 2)
self.assertEqual(len(org_project_response['objects']), 2)
def test_project_search_tsv_body_not_in_response(self):
''' The tsv_body field is not in the response from the project and org/project endpoints
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, description=u'ruby ruby ruby ruby ruby', last_updated=datetime.now() - timedelta(10))
ProjectFactory(organization_name=organization.name, description=u'ruby', last_updated=datetime.now() - timedelta(1))
db.session.commit()
project_response = self.app.get('/api/projects?q=')
project_response = json.loads(project_response.data)
self.assertEqual(len(project_response['objects']), 2)
self.assertFalse('tsv_body' in project_response['objects'][0])
self.assertFalse('tsv_body' in project_response['objects'][1])
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=')
org_project_response = json.loads(org_project_response.data)
self.assertEqual(len(org_project_response['objects']), 2)
self.assertFalse('tsv_body' in org_project_response['objects'][0])
self.assertFalse('tsv_body' in org_project_response['objects'][1])
def test_project_orgs_dont_include_tsv(self):
OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=u"Code for San Francisco")
db.session.commit()
response = self.app.get('/api/projects')
response = json.loads(response.data)
self.assertFalse('tsv_body' in response['objects'][0]['organization'])
def test_project_search_includes_status(self):
''' The status field is included in search results from the project and org/project endpoints
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, status=u'Beta')
ProjectFactory(organization_name=organization.name, status=u'Alpha')
db.session.commit()
project_response = self.app.get('/api/projects?q=alpha')
project_response = json.loads(project_response.data)
self.assertEqual(len(project_response['objects']), 1)
self.assertEqual(project_response['objects'][0]['status'], 'Alpha')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=alpha')
org_project_response = json.loads(org_project_response.data)
self.assertEqual(len(org_project_response['objects']), 1)
self.assertEqual(org_project_response['objects'][0]['status'], 'Alpha')
def test_project_search_includes_name(self):
''' The name field is included in search results from the project and org/project endpoints
'''
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, name=u'My Cool Project')
ProjectFactory(organization_name=organization.name, name=u'My Dumb Project')
db.session.commit()
project_response = self.app.get('/api/projects?q=cool')
project_response = json.loads(project_response.data)
self.assertEqual(len(project_response['objects']), 1)
self.assertEqual(project_response['objects'][0]['name'], 'My Cool Project')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=cool')
org_project_response = json.loads(org_project_response.data)
self.assertEqual(len(org_project_response['objects']), 1)
self.assertEqual(org_project_response['objects'][0]['name'], 'My Cool Project')
def test_project_search_includes_tags(self):
"""
The tags field is included in search results from the project and org/project endpoints
"""
organization = OrganizationFactory(name=u"Code for San Francisco")
ProjectFactory(organization_name=organization.name, tags=u'mapping, philly')
ProjectFactory(organization_name=organization.name, tags=u'food stamps, health')
db.session.commit()
project_response = self.app.get('/api/projects?q=stamps')
project_response = json.loads(project_response.data)
self.assertEqual(len(project_response['objects']), 1)
self.assertEqual(project_response['objects'][0]['tags'], 'food stamps, health')
org_project_response = self.app.get('/api/organizations/Code-for-San-Francisco/projects?q=stamps')
org_project_response = json.loads(org_project_response.data)
self.assertEqual(len(org_project_response['objects']), 1)
self.assertEqual(org_project_response['objects'][0]['tags'], 'food stamps, health')
def test_project_query_filter(self):
'''
Test that project query params work as expected.
'''
brigade = OrganizationFactory(name=u'Whatever', type=u'Brigade')
brigade_somewhere_far = OrganizationFactory(name=u'Brigade Organization', type=u'Brigade, Code for All')
web_project = ProjectFactory(name=u'Random Web App', type=u'web service')
other_web_project = ProjectFactory(name=u'Random Web App 2', type=u'web service', description=u'Another')
non_web_project = ProjectFactory(name=u'Random Other App', type=u'other service')
web_project.organization = brigade
non_web_project.organization = brigade_somewhere_far
db.session.add(web_project)
db.session.add(non_web_project)
db.session.commit()
response = self.app.get('/api/projects?type=web%20service')
self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertEqual(response['total'], 2)
self.assertEqual(response['objects'][0]['name'], u'Random Web App')
self.assertEqual(response['objects'][1]['name'], u'Random Web App 2')
response = self.app.get('/api/projects?type=web%20service&description=Another')
self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertEqual(response['total'], 1)
self.assertEqual(response['objects'][0]['name'], u'Random Web App 2')
response = self.app.get('/api/projects?type=different%20service')
self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertEqual(response['total'], 0)
response = self.app.get('/api/projects?organization_type=Code+for+All')
self.assertEqual(response.status_code, 200)
response = json.loads(response.data)
self.assertEqual(response['total'], 1)
def test_project_cascading_deletes(self):
''' Test that issues get deleted when their parent
project and org is deleted
'''
# set up test objects and delete a project
organization = OrganizationFactory(name=u'TEST ORG')
db.session.flush()
project = ProjectFactory(organization_name=organization.name, name=u'TEST PROJECT')
db.session.flush()
issue = IssueFactory(title=u'TEST ISSUE', project_id=project.id)
another_issue = IssueFactory(title=u'ANOTHER TEST ISSUE', project_id=project.id)
a_third_issue = IssueFactory(title=u'A THIRD TEST ISSUE', project_id=project.id)
db.session.commit()
# make sure the issues are in the db
issues = db.session.query(Issue).all()
self.assertTrue(len(issues) == 3)
db.session.execute('DELETE FROM project')
db.session.commit()
issues = db.session.query(Issue).all()
self.assertFalse(len(issues))
# delete an organization
project = ProjectFactory(organization_name=organization.name, name=u'TEST PROJECT')
db.session.flush()
issue = IssueFactory(title=u'TEST ISSUE', project_id=project.id)
another_issue = IssueFactory(title=u'ANOTHER TEST ISSUE', project_id=project.id)
a_third_issue = IssueFactory(title=u'A THIRD TEST ISSUE', project_id=project.id)
db.session.commit()
# make sure the issues are in the db
issues = db.session.query(Issue).all()
self.assertTrue(len(issues) == 3)
db.session.execute('DELETE FROM organization')
db.session.commit()
issues = db.session.query(Issue).all()
self.assertFalse(len(issues))
|
|
#!/usr/bin/env python
"""Routine to make it easy to read command line arguments in a keyword=value
style format. Also has support for --help, help=, -h. Version keyword can
be printed with -v or --version.
lists can be given as e.g.: 1-3;5,6 which becomes: [[1,2,3],[5,6]]
File globbing and expansion of ~ is supported for strings
@filename will read filename and put each line as an element in a list
Available variables:
self.name # name of calling program
self.validkeys # list of allowed keywords given as strings
"""
#
# 15-mar-2003 Created as miriad.py PJT
# 16-apr-2003 added run,keyr,keyi,keya
# 05-mar-2004 Added help comments nemo style NLC
# 16-may-2004 Deleted all the code we don't use for map2 NLC
# 19-feb-2008 Changed keya, keyi, and keyr to not use the
# keypresent function. This allows the programmer to
# specify a default keyword value of nothing. However,
# error checking was added to keyini so that if the user
# tries to input an empty keyword on the command line,
# than an error message will still occur. Also, some cleanups
# to remove extraneous comments, combined the badkeyword
# function with keya, and added the keyf function which works
# the same as keyr.
# 07-may-2008 Cleanup and simplifying the code. Also made a blank keyword
# value return None or False. False is returned for keyb, all
# others return None
# 19-nov-2008 More code cleanup. Also added the write_keyval function and
# added spacing so the help text lines up with show_keyval
# 09-dec-2008 Sort the keywords when printing them
# 04-aug-2010 Added checking for required number of arguments in keyl
# 04-oct-2010 Added the check_existance and check_nonexistance. I may
# move these to optional keywords in keya()
# 20-oct-2010 check_existance and check_nonexistance can be called from
# keya() and keyl(). keyl() also has more smarts about ignoring
# comment lines and is more pythonic now.
# 8-may-2011 keyl() now accepts *,?, and [] shell-globbing now for input
# lists of files, and ~ for home directories. You can also
# make mixed lists of name, wildcards, tildes, and @files.
# 15-july-2011 keyl() will return a blank list instead of None.
# 18-july-2011 _at_file() will skip blank lines from input files
# 08-aug-2011 Just added some comments to various docstrings
# 26-sep-2011 Allow ranges to use dashes for keyl(val='i').
# 7-feb-2012 Two improvements. First, keyl() now allows semicolons to
# define nested lists. Second, writekeys() can format the
# output to 80 characters per line.
# 24-april-2012 show_keyval() will sort output by listing required keywords
# first, then remaining keywords
# 26 April 2012 check_existance() and check_nonexistance() will now allow
# input filenames to be a - or . for sys.stdin/sys.stdout and
# /dev/null. Also sort keywords in writekeys(), reorganized
# functions alphabetically, and deleted keyr().
# 16 Aug 2012 Added new arguments to check for min/max allowed values,
# and allowed options for keywords. Also made error(),
# warning(), check_existance(), and check_nonexistance()
# hidden functions. Lastly, commented out dprintf() since
# it shouldn't be part of this module. debug() statement
# might also disappear at a future date.
# 20 Aug 2012 Switch to raising errors rather than _error() function.
# Ditto for _warning(). Both have deprecation warnings
# printed when used.
# 24 Aug 2012 Reimplemented code as readcmd.py. Removed debug and dprintf,
# changed the way keywords specifications are done (see example
# in __main__).
# 6 Sep 2012 Fixed bug in getbool(). I forgot to return the value. Duh.
# 17 Sep 2012 Fixed bug with reading @files. Also fixed typo in naming
# of _checkMinMax()
# 6 Nov 2012 Sort validkeys
# 20 Nov 2012 Allowed values to have spaces, which should have been
# obvious from the start.
# 18 Dec 2012 Changed so format keyword in getkeys() is a number for line
# length, not just true/false
# 01 Apr 2013 Added ignorecase option to getstr() and getliststr() methods.
# 03 Apr 2013 I think it is fixed now so that 1e4 can be interpreted as
# an integer
# 16 Jul 2013 Added option to print out current time in getkeys(). Useful
# for writing history to a FITS header
# 28 Aug 2013 Added ability for getstr() to check if string is a file or
# directory. Haven't tested yet. Need to add to getliststr().
# 02 Dec 2013 Added error() and warning() methods. Since I seem to often
# import nlclib just for these two functions, why not have
# them here? Plus, the built-in argparse module has similar
# functions.
# 17 Feb 2014 Added support in getliststr() for type checking.
# 02 Apr 2014 Added support for version keyword
import glob,os,re,sys
import time as timemodule
class ReadCmd(object):
def __init__(self,spec,head=None):
"""Create ReadCmd object to read commandline with validation
spec = must be a multi-line string, list of strings, or a single
string specifying a filename.
head = pass header as a string here. Any string here will be
prepended to header parsed from spec."""
pattern0 = r"#(.*)" # full line comment
pattern1 = r"(\w+)\s*=(.+)" # key=value
pattern2 = r"(\w+)=(.+)" # key=value on command line
helpFlag = False # set to true if -h, --help, help=h on cmd line
versionFlag = False # set to true if -v or --version on cmd line
if isinstance(spec,str):
if os.path.isfile(spec): # read spec from a file
fp = open(spec,'r')
speclist = fp.readlines()
fp.close()
else:
speclist = spec.split('\n')
elif isinstance(spec,list) or isinstance(spec,tuple):
speclist = list(spec)
else:
self._error("TypeError: spec must be string, list, or tuple")
self.name = os.path.split(sys.argv[0])[1]
self.args = {}
if head is None:
self.head = [] # will hold head comments on usage
else:
self.head = [head]
for line in speclist: # first read spec file for defaults and help
if line.strip() == '': # skip blank lines
continue
m = re.match(pattern0,line.strip()) # look for comment lines
if m is not None:
self.head.append(m.group(1).strip())
else:
m = re.match(pattern1,line.strip())
if m is None: # didn't match
self._error("SyntaxError: Cannot read '%s' from spec" %line)
else:
key = m.group(1).strip()
junk = m.group(2).strip()
idx = junk.find('#')
if idx == -1: # no comment
value = junk
comment = ""
else:
n = junk.count('#')
if n == 1:
tmp = junk[:idx].strip()
if len(tmp) == 0: # no default value given
self._error("SyntaxError: Cannot read '%s' from spec" %line)
value = tmp
comment = junk[idx+1:].strip()
else: # n > 1
tmp = junk[:idx].strip()
if len(tmp) == 0: # first # sign is the value
value = '#'
else:
value = tmp
comment = junk[idx+1:].strip()
if self.args.has_key(key):
self._error("KeyError: Duplicate keyword '%s' in spec" %key)
self.args[key] = [value,comment]
self.validkeys = self.args.keys() # valid keywords
self.validkeys.sort()
if len(sys.argv) > 1: # stuff given on command line
junk = {} # will hold all keys read from command line
# now loop over command line and parse key=value pairs
for tmp in sys.argv[1:]:
if tmp in ['-h','--help','help=h']: # user wants help
helpFlag = True
elif tmp in ['-v','--version']: # user wants version number
versionFlag = True
else:
m = re.match(pattern2,tmp)
if m is None:
self._error("SyntaxError: Cannot read '%s'" %tmp)
key = m.group(1)
value = m.group(2)
if junk.has_key(key):
self._error("KeyError: Duplicate keyword '%s'" %key)
junk[key] = value
# now substitute command line keywords for defaults
for key in junk.iterkeys():
if key not in self.validkeys:
self._error("KeyError: Unknown keyword %s" %key)
self.args[key][0] = junk[key] # replace value, but not comment
if helpFlag:
print self,
sys.exit()
if versionFlag:
if 'version' in self.validkeys:
print(self.getstr('version'))
sys.exit()
self._checkRequired()
def error(self,msg):
"""Print a message to screen as a fatal error and quit"""
sys.stderr.write("### Fatal Error! %s\n" %msg)
sys.exit()
def getbool(self,key):
"""Return keyword value as a boolean True/False. A value of None returns
None.
Can understand True,False,1,0,yes,no, and None. Any capitalization
accepted (except for None).
key = keyword given as a string"""
self._checkKey(key)
temp = self.args[key][0]
try:
value = self._convertBool(temp)
return value
except ValueError:
self._error("ValueError: %s is not a valid boolean for keyword %s" %(temp,key))
def getfloat(self,key,min=None,max=None,option=None):
"""Return keyword value as a float. A value of None returns None.
key = keyword given as a string
min = check for minimum value
max = check for maximum value
option = list/tuple of allowed values"""
self._checkKey(key)
value = self.args[key][0]
if value == 'None':
return None
else:
try:
tmp = float(value)
if min is not None or max is not None:
self._checkMinMax(key,tmp,min,max)
if option is not None:
self._checkOption(key,tmp,option)
return tmp
except ValueError:
self._error("ValueError: %s is not a valid float for keyword %s" %(value,key))
def getint(self,key,min=None,max=None,option=None):
"""Return keyword value as integer. A value of None returns None.
key = keyword given as a string
min = check for minimum value
max = check for maximum value
option = list/tuple of allowed values"""
self._checkKey(key)
value = self.args[key][0]
if value == 'None':
return None
else:
try:
tmp = float(value)
if tmp%1 != 0:
raise ValueError
else:
tmp = int(tmp)
if min is not None or max is not None:
self._checkMinMax(key,tmp,min,max)
if option is not None:
self._checkOption(key,tmp,option)
return tmp
except ValueError:
self._error("ValueError: %s is not a valid integer for keyword %s" %(value,key))
def getkeys(self,comment='#',format=None,time=False):
"""Make a short string of all keyword=values.
Can format for 80 chars per line and also add a comment symbol
at the beginning of each line
comment = comment character or string for each line (can be None)
format = Can set to a number to limit line length to that no. of
chars
time = If set to true, include current time in returned string"""
keys = self.validkeys
if comment is None or comment == '':
commentchar = ''
else:
commentchar = "%s " %comment
outstr = ''
if time is True:
outstr += "%s%s\n" %(commentchar,timemodule.asctime())
outstr += "%s%s " %(commentchar,self.name)
if format is not None:
maxlen = format
else:
maxlen = 1e6
n = len(commentchar) + len(self.name) + 1 # don't count len(time)
for k in keys:
tmp = '%s=%s ' %(k,self.args[k][0])
n += len(tmp)
if format is not None and n > maxlen:
outstr += "\n%s" %commentchar
n = len(tmp) + len(commentchar)
outstr += tmp
outstr += "\n"
return outstr
def getlistbool(self,key,length=None):
"""Return keyword value as a list of booleans. A value of None returns
an empty list.
key = keyword given as a string
length = int/list/tuple of allowed number of elements in list"""
out = self._getlistbase(key,type=bool,length=length)
return out
def getlistfloat(self,key,length=None,min=None,max=None,option=None):
"""Return keyword value as a list of floats. A value of None returns an
empty list.
key = keyword given as a string
length = int/list/tuple of allowed number of elements in list
min = check for minimum value
max = check for maximum value
option = list/tuple of allowed values"""
out = self._getlistbase(key,type=float,length=length,min=min,max=max,
option=option)
return out
def getlistint(self,key,length=None,min=None,max=None,option=None):
"""Return keyword value as a list of integers. A value of None returns
an empty list.
key = keyword given as a string
length = int/list/tuple of allowed number of elements in list
min = check for minimum value
max = check for maximum value
option = list/tuple of allowed values"""
out = self._getlistbase(key,type=int,length=length,min=min,max=max,
option=option)
return out
def getliststr(self,key,comment='#',exist=None,length=None,option=None,
ignorecase=False,type=None):
"""Return keyword value as a list of strings. A value of None returns
an empty list.
key = keyword given as a string
comment = String character for comment lines to ignore in an @file
exist = Can check to make sure all input files exist. Default is
to not check. Note, if you give an @file, then the @file
will always be checked for existence no matter what.
length = int/list/tuple of allowed number of elements in list
option = list/tuple of allowed values (for each element)
ignorecase = boolean on whether to ignore differences between
upper/lower case when checking options
type = set to 'file' to check if input is a file, or set to
'dir' to check if input is a directory. Only applies when
exist is also True"""
out = self._getlistbase(key,type=str,comment=comment,exist=exist,
length=length,option=option,ignorecase=ignorecase)
if exist is True: # filename must exist
if type is not None:
self._checkType(type,out)
return out
def getstr(self,key,exist=None,option=None,ignorecase=False,type=None):
"""Return keyword value as a string. A value of None returns None.
key = keyword given as a string
exist = Assume keyword references a filename, check for existence
or not (boolean)
option = str/list/tuple of allowed values.
ignorecase = boolean on whether to ignore differences between
upper/lower case when checking options
type = set to 'file' to check if input is a file, or set to
'dir' to check if input is a directory. Only applies when
exist is also True"""
self._checkKey(key)
value = self.args[key][0]
if value == 'None':
return None
if exist is True: # filename must exist
self._checkExist(value)
if type is not None:
self._checkType(type,value)
elif exist is False: # filename must NOT exist
self._checkNotExist(value)
if option is not None:
self._checkOption(key,value,option,ignorecase)
return value
def warning(self,msg):
"""Print a string of text as a warning"""
sys.stderr.write("### Warning! %s\n" %msg)
def __str__(self):
"""Print out the current keywords, their values and a help message, if
one is present."""
key1 = [] # keys with missing required arguments
key2 = [] # keys with optional/default arguments
maxlength = 0
for k,v in self.args.iteritems():
if k == 'version': # skip version keyword
continue
n = len(k+v[0])
if n > maxlength:
maxlength = n
if v[0] == '???':
key1.append(k)
else:
key2.append(k)
key1.sort()
key2.sort()
output = ""
for line in self.head:
output += "%s\n" %line
#output += "------------------------------------------------------------\n"
for k in key1: # loop over required keywords
junk = 3 + maxlength - len(k) - len(self.args[k][0])
space = ' '*junk
output += " %s=%s%s%s\n" %(k,self.args[k][0],space,self.args[k][1])
for k in key2: # loop over remaining keywords
junk = 3 + maxlength - len(k) - len(self.args[k][0])
space = ' '*junk
output += " %s=%s%s%s\n" %(k,self.args[k][0],space,self.args[k][1])
output = output[:-1] # strip off trailing \n
#output += "------------------------------------------------------------"
return output
def _atFile(self,filename,comment):
"""Tries to read an at-file, a file that contains keyword values.
Specified by key=@filename. It converts the file to a
string of comma separated values. Blank lines are skipped.
filename - string name of file. Assumes @ has been stripped off
comment - character to use on lines that should be ignored as comments"""
self._checkExist(filename)
fp = open(filename,'r')
tmp = [line.partition(comment)[0].strip() for line in fp] # no comments
data = [a for a in tmp if len(a) > 0] # skip blank lines
fp.close()
return data
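# For illustration (hypothetical file): an @file "list.txt" containing
#   image1.fits
#   # a comment line, ignored
#   image2.fits
# read via key=@list.txt yields ['image1.fits', 'image2.fits']; the list
# comprehensions above drop comment and blank lines.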
def _checkExist(self,*files):
"""Given an input file name as a string or a list of filenames will check to
make sure each file exists. If not, a fatal error will occur using
error()
If filename is a dash or a period, does not check for existance. This
is because I allow dashes to be sys.stdin/sys.stdout, and period to
be /dev/null"""
if len(files) == 0:
self._error("IndexError: You must pass at least one argument to _checkExist()")
for f in files:
if isinstance(f,str): # check a single filename
if f == '-': # give a pass since it is sys.stdin
pass
elif not os.path.exists(f):
t = os.path.split(f)[1]
self._error("IOError: Required file %s is missing" %t)
elif isinstance(f,(list,tuple)): # a list or tuple
for a in f:
if a == '-': # give a pass since it is sys.stdin
pass
elif not os.path.exists(a):
t = os.path.split(a)[1]
self._error("IOError: Required file %s is missing" %t)
else:
self._error("TypeError: _checkExist() can only check types str,list, and tuple")
def _checkKey(self,key):
"""Check to see if key is part of self.validkeys."""
if key in self.validkeys:
pass
else:
self._error("KeyError: '%s' is not a valid keyword" %key)
def _checkMinMax(self,key,value,minval=None,maxval=None):
"""Check to see if value is within bounds set by minval and maxval"""
if minval is not None:
if value < minval:
self._error("ValueError: %s is < minimum value of %f" %(key,minval))
if maxval is not None:
if value > maxval:
self._error("ValueError: %s is > maximum value of %f" %(key,maxval))
def _checkNotExist(self,*files):
"""Given an input file list, will check to make sure each files does NOT
exist. If any one of the files exists, a fatal error will occur
using error()
If filename is a dash or a period, does not check for existance. This
is because I allow dashes to be sys.stdin/sys.stdout, and period to
be /dev/null"""
if len(files) == 0:
self._error("IndexError: You must pass at least one argument to _checkNotExist()")
for f in files:
if isinstance(f,str): # check a single filename
if f in ('.','-'): # give these a pass as described in docstring
pass
elif os.path.exists(f):
t = os.path.split(f)[1]
if os.path.isdir(f):
self._error("IOError: Directory '%s' already exists" %t)
else:
self._error("IOError: File '%s' already exists" %t)
elif isinstance(f,(list,tuple)): # a list
for a in f:
if a in ('.','-'): # give these a pass as described in docstring
pass
elif os.path.exists(a):
t = os.path.split(a)[1]
if os.path.isdir(a):
self._error("IOError: Directory '%s' already exists" %t)
else:
self._error("IOError: File '%s' already exists" %t)
else:
self._error("TypeError: _checkNotExist can only check types str,list, and tuple")
def _checkOption(self,key,value,option,ignorecase=False):
"""Check whether a value is among valid options"""
if ignorecase is True:
temp = [a.lower() for a in option]
if value.lower() in temp:
pass
else:
self._error("IndexError: Allowed options for key %s are %s" %(key,str(option)))
else:
if value in option:
pass
else:
self._error("IndexError: Allowed options for key %s are %s" %(key,str(option)))
def _checkRequired(self):
"""Checks to see that no blank values exist"""
usage = "Usage: %s " %self.name
missing = 0 # number of missing keywords
extraFlag = False # set to true if >= 1 keyword is not missing
for k in self.validkeys:
if self.args[k][0] == '???': # ??? means a required value
usage = usage + "%s=??? " %k
missing += 1
else:
extraFlag = True
if missing > 0:
if extraFlag is True:
usage = usage + "..."
self._error("KeyError: Missing Keywords: %s" %usage)
def _checkType(self,typestr,*files):
""""Given an input name as a string or a list of strings, will check to
make sure each is a directory using os.path.isdir() or regular
file, using os.path.isfile().
Assumes that names have already been checked by _checkExist. If name
is a dash or a period, does not check. This is because I allow dashes
to be sys.stdin/sys.stdout, and period to be /dev/null"""
if typestr not in ('file','dir'):
self._error("TypeError: _checkType() only checks file and dir types")
for f in files:
if isinstance(f,str): # check a single name
if f == '-' or f == '.':
pass
else:
if typestr == 'file':
if not os.path.isfile(f):
t = os.path.split(f)[1]
self._error("IOError: %s is not a regular file" %t)
elif typestr == 'dir':
if not os.path.isdir(f):
t = os.path.split(f)[1]
self._error("IOError: %s is not a directory" %t)
elif isinstance(f,(list,tuple)):
for a in f:
if a == '-' or a == '.':
pass
else:
if typestr == 'file':
if not os.path.isfile(a):
t = os.path.split(a)[1]
self._error("IOError: %s is not a regular file" %t)
elif typestr == 'dir':
if not os.path.isdir(a):
t = os.path.split(a)[1]
self._error("IOError: %s is not a directory" %t)
else:
self._error("TypeError: _checkType() can only check types str,list, and tuple")
def _convertBool(self,value):
"""Convert value to a Boolean. Accepts True, False, 1,0, yes, no, and
None. A value of None returns None."""
if value == 'None':
return None
if value.lower() in ('1','yes','true','y'):
return True
elif value.lower() in ('0','no','false','n'):
return False
else:
self._error("ValueError: '%s' is not a valid boolean" %value)
def _convertValues(self,value,outlist,type,exist,min,max):
"""Helper function for getlist() to convert values in list to boolean,
string, integer, or float"""
itemlist = []
if type is int:
for s in outlist:
try:
temp = map(int,s.split('-')) # parse 1-4 as 1,2,3,4
except ValueError:
self._error("ValueError: %s is not a valid range of integers" %s)
start = temp[0]
stop = temp[-1]
if start > stop:
self._error("ValueError: range minimum (%d) > maximum (%d)" %(start,stop))
itemlist = itemlist + range(start,stop+1)
elif type is float:
try:
itemlist = map(float,outlist)
except ValueError:
self._error("ValueError: %s is not a valid list of floats" %value)
elif type is str:
itemlist = outlist
if exist is True: # make sure files exist in the list
self._checkExist(outlist)
elif exist is False: # make sure files don't exist in the list
self._checkNotExist(outlist)
elif type is bool:
try:
itemlist = map(self._convertBool,outlist)
except ValueError:
self._error("ValueError: %s is not a valid list of booleans" %value)
else:
self._error("TypeError: type for getlist() must be str,int, or float")
if min is not None or max is not None:
for tmp in itemlist:
self._checkMinMax(value,tmp,min,max)
return itemlist
def _error(self,msg):
"""Print out an error message to screen and quit"""
sys.stderr.write("### %s\n" %msg)
sys.exit()
def _getlistbase(self,key,type=str,comment='#',min=None,max=None,
option=None,length=None,exist=None,ignorecase=False):
"""Return keyword value as a list. A value of None returns an empty list.
key = keyword given as a string
type = Can be either bool, float, int, or str (for boolean,
float, integer, or string)
comment = String character for comment lines to ignore in an @file
min = check for minimum value (for each element)
max = check for maximum value (for each element)
option = list/tuple of allowed values (for each element)
length = int/list/tuple of allowed number of elements in list
exist = Can check to make sure all input files exist. Default is
to not check. Note, if you give an @file, then the @file
will always be checked for existence no matter what."""
self._checkKey(key)
value = self.args[key][0]
outlist = []
if value == 'None':
return outlist
for alist in value.split(';'):# split by semicolon for nested lists
blah = []
for junk in alist.split(','): # loop over comma-separated list
if junk[0] == '@': # read from atfile
temp = self._atFile(junk[1:],comment)
blah = blah + temp
else: # try file globbing
temp = glob.glob(junk)
if len(temp) == 0: # try to expand ~
temp = os.path.expanduser(junk)
if temp == junk: # no match so add exact input
blah.append(junk)
else:
blah.append(temp) # expanduser returns a string, not a list
else:
blah = blah + temp
blah = self._convertValues(value,blah,type,exist,min,max)
if option is not None:
for value in blah:
self._checkOption(key,value,option,ignorecase)
outlist.append(blah)
if len(outlist) == 1: # only one nested list, so removing nesting
outlist = outlist[0]
if length is not None:
nval = len(outlist)
if isinstance(length,int):
if nval != length:
if length == 1:
self._error("IndexError: key %s should have 1 element" %(key))
else:
self._error("IndexError: key %s should have %d elements" %(key,length))
elif isinstance(length,list) or isinstance(length,tuple):
if nval not in length:
self._error("IndexError: key %s should have %s elements" %(key,str(length)))
else:
self._error("IndexError: length parameter for key %s should be int/list/tuple" %key)
return outlist
if __name__ == "__main__":
spec = """
# Compute polarization vectors from a sharp_combine map
in = ??? # Input combine.fits
out = ??? # Output file of vectors
sigma = 3 # p/dp cutoff for vectors
skip = 4 # Only plot every ith pixel (since we oversample 4x)
offset = 0,0 # Offset in x,y for start of vectors
debias = False # Debias Polarizations (Ricean correction)"""
bob = ReadCmd(spec)
inFile = bob.getstr("in")
outFile = bob.getstr("out")
sigma = bob.getfloat("sigma")
skip = bob.getint("skip")
offset = bob.getlistint("offset",length=2)
debias = bob.getbool("debias")
print bob
print bob.getkeys(format=80),
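# Assuming the spec above were saved in a script named sharp_polvec.py (the name
# is hypothetical), an invocation could look like:
#   python sharp_polvec.py in=combine.fits out=vectors.txt sigma=2 debias=yes
# Omitting the required keywords (in, out) triggers the missing-keyword usage
# message from _checkRequired().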
|
|
from __future__ import unicode_literals
import httplib
import logging
from modularodm import Q
from modularodm.storage.base import KeyExistsException
from flask import request
from framework.auth import Auth
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_signed
from website.models import User
from website.project.decorators import (
must_not_be_registration, must_have_addon,
)
from website.util import rubeus
from website.project.model import has_anonymous_link
from website.files import models
from website.addons.osfstorage import utils
from website.addons.osfstorage import decorators
from website.addons.osfstorage import settings as osf_storage_settings
logger = logging.getLogger(__name__)
def osf_storage_root(node_settings, auth, **kwargs):
"""Build HGrid JSON for root node. Note: include node URLs for client-side
URL creation for uploaded files.
"""
node = node_settings.owner
root = rubeus.build_addon_root(
node_settings=node_settings,
name='',
permissions=auth,
user=auth.user,
nodeUrl=node.url,
nodeApiUrl=node.api_url,
)
return [root]
def make_error(code, message_short=None, message_long=None):
data = {}
if message_short:
data['message_short'] = message_short
if message_long:
data['message_long'] = message_long
return HTTPError(code, data=data)
@must_be_signed
@must_have_addon('osfstorage', 'node')
def osfstorage_update_metadata(node_addon, payload, **kwargs):
try:
version_id = payload['version']
metadata = payload['metadata']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
version = models.FileVersion.load(version_id)
if version is None:
raise HTTPError(httplib.NOT_FOUND)
version.update_metadata(metadata)
return {'status': 'success'}
@must_be_signed
@decorators.autoload_filenode(must_be='file')
def osfstorage_get_revisions(file_node, node_addon, payload, **kwargs):
is_anon = has_anonymous_link(node_addon.owner, Auth(private_key=request.args.get('view_only')))
# Return revisions in descending order
return {
'revisions': [
utils.serialize_revision(node_addon.owner, file_node, version, index=len(file_node.versions) - idx - 1, anon=is_anon)
for idx, version in enumerate(reversed(file_node.versions))
]
}
@decorators.waterbutler_opt_hook
def osfstorage_copy_hook(source, destination, name=None, **kwargs):
return source.copy_under(destination, name=name).serialize(), httplib.CREATED
@decorators.waterbutler_opt_hook
def osfstorage_move_hook(source, destination, name=None, **kwargs):
return source.move_under(destination, name=name).serialize(), httplib.OK
@must_be_signed
@decorators.autoload_filenode(default_root=True)
def osfstorage_get_lineage(file_node, node_addon, **kwargs):
#TODO Profile
list(models.OsfStorageFolder.find(Q('node', 'eq', node_addon.owner)))
lineage = []
while file_node:
lineage.append(file_node.serialize())
file_node = file_node.parent
return {'data': lineage}
@must_be_signed
@decorators.autoload_filenode(default_root=True)
def osfstorage_get_metadata(file_node, **kwargs):
try:
# TODO This should change to 'version'; since it's internal, it can be changed at any time
version = int(request.args.get('revision'))
except (ValueError, TypeError): # If it's not a number
version = -1
return file_node.serialize(version=version, include_full=True)
@must_be_signed
@decorators.autoload_filenode(must_be='folder')
def osfstorage_get_children(file_node, **kwargs):
return [
child.serialize()
for child in file_node.children
]
@must_be_signed
@must_not_be_registration
@decorators.autoload_filenode(must_be='folder')
def osfstorage_create_child(file_node, payload, node_addon, **kwargs):
parent = file_node # Just for clarity
name = payload.get('name')
user = User.load(payload.get('user'))
is_folder = payload.get('kind') == 'folder'
if not (name and user) or '/' in name:
raise HTTPError(httplib.BAD_REQUEST)
try:
if is_folder:
created, file_node = True, parent.append_folder(name)
else:
created, file_node = True, parent.append_file(name)
except KeyExistsException:
created, file_node = False, parent.find_child_by_name(name, kind=int(not is_folder))
if not created and is_folder:
raise HTTPError(httplib.CONFLICT, data={
'message': 'Cannot create folder "{name}" because a file or folder already exists at path "{path}"'.format(
name=file_node.name,
path=file_node.materialized_path,
)
})
if not is_folder:
try:
version = file_node.create_version(
user,
dict(payload['settings'], **dict(
payload['worker'], **{
'object': payload['metadata']['name'],
'service': payload['metadata']['provider'],
})
),
dict(payload['metadata'], **payload['hashes'])
)
version_id = version._id
archive_exists = version.archive is not None
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
else:
version_id = None
archive_exists = False
return {
'status': 'success',
'archive': not archive_exists, # Should waterbutler also archive this file
'data': file_node.serialize(),
'version': version_id,
}, httplib.CREATED if created else httplib.OK
@must_be_signed
@must_not_be_registration
@decorators.autoload_filenode()
def osfstorage_delete(file_node, payload, node_addon, **kwargs):
auth = Auth(User.load(payload['user']))
#TODO Auth check?
if not auth:
raise HTTPError(httplib.BAD_REQUEST)
if file_node == node_addon.get_root():
raise HTTPError(httplib.BAD_REQUEST)
file_node.delete()
return {'status': 'success'}
@must_be_signed
@decorators.autoload_filenode(must_be='file')
def osfstorage_download(file_node, payload, node_addon, **kwargs):
if not request.args.get('version'):
version_id = None
else:
try:
version_id = int(request.args['version'])
except ValueError:
raise make_error(httplib.BAD_REQUEST, 'Version must be an integer if specified')
version = file_node.get_version(version_id, required=True)
if request.args.get('mode') not in ('render', ):
utils.update_analytics(node_addon.owner, file_node._id, int(version.identifier) - 1)
return {
'data': {
'name': file_node.name,
'path': version.location_hash,
},
'settings': {
osf_storage_settings.WATERBUTLER_RESOURCE: version.location[osf_storage_settings.WATERBUTLER_RESOURCE],
},
}
|
|
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import numpy as np
import cStringIO
import sys
import tempfile
import h5py
from PIL import Image, ImageEnhance
from PIL import ImageOps
from contextlib import closing
import ndproj
import spatialdb
class Synaptogram:
"""Synaptogram virtual object. Construct and process."""
def __init__ (self, token, channels, centroid):
# arguments
self.token = token
self.channels = channels
self.centroid = centroid
# parameter defaults. set by accessors.
self.sog_width = 200
self.sog_frame = 20
self.width = 11
self.normalize = True
self.normalize2 = False
self.resolution = 0
self.refchannels = []
self.emchannels = []
self.enhance = None
# pattern for using contexts to close databases
# get the project
with closing ( ndproj.NDProjectsDB() ) as projdb:
self.proj = projdb.loadToken ( token )
def setReference ( self, refchans ):
"""Modifier to set reference channels. Default value is None."""
self.refchannels = refchans
def setEM ( self, emchans ):
"""Modifier to set EM channels. No reference drawn for EM channels. Default value is None."""
self.emchannels = emchans
def setEnhance ( self, enhance ):
"""Modifier to set the brightness enhancement factor. Default value is None."""
self.enhance = enhance
def setNormalize ( self ):
"""Modifier to turn on per-channel normalization of intensities."""
self.normalize=True
def setNormalize2 ( self ):
"""Modifier to turn on global normalization across the whole cutout."""
self.normalize2=True
def setWidth ( self, width ):
"""How many pixels in the synaptogram data"""
self.width=width
def setTileWidth ( self, sogwidth ):
"""How many pixels in the synaptogram panel"""
self.sog_width=sogwidth
def setFrameWidth ( self, sogframe ):
"""How many pixels in the frame between iamges"""
self.sog_frame=sogframe
def setResolution ( self, resolution ):
"""Choose a resolution, default is 0"""
self.resolution=resolution
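# Example usage (a minimal sketch; the token, channel names, and centroid below
# are hypothetical and depend on the deployed project database):
#   sog = Synaptogram('mytoken', ['dapi', 'synapsin', 'psd95'], [4096, 4096, 64])
#   sog.setWidth(11)
#   sog.setReference(['dapi'])
#   sog.construct()   # builds the panel image in sog.sog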
def construct ( self ):
# get the spatial parameters of the synaptogram
hwidth = self.width/2
[x,y,z] = self.centroid
# update for the zoffset
# z = z - self.proj.datasetcfg.slicerange[0]
# and the database and then call the db function
with closing ( spatialdb.SpatialDB(self.proj) ) as db:
# if we have a global normalization request, do a big cutout to get a sense of the values and then
# set a maximum value for each channel
if self.normalize2:
# is a form of normalization
self.normalize = True
gchmaxval = self.getChannelMax(db)
# convert to cutout coordinates
corner = [ x-hwidth, y-hwidth, z-hwidth ]
dim = [ 2*hwidth+1, 2*hwidth+1, 2*hwidth+1 ]
chancuboids = {}
# get the data region for each channel
for chan in self.channels:
ch = self.proj.getChannelObj ( chan )
try:
chancuboids[chan] = db.cutout ( ch, corner, dim, self.resolution )
except KeyError:
raise Exception ("Channel %s not found" % ( chan ))
# Now generate a synaptogram
# create a white image as the background
self.sog = Image.new("L", ((hwidth*2+1)*(self.sog_width+self.sog_frame)+self.sog_frame,len(self.channels)*(self.sog_width+self.sog_frame)+self.sog_frame),(255)).convert('RGB')
# array of reference images and intensities
# need these as zero even if we don't use a reference image
refimgdata = np.zeros((2*hwidth+1,2*hwidth+1,2*hwidth+1),dtype=np.uint32)
refintensity = np.zeros((2*hwidth+1,2*hwidth+1,2*hwidth+1),dtype=np.uint8)
# build the reference channel data
if self.refchannels != []:
# list of reference channels
for refchanid in range(len(self.refchannels)):
refchan = self.refchannels[refchanid]
try:
refdata = chancuboids[refchan].data
except KeyError:
raise Exception ("Reference channel %s not found" % ( refchan ))
if self.normalize2:
chmaxval = gchmaxval[refchan]
else:
chmaxval = np.max(refdata)
for sl in range(2*hwidth+1):
if self.normalize:
normdata = np.uint8(np.uint32(refdata[sl,:,:])*256/(chmaxval+1))
else:
normdata = np.uint8(refdata[sl,:,:]/256)
refintensity[sl,:,:] = np.where ( normdata>refintensity[sl,:,:], normdata, refintensity[sl,:,:])
tmpimgdata = np.where ( normdata == refintensity[sl,:,:], normdata, 0 )
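# pack the winning intensity into a 32-bit RGBA word (little-endian byte
# order assumed, so the low byte is red); the first six reference channels
# map to red, green, blue, yellow, magenta and cyan respectively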
# channel 0 is red
if refchanid == 0:
refimgdata[sl,:,:] = np.where ( tmpimgdata, np.uint32(tmpimgdata), refimgdata[sl,:,:] )
elif refchanid == 1:
refimgdata[sl,:,:] = np.where ( tmpimgdata, np.uint32(tmpimgdata)<<8, refimgdata[sl,:,:] )
elif refchanid == 2:
refimgdata[sl,:,:] = np.where ( tmpimgdata, np.uint32(tmpimgdata)<<16, refimgdata[sl,:,:] )
elif refchanid == 3:
refimgdata[sl,:,:] = np.where ( tmpimgdata, (np.uint32(tmpimgdata)<<8)+np.uint32(tmpimgdata), refimgdata[sl,:,:] )
elif refchanid == 4:
refimgdata[sl,:,:] = np.where ( tmpimgdata, (np.uint32(tmpimgdata)<<16)+np.uint32(tmpimgdata), refimgdata[sl,:,:] )
elif refchanid == 5:
refimgdata[sl,:,:] = np.where ( tmpimgdata, (np.uint32(tmpimgdata)<<16)+(np.uint32(tmpimgdata)<<8), refimgdata[sl,:,:] )
# Add the image data
# where the channel gets drawn on the page
chanidx = 0
# add each channel to the synaptogram
for chan in self.channels:
chandata = chancuboids[chan].data
# select a normalization value for the channel
if self.normalize2:
chmaxval = gchmaxval[chan]
else:
chmaxval = np.max(chandata)
# process each slice
for sl in range(2*hwidth+1):
if self.normalize:
normdata = np.uint8(np.uint32(chandata[sl,:,:])*256/(chmaxval+1))
else:
normdata = np.uint8(chandata[sl,:,:]/256)
# OK, here we have normalized 8 bit data. Add in the reference channel
# if it's an EM channel, use no reference
if chan in self.emchannels:
chansldata = normdata
refsldata = np.zeros ( normdata.shape )
else:
# if the channel is brighter take the channel pixels
chansldata = np.where ( normdata>=refintensity[sl,:,:], normdata, 0 )
# otherwise take the reference pixels
refsldata = np.where ( refintensity[sl,:,:]>normdata, refimgdata[sl,:,:], 0 )
# generate the channel panel
tmpimg = Image.frombuffer ( 'L', (2*hwidth+1,2*hwidth+1), chansldata.flatten(), 'raw', 'L', 0, 1)
refimg = Image.frombuffer ( 'RGBA', (2*hwidth+1,2*hwidth+1), np.uint32(refsldata), 'raw', 'RGBA', 0, 1)
refimg.paste ( tmpimg, (0,0), tmpimg )
bigtmpimg = refimg.resize ( (self.sog_width,self.sog_width), Image.ANTIALIAS )
# if enhance was chosen
if self.enhance != None and chan not in self.emchannels:
enhancer = ImageEnhance.Brightness(bigtmpimg)
bigtmpimg = enhancer.enhance(self.enhance)
self.sog.paste ( bigtmpimg, (sl*(self.sog_width+self.sog_frame)+self.sog_frame, chanidx*(self.sog_width+self.sog_frame)+self.sog_frame))
# go on to the next channel
chanidx += 1
# at this point self.sog contains a synaptogram image
return self.sog
def getImage ( self ):
"""Accessor function"""
return self.sog
def getChannelMax ( self, db ):
"""Helper function to determine the maximum in biggish box around each centroid"""
[x,y,z] = self.centroid
xmin = max ( 0, x - 256 )
ymin = max ( 0, y - 256 )
zmin = max ( 0, z - 8 )
xmax = min ( x + 256, self.proj.datasetcfg.imagesz[self.resolution][0])
ymax = min ( y + 256, self.proj.datasetcfg.imagesz[self.resolution][1])
zmax = min ( z + 8, self.proj.datasetcfg.slicerange[1] )
# convert to cutout coordinates
corner = [ xmin, ymin, zmin ]
dim = [ xmax-xmin, ymax-ymin, zmax-zmin ]
# get the data region for each channel
# dictionary that stores the maximum value per channel
gchmaxval={}
for chan in self.channels:
ch = self.proj.getChannelObj ( chan )
cuboid = db.cutout ( ch, corner, dim, self.resolution )
gchmaxval[chan] = np.max(cuboid.data)
return gchmaxval
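# Hedged usage sketch (illustrative only): the token, channel names and
# centroid below are assumptions, not values defined in this module.
#
#   sog = Synaptogram ( 'mytoken', ['synapsin', 'psd95'], [1000, 1000, 50] )
#   sog.setWidth ( 11 )
#   sog.setReference ( ['psd95'] )
#   sog.setResolution ( 0 )
#   img = sog.construct()
#   img.save ( 'synaptogram.png' )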
|
|
from __future__ import unicode_literals
from djblets.webapi.testing.decorators import webapi_test_template
from reviewboard.webapi.tests.mixins_extra_data import (ExtraDataItemMixin,
ExtraDataListMixin)
class BaseCommentListMixin(object):
@webapi_test_template
def test_post_with_text_type_markdown(self):
"""Testing the POST <URL> API with text_type=markdown"""
self._test_post_with_text_type('markdown')
@webapi_test_template
def test_post_with_text_type_plain(self):
"""Testing the POST <URL> API with text_type=plain"""
self._test_post_with_text_type('plain')
def _test_post_with_text_type(self, text_type):
comment_text = '`This` is a **test**'
url, mimetype, data, objs = \
self.setup_basic_post_test(self.user, False, None, True)
data['text'] = comment_text
data['text_type'] = text_type
rsp = self.api_post(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], comment_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
class BaseCommentItemMixin(object):
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
if comment.rich_text:
self.assertEqual(item_rsp['rich_text'], 'markdown')
else:
self.assertEqual(item_rsp['rich_text'], 'plain')
@webapi_test_template
def test_get_with_markdown_and_force_text_type_markdown(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=markdown
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='markdown',
expected_text=r'\# `This` is a **test**')
@webapi_test_template
def test_get_with_markdown_and_force_text_type_plain(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=plain
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='plain',
expected_text='# `This` is a **test**')
@webapi_test_template
def test_get_with_markdown_and_force_text_type_html(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=html
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='html',
expected_text='<p># <code>This</code> is a '
'<strong>test</strong></p>')
@webapi_test_template
def test_get_with_plain_and_force_text_type_markdown(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=markdown
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='markdown',
expected_text=r'\#<\`This\` is a \*\*test\*\*>')
@webapi_test_template
def test_get_with_plain_and_force_text_type_plain(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=plain
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='plain',
expected_text='#<`This` is a **test**>')
@webapi_test_template
def test_get_with_plain_and_force_text_type_html(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=html
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='html',
expected_text='#<`This` is a **test**>')
@webapi_test_template
def test_put_with_text_type_markdown_and_text(self):
"""Testing the PUT <URL> API
with text_type=markdown and text specified
"""
self._test_put_with_text_type_and_text('markdown')
@webapi_test_template
def test_put_with_text_type_plain_and_text(self):
"""Testing the PUT <URL> API with text_type=plain and text specified"""
self._test_put_with_text_type_and_text('plain')
@webapi_test_template
def test_put_with_text_type_markdown_and_not_text(self):
"""Testing the PUT <URL> API
with text_type=markdown and text not specified escapes text
"""
self._test_put_with_text_type_and_not_text(
'markdown',
'`Test` **diff** comment',
r'\`Test\` \*\*diff\*\* comment')
@webapi_test_template
def test_put_with_text_type_plain_and_not_text(self):
"""Testing the PUT <URL> API
with text_type=plain and text not specified
"""
self._test_put_with_text_type_and_not_text(
'plain',
r'\`Test\` \*\*diff\*\* comment',
'`Test` **diff** comment')
@webapi_test_template
def test_put_without_text_type_and_escaping_provided_fields(self):
"""Testing the PUT <URL> API
without changing text_type and with escaping provided fields
"""
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
reply_comment.rich_text = True
reply_comment.save()
if 'text_type' in data:
del data['text_type']
data.update({
'text': '`This` is **text**',
})
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text_type'], 'markdown')
self.assertEqual(comment_rsp['text'], '\\`This\\` is \\*\\*text\\*\\*')
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
@webapi_test_template
def test_put_with_multiple_include_text_types(self):
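"""Testing the PUT <URL> API with multiple include-text-types"""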
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
data.update({
'include_text_types': 'raw,plain,markdown,html',
'text': 'Foo',
})
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
def _test_get_with_force_text_type(self, text, rich_text,
force_text_type, expected_text):
url, mimetype, comment = \
self.setup_basic_get_test(self.user, False, None)
comment.text = text
comment.rich_text = rich_text
comment.save()
rsp = self.api_get(url + '?force-text-type=%s' % force_text_type,
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text_type'], force_text_type)
self.assertEqual(comment_rsp['text'], expected_text)
self.assertNotIn('raw_text_fields', comment_rsp)
rsp = self.api_get('%s?force-text-type=%s&include-text-types=raw'
% (url, force_text_type),
expected_mimetype=mimetype)
comment_rsp = rsp[self.resource.item_result_key]
self.assertIn('raw_text_fields', comment_rsp)
self.assertEqual(comment_rsp['raw_text_fields']['text'], text)
def _test_put_with_text_type_and_text(self, text_type):
comment_text = '`Test` **diff** comment'
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
data['text_type'] = text_type
data['text'] = comment_text
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], comment_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
def _test_put_with_text_type_and_not_text(self, text_type, text,
expected_text):
self.assertIn(text_type, ('markdown', 'plain'))
rich_text = (text_type == 'markdown')
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
reply_comment.text = text
reply_comment.rich_text = not rich_text
reply_comment.save()
data['text_type'] = text_type
if 'text' in data:
del data['text']
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], expected_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
class CommentListMixin(ExtraDataListMixin, BaseCommentListMixin):
pass
class CommentItemMixin(ExtraDataItemMixin, BaseCommentItemMixin):
pass
class CommentReplyListMixin(BaseCommentListMixin):
pass
class CommentReplyItemMixin(BaseCommentItemMixin):
pass
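# Hedged usage sketch (illustrative only; the base test case class and the
# resource attribute below are assumptions about the wider test suite, not
# part of this module):
#
#   class DiffCommentItemTests(CommentItemMixin, BaseWebAPITestCase):
#       resource = resources.review_diff_comment
#       ...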
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.enable_mocktime()
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
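# BIP125: a transaction signals replaceability when any of its inputs
# has an nSequence value below 0xfffffffe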
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
assert_equal(utxo_to_use["safe"], True)
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
assert_equal(utxo_to_use["safe"], False)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
|
|
# Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unohelper
from com.sun.star.awt import XMouseListener, KeyEvent, Point, Rectangle
from com.sun.star.awt.MouseButton import RIGHT as MB_RIGHT, LEFT as MB_LEFT
from mytools_Mri.ui import transferable
class KeyModifier(object):
from com.sun.star.awt.KeyModifier import MOD1, MOD2, SHIFT
titles = (
('Name', 'Value Type', 'Value', 'Info.', 'Attr.'), #, 'Handle'),
('Name', 'Arguments', 'Return Type', 'Declaring Class', 'Exceptions'),
('Interfaces',),
('Services',)
)
def _create_grid2(ctx, smgr, page, name, help_url, title_id, grid_type):
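# Helper that builds a UnoControlGrid on the given page. grid_type chooses
# how alternating row colors are configured (1: EvenRowBackgroundColor,
# otherwise RowBackgroundColors); title_id selects the tuple of column titles.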
#from com.sun.star.awt import FontDescriptor
model = page.getModel()
grid_model = model.createInstance(
"com.sun.star.awt.grid.UnoControlGridModel")
grid_model.setPropertyValues(
("BackgroundColor", "Border", "HScroll", "SelectionModel",
"ShowColumnHeader", "ShowRowHeader", "VScroll"),
(page.StyleSettings.DialogColor, 0, True, 1, True, False, True))
#desc = FontDescriptor()
#desc.Name = "DejaVu Sans Mono"
#grid_model.FontDescriptor = desc
if grid_type == 1:
grid_model.EvenRowBackgroundColor = 0xfafafa
else:
grid_model.RowBackgroundColors = (0xffffff, 0xfafafa)
if not grid_model.GridDataModel:
data = smgr.createInstanceWithContext(
"com.sun.star.awt.grid.DefaultGridDataModel", ctx)
grid_model.GridDataModel = data
if grid_type != 2:
grid_model.GridDataModel.addRow('', titles[title_id])
columns = grid_model.ColumnModel
if not columns:
columns = smgr.createInstanceWithContext(
"com.sun.star.awt.grid.DefaultGridColumnModel", ctx)
for title in titles[title_id]:
column = smgr.createInstanceWithContext(
"com.sun.star.awt.grid.GridColumn", ctx)
column.Title = title
columns.addColumn(column)
grid_model.ColumnModel = columns
model.insertByName(name, grid_model)
return page.getControl(name)
# ToDo reconstruct
class SimpleGridInfoListener(unohelper.Base, XMouseListener):
def __init__(self, cast):
self.cast = cast
self.popup = None
import mytools_Mri.tools
self.use_point = mytools_Mri.tools.check_method_parameter(self.cast.ctx,
"com.sun.star.awt.XPopupMenu", "execute", 1, "com.sun.star.awt.Point")
def disposing(self,ev): pass
def mouseReleased(self,ev): pass
def mouseEntered(self,ev): pass
def mouseExited(self,ev): pass
def mousePressed(self, ev):
if ev.Buttons == MB_RIGHT and ev.ClickCount == 1:
if not self.popup:
self.popup = self._create_popup()
if not self.popup: return
grid_model = ev.Source.getModel()
if grid_model.ShowColumnHeader:
if hasattr(grid_model, "ColumnHeaderHeight"):
header_height = grid_model.ColumnHeaderHeight
else:
header_height = grid_model.ColumnModel.ColumnHeaderHeight
if header_height is None: header_height = 20
if ev.Y <= header_height:
return
try:
index = self.cast.pages.get_active()
if index == 0:
# properties
self._update_popup_states(((2, True), (4, True), (8, False), (512, True)))
properties_title = ('Name', 'Value Type', 'Value', 'Info.', 'Attr.')
copy_cell_popup = self.popup.getPopupMenu(512)
for i, label in zip(range(513, 518), properties_title):
copy_cell_popup.setItemText(i, label)
elif index == 1:
# methods
self._update_popup_states(((2, False), (4, False), (8, True), (512, True)))
methods_title = ('Name', 'Arguments', 'Return Type', 'Declaring Class', 'Exceptions')
copy_cell_popup = self.popup.getPopupMenu(512)
for i, label in zip(range(513, 518), methods_title):
copy_cell_popup.setItemText(i, label)
else:
self._update_popup_states(((2, False), (4, False), (8, False), (512, False)))
pos = ev.Source.getPosSize()
if self.use_point:
_pos = Point(pos.X + ev.X, pos.Y + ev.Y)
else:
_pos = Rectangle(pos.X + ev.X, pos.Y + ev.Y, 0, 0)
n = self.popup.execute(ev.Source.getPeer(), _pos, 0)
if n > 0:
self.do_command(n)
except Exception as e:
print(e)
def do_command(self, n):
if n == 0x2 or n == 0x8:
# get value and call method
self.cast.info_action()
elif n == 0x4:
mode = self.cast.property_mode
self.cast.property_mode = False # to set the value
try:
self.cast.info_action()
except:
pass
self.cast.property_mode = mode
elif n == 32:
# idl
self.cast.open_idl_reference()
elif n == 256 or 513 <= n <= 517 or n == 520:
# copy
if n == 256:
word = self.cast.pages.get_first_word()
elif 513 <= n <= 517:
word = self.cast.pages.get_cell(column_index=(n - 513))
elif n == 520:
word = ' '.join(self.cast.pages.get_row())
else:
return
try:
transferable.set_text_to_clipboard(self.cast.ctx, word)
except Exception as e:
print(e)
def _update_popup_states(self, states):
if self.popup:
for state in states:
self.popup.enableItem(state[0], state[1])
def _create_popup(self):
""" [ [id, pos, type, 'label', "command", acc_key], [] ] """
items = [
[2, 0, 0, 'Get Value', ':GetValue', None],
[4, 1, 0, 'Set Value', ':SetValue', None],
[8, 2, 0, 'Call Method', ':CallMethod', None],
[-1, 3],
[32, 4, 0, 'IDL Ref.', ':IDLRef', None],
[None, 5],
[256, 6, 0, 'Copy', ':Copy', None],
[512, 7, 0, 'Copy Cell', ':CopyCell', None]
]
copy_cell_items = [
[513, 0, 0, '', ':CopyCell0', None],
[514, 1, 0, '', ':CopyCell1', None],
[515, 2, 0, '', ':CopyCell2', None],
[516, 3, 0, '', ':CopyCell3', None],
[517, 4, 0, '', ':CopyCell4', None],
[None, 5],
[520, 6, 0, 'All', ':CopyCellAll', None]
]
import mytools_Mri.ui.tools
popup = mytools_Mri.ui.tools.create_popupmenu(self.cast.ctx, items)
copy_cell_popup = mytools_Mri.ui.tools.create_popupmenu(self.cast.ctx, copy_cell_items)
popup.setPopupMenu(512, copy_cell_popup)
popup.hideDisabledEntries(True)
return popup
class GridInfoListener(SimpleGridInfoListener):
def __init__(self, cast):
SimpleGridInfoListener.__init__(self, cast)
def mousePressed(self, ev):
try:
if ev.Buttons == MB_LEFT and ev.ClickCount == 2:
category = self.cast.pages.get_active()
mod = ev.Modifiers
if mod == KeyModifier.SHIFT:
return
if category == 0:
if mod == KeyModifier.MOD2 or \
mod == (KeyModifier.SHIFT | KeyModifier.MOD1):
mode = self.cast.property_mode
self.cast.property_mode = not mode
self.cast.info_action(category)
self.cast.property_mode = mode
else:
if mod == KeyModifier.MOD1:
self.cast.main.open_new = True
self.cast.info_action(category)
elif category == 1:
if mod == KeyModifier.SHIFT:
self.cast.main.open_new = True
self.cast.info_action(category)
elif ev.Buttons == MB_RIGHT and ev.ClickCount == 1:
SimpleGridInfoListener.mousePressed(self, ev)
except Exception as e:
print(e)
import traceback
traceback.print_exc()
import re
from mytools_Mri.ui.pages import PagesBase, PageStatus, Ui
class GridPagesBase(PagesBase):
""" Basis of informations shown in grid controls. """
def __init__(self, active, ctrls, changer, scrolls, tab):
PagesBase.__init__(self, active, ctrls, changer, tab)
self.scrolls = scrolls
def __getitem__(self, index):
return ''
def _re_set_size(self, index):
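# Nudging the control height by one pixel and back appears to force the
# grid to re-layout and repaint its contents (flag 8 == PosSize.HEIGHT).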
grid = self.ctrls[index]
size = grid.getPosSize()
grid.setPosSize(0, 0, 0, size.Height +1, 8)
grid.setPosSize(0, 0, 0, size.Height, 8)
def set_properties(self, index, names, values):
pass
def set_font(self, name, height):
"""does not effected."""
pass
def _get_grid(self, index=None):
if index is None: index = self.get_active()
return self.ctrls[index]
def get_current_line(self, index=None, r=None):
return self.get_first_word()
def get_first_word(self, index=None):
return self.get_cell(index)
def select_row(self, index, row_index):
ctrl = self._get_grid(index)
try:
ctrl.deselectAllRows()
ctrl.selectRow(row_index)
self.show_row(index, row_index)
except Exception as e:
print(e)
def show_row(self, index, row_index):
""" make to show the row. """
if index is None: index = self.get_active()
scroll = self.scrolls[index]
if not scroll: return
if row_index < scroll.getValue():
scroll.setValue(row_index)
elif row_index >= (scroll.getBlockIncrement() + scroll.getValue()):
scroll.setValue(
row_index - scroll.getBlockIncrement() + 1)
import mytools_Mri.config
class GridPages(GridPagesBase):
""" Keeps grid controls. """
TAG_EXP = "^\(([^\)]*?)\)"
def select_current_sentence(self, index=None):
pass
def select_current_line(self, index=None):
pass
def get_selected(self, index=None):
ret = ""
ctrl = self._get_grid(index)
row_index = self._get_selected(ctrl)
if not row_index is None:
data_model = ctrl.getModel().GridDataModel
ret = self._get_cell_data(data_model, 0, row_index)
return ret
def get_cell(self, index=None, row_index=None, column_index=0):
ret = ''
ctrl = self._get_grid(index)
row_index = self._get_selected(ctrl)
if not row_index is None:
data_model = ctrl.getModel().GridDataModel
ret = self._get_cell_data(data_model, column_index, row_index)
return ret
def get_row(self, index=None, row_index=None):
ctrl = self._get_grid(index)
row_index = self._get_selected(ctrl)
if not row_index is None:
data_model = ctrl.getModel().GridDataModel
return self._get_row(data_model, row_index)
return []
def get_tag(self, index=None):
ret = ''
ctrl = self._get_grid(index)
row_index = self._get_selected(ctrl)
if not row_index is None:
data_model = ctrl.getModel().GridDataModel
regexp = re.compile(self.TAG_EXP)
ret = self._search_in_first_column(data_model, row_index, regexp)
return ret
def search(self, search_text, index=None):
start_row = 0
ctrl = self._get_grid(index)
row_index = self._get_selected(ctrl)
if not row_index is None:
start_row = row_index
data_model = ctrl.getModel().GridDataModel
n = self._search(data_model, start_row, search_text)
if not n is None:
self.select_row(index, n)
def __init__(self, active, ctrls, changer, scrolls, tab):
GridPagesBase.__init__(self, active, ctrls, changer, scrolls, tab)
if mytools_Mri.config.Config.GRID == 1:
def __setitem__(self, index, rows):
try:
data_model = self.ctrls[index].getModel().GridDataModel
data_model.removeAll()
if not isinstance(rows, (list, tuple)):
rows = ((rows,),)
for row in rows:
data_model.addRow('', tuple(row))
except Exception as e:
print(e)
def _get_count(self, data_model):
return data_model.getRowCount()
def _remove_all(self, data_model):
data_model.removeAll()
def _get_cell_data(self, data_model, column, row):
return data_model.Data[row][column]
def _get_row(self, data_model, n):
return data_model.Data[n]
def _add_rows(self, data_model, headings, rows):
for heading, row in zip(headings, rows):
data_model.addRow(heading, tuple(row))
def _get_selected(self, grid):
selections = grid.getSelection()
if selections:
return selections[0]
return None
def _search_in_first_column(self, data_model, row_index, regexp):
data = data_model.Data
for i in range(row_index)[::-1]:
d = data[i]
m = regexp.search(d[0])
if m:
return m.group(1)
return None
def _search(self, data_model, start_row, search_text):
exp = None
try:
exp = re.compile(search_text, re.I)
except:
pass
result = None
n = None
for i, row in enumerate(data_model.Data[start_row +1:]):
row_data = '|'.join(row)
if exp:
result = exp.search(row_data, 0)
if result:
n = i + start_row + 1
break
else:
result = row_data.find(search_text)
if result >= 0:
n = i + start_row + 1
break
return n
else:
def __setitem__(self, index, rows):
try:
data_model = self.ctrls[index].getModel().GridDataModel
data_model.removeAllRows()
if not isinstance(rows, (list, tuple)):
rows = ((rows,),)
trows = tuple([tuple(row) for row in rows])
headings = tuple(["" for i in range(len(rows))])
data_model.addRows(headings, trows)
except:
pass
if mytools_Mri.config.Config.GRID_UPDATE:
self.ctrls[index].getContext().getPeer().invalidate(8)
self._re_set_size(index)
def _get_count(self, data_model):
return data_model.RowCount
def _remove_all(self, data_model):
data_model.removeAllRows()
def _get_cell_data(self, data_model, column, row):
return data_model.getCellData(column, row)
def _add_rows(self, data_model, headings, rows):
data_model.addRows(headings, tuple(tuple(row) for row in rows))
def _search_in_first_column(self, data_model, row_index, regexp):
if row_index:
for i in range(row_index)[::-1]:
m = regexp.search(data_model.getCellData(0, i))
if m:
return m.group(1)
elif row_index == 0:
m = regexp.search(data_model.getCellData(0, 0))
if m:
return m.group(1)
return None
if mytools_Mri.config.Config.GRID == 2:
def _get_row(self, data_model, n):
return data_model.getRowData(n)
def _get_selected(self, grid):
if grid.hasSelectedRows():
return grid.getSelectedRows()[0]
return None
def _search(self, data_model, start_row, search_text):
exp = None
try:
exp = re.compile(search_text, re.I)
except:
pass
result = None
n = None
for i in range(start_row +1, data_model.RowCount):
row_data = "|".join(data_model.getRowData(i))
if exp:
result = exp.search(row_data, 0)
if result:
n = i
break
else:
result = row_data.find(search_text)
if result >= 0:
n = i
break
return n
else:
def _get_row(self, data_model, n):
return [data_model.getCellData(i, n)
for i in range(data_model.ColumnCount)]
def _get_selected(self, grid):
selections = grid.getSelection()
if selections:
return selections[0]
return None
def _search(self, data_model, start_row, search_text):
exp = None
try:
exp = re.compile(search_text, re.I)
except:
pass
column_count = data_model.ColumnCount
result = None
n = None
for i in range(start_row +1, data_model.RowCount):
row_data = "|".join([data_model.getCellData(j, i)
for j in range(column_count)])
if exp:
result = exp.search(row_data, 0)
if result:
n = i
break
else:
result = row_data.find(search_text)
if result >= 0:
n = i
break
return n
class GridUi(Ui, GridPages, PageStatus):
"""UI with grid controls."""
def __init__(self, active, ctrls, changer,
code, status, scrolls, tab, *args):
Ui.__init__(self, code, status)
GridPages.__init__(self, active, ctrls, changer, scrolls, tab)
PageStatus.__init__(self, ctrls)
def reset(self):
for ctrl in self.ctrls:
data_model = ctrl.getModel().GridDataModel
if self._get_count(data_model) > 0:
self._remove_all(data_model)
self.update_status = [False for i in range(len(self.ctrls))]
|
|
#!/usr/bin/env python
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import io
import json
import mock
from six.moves.urllib.parse import parse_qs
from swift.common import swob
from swift.common.middleware import symlink, copy, versioned_writes, \
listing_formats
from swift.common.swob import Request
from swift.common.request_helpers import get_reserved_name
from swift.common.utils import MD5_OF_EMPTY_STRING
from swift.common.registry import get_swift_info
from test.unit.common.middleware.helpers import FakeSwift
from test.unit.common.middleware.test_versioned_writes import FakeCache
class TestSymlinkMiddlewareBase(unittest.TestCase):
def setUp(self):
self.app = FakeSwift()
self.sym = symlink.filter_factory({
'symloop_max': '2',
})(self.app)
self.sym.logger = self.app.logger
def call_app(self, req, app=None, expect_exception=False):
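# Minimal WSGI driver for these tests: install a recording swift.authorize
# callback, invoke the app, and collect the response status, headers and body.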
if app is None:
app = self.app
self.authorized = []
def authorize(req):
self.authorized.append(req)
if 'swift.authorize' not in req.environ:
req.environ['swift.authorize'] = authorize
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = app(req.environ, start_response)
body = b''
caught_exc = None
try:
for chunk in body_iter:
body += chunk
except Exception as exc:
if expect_exception:
caught_exc = exc
else:
raise
if expect_exception:
return status[0], headers[0], body, caught_exc
else:
return status[0], headers[0], body
def call_sym(self, req, **kwargs):
return self.call_app(req, app=self.sym, **kwargs)
class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
def test_symlink_info(self):
swift_info = get_swift_info()
self.assertEqual(swift_info['symlink'], {
'symloop_max': 2,
'static_links': True,
})
def test_symlink_simple_put(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o' % MD5_OF_EMPTY_STRING)
self.assertEqual('application/symlink', hdrs.get('Content-Type'))
def test_symlink_simple_put_with_content_type(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o',
'Content-Type': 'application/linkyfoo'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o' % MD5_OF_EMPTY_STRING)
self.assertEqual('application/linkyfoo', hdrs.get('Content-Type'))
def test_symlink_simple_put_with_etag(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'tgt-etag', 'Content-Length': 42,
'Content-Type': 'application/foo'})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'tgt-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o; '
'symlink_target_etag=tgt-etag; '
'symlink_target_bytes=42' % MD5_OF_EMPTY_STRING)
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
self.assertEqual('application/foo',
self.app._calls[-1].headers['Content-Type'])
def test_symlink_simple_put_with_quoted_etag(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'tgt-etag', 'Content-Length': 42,
'Content-Type': 'application/foo'})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': '"tgt-etag"',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o; '
'symlink_target_etag=tgt-etag; '
'symlink_target_bytes=42' % MD5_OF_EMPTY_STRING)
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
self.assertEqual('application/foo',
self.app._calls[-1].headers['Content-Type'])
def test_symlink_simple_put_with_etag_target_missing_content_type(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'tgt-etag', 'Content-Length': 42})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'tgt-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o; '
'symlink_target_etag=tgt-etag; '
'symlink_target_bytes=42' % MD5_OF_EMPTY_STRING)
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
# N.B. the ObjectController would call _update_content_type on PUT
# regardless, but you actually can't get a HEAD response without swob
# setting a Content-Type
self.assertEqual('text/html; charset=UTF-8',
self.app._calls[-1].headers['Content-Type'])
def test_symlink_simple_put_with_etag_explicit_content_type(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'tgt-etag', 'Content-Length': 42,
'Content-Type': 'application/foo'})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'tgt-etag',
'Content-Type': 'application/bar',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o; '
'symlink_target_etag=tgt-etag; '
'symlink_target_bytes=42' % MD5_OF_EMPTY_STRING)
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
self.assertEqual('application/bar',
self.app._calls[-1].headers['Content-Type'])
def test_symlink_simple_put_with_unmatched_etag(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'tgt-etag', 'Content-Length': 42})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'not-tgt-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertIn(('Content-Location', '/v1/a/c1/o'), headers)
self.assertEqual(body, b"Object Etag 'tgt-etag' does not match "
b"X-Symlink-Target-Etag header 'not-tgt-etag'")
def test_symlink_simple_put_to_non_existing_object(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'not-tgt-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertIn(('Content-Location', '/v1/a/c1/o'), headers)
self.assertIn(b'does not exist', body)
def test_symlink_simple_put_error(self):
self.app.register('HEAD', '/v1/a/c1/o',
swob.HTTPInternalServerError, {}, 'bad news')
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'not-tgt-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '500 Internal Error')
# this is a PUT response; so if we have a content-length...
self.assertGreater(int(dict(headers)['Content-Length']), 0)
# ... we better have a body!
self.assertIn(b'Internal Error', body)
def test_symlink_simple_put_to_non_existing_object_override(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPNotFound, {})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'some-tgt-etag',
# this header isn't normally sent with PUT
'X-Symlink-Target-Bytes': '13',
}, body='')
# this can be set in container_sync
req.environ['swift.symlink_override'] = True
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
def test_symlink_put_with_prevalidated_etag(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT', headers={
'X-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Etag': 'tgt-etag',
'X-Object-Sysmeta-Symlink-Target-Bytes': '13',
'Content-Type': 'application/foo',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
self.assertEqual([
# N.B. no HEAD!
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
self.assertEqual('application/foo',
self.app._calls[-1].headers['Content-Type'])
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o; '
'symlink_target_etag=tgt-etag; '
'symlink_target_bytes=13' % MD5_OF_EMPTY_STRING)
def test_symlink_put_with_prevalidated_etag_sysmeta_incomplete(self):
req = Request.blank('/v1/a/c/symlink', method='PUT', headers={
'X-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Etag': 'tgt-etag',
}, body='')
with self.assertRaises(KeyError) as cm:
self.call_sym(req)
self.assertEqual(cm.exception.args[0], swob.header_to_environ_key(
'X-Object-Sysmeta-Symlink-Target-Bytes'))
def test_symlink_chunked_put(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o'},
environ={'wsgi.input': io.BytesIO(b'')})
self.assertIsNone(req.content_length) # sanity
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o' % MD5_OF_EMPTY_STRING)
def test_symlink_chunked_put_error(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o'},
environ={'wsgi.input':
io.BytesIO(b'this has a body')})
self.assertIsNone(req.content_length) # sanity
status, headers, body = self.call_sym(req)
self.assertEqual(status, '400 Bad Request')
def test_symlink_put_different_account(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Account': 'a1'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertEqual(hdrs.get('X-Object-Sysmeta-Symlink-Target-Account'),
'a1')
def test_symlink_put_leading_slash(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': '/c1/o'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(body, b"X-Symlink-Target header must be of "
b"the form <container name>/<object name>")
def test_symlink_put_non_zero_length(self):
req = Request.blank('/v1/a/c/symlink', method='PUT', body='req_body',
headers={'X-Symlink-Target': 'c1/o'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '400 Bad Request')
self.assertEqual(body, b'Symlink requests require a zero byte body')
def test_symlink_put_bad_object_header(self):
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'o'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, "412 Precondition Failed")
self.assertEqual(body, b"X-Symlink-Target header must be of "
b"the form <container name>/<object name>")
def test_symlink_put_bad_account_header(self):
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Account': 'a1/c1'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, "412 Precondition Failed")
self.assertEqual(body, b"Account name cannot contain slashes")
def test_get_symlink(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o'})
req = Request.blank('/v1/a/c/symlink?symlink=get', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIsInstance(headers, list)
self.assertIn(('X-Symlink-Target', 'c1/o'), headers)
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
def test_get_symlink_with_account(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
req = Request.blank('/v1/a/c/symlink?symlink=get', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('X-Symlink-Target', 'c1/o'), headers)
self.assertIn(('X-Symlink-Target-Account', 'a2'), headers)
def test_get_symlink_not_found(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/symlink', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '404 Not Found')
self.assertNotIn('Content-Location', dict(headers))
def test_get_target_object(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('GET', '/v1/a2/c1/o', swob.HTTPOk, {}, 'resp_body')
req_headers = {'X-Newest': 'True', 'X-Backend-Something': 'future'}
req = Request.blank('/v1/a/c/symlink', method='GET',
headers=req_headers)
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, b'resp_body')
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)
calls = self.app.calls_with_headers
req_headers.update({
'Host': 'localhost:80',
'X-Backend-Ignore-Range-If-Metadata-Present':
'x-object-sysmeta-symlink-target',
'X-Backend-Storage-Policy-Index': '2',
})
self.assertEqual(req_headers, calls[0].headers)
req_headers['User-Agent'] = 'Swift'
self.assertEqual(req_headers, calls[1].headers)
self.assertFalse(calls[2:])
self.assertFalse(self.app.unread_requests)
def test_get_target_object_not_found(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-account': 'a2'})
self.app.register('GET', '/v1/a2/c1/o', swob.HTTPNotFound, {}, '')
req = Request.blank('/v1/a/c/symlink', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(body, b'')
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)
self.assertFalse(self.app.unread_requests)
def test_get_target_object_range_not_satisfiable(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('GET', '/v1/a2/c1/o',
swob.HTTPRequestedRangeNotSatisfiable, {}, '')
req = Request.blank('/v1/a/c/symlink', method='GET',
headers={'Range': 'bytes=1-2'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '416 Requested Range Not Satisfiable')
self.assertEqual(
body, b'<html><h1>Requested Range Not Satisfiable</h1>'
b'<p>The Range requested is not available.</p></html>')
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)
self.assertFalse(self.app.unread_requests)
def test_get_ec_symlink_range_unsatisfiable_can_redirect_to_target(self):
self.app.register('GET', '/v1/a/c/symlink',
swob.HTTPRequestedRangeNotSatisfiable,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('GET', '/v1/a2/c1/o', swob.HTTPOk,
{'Content-Range': 'bytes 1-2/10'}, 'es')
req = Request.blank('/v1/a/c/symlink', method='GET',
headers={'Range': 'bytes=1-2'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, b'es')
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)
self.assertIn(('Content-Range', 'bytes 1-2/10'), headers)
def test_get_non_symlink(self):
# this is not symlink object
self.app.register('GET', '/v1/a/c/obj', swob.HTTPOk, {}, 'resp_body')
req = Request.blank('/v1/a/c/obj', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, b'resp_body')
# Assert special headers for symlink are not in response
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertNotIn('Content-Location', dict(headers))
def test_get_static_link_mismatched_etag(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Etag': 'the-etag'})
# apparently target object was overwritten
self.app.register('GET', '/v1/a/c1/o', swob.HTTPOk,
{'ETag': 'not-the-etag'}, 'resp_body')
req = Request.blank('/v1/a/c/symlink', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertEqual(body, b"Object Etag 'not-the-etag' does not "
b"match X-Symlink-Target-Etag header 'the-etag'")
def test_get_static_link_to_symlink(self):
self.app.register('GET', '/v1/a/c/static_link', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/symlink',
'X-Object-Sysmeta-Symlink-Target-Etag': 'the-etag'})
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'ETag': 'the-etag',
'X-Object-Sysmeta-Symlink-Target': 'c1/o'})
self.app.register('GET', '/v1/a/c1/o', swob.HTTPOk,
{'ETag': 'not-the-etag'}, 'resp_body')
req = Request.blank('/v1/a/c/static_link', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
def test_get_static_link_to_symlink_fails(self):
self.app.register('GET', '/v1/a/c/static_link', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/symlink',
'X-Object-Sysmeta-Symlink-Target-Etag': 'the-etag'})
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'ETag': 'not-the-etag',
'X-Object-Sysmeta-Symlink-Target': 'c1/o'})
req = Request.blank('/v1/a/c/static_link', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertEqual(body, b"X-Symlink-Target-Etag headers do not match")
def put_static_link_to_symlink(self):
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'ETag': 'symlink-etag',
'X-Object-Sysmeta-Symlink-Target': 'c/o',
'Content-Type': 'application/symlink'})
self.app.register('HEAD', '/v1/a/c/o', swob.HTTPOk,
{'ETag': 'tgt-etag',
'Content-Type': 'application/data'}, 'resp_body')
self.app.register('PUT', '/v1/a/c/static_link', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/static_link', method='PUT',
headers={
'X-Symlink-Target': 'c/symlink',
'X-Symlink-Target-Etag': 'symlink-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
        self.assertEqual(('PUT', '/v1/a/c/static_link'), self.app.calls[-1])
self.assertEqual('application/data',
self.app._calls[-1].headers['Content-Type'])
def test_head_symlink(self):
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Meta-Color': 'Red'})
req = Request.blank('/v1/a/c/symlink?symlink=get', method='HEAD')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('X-Symlink-Target', 'c1/o'), headers)
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('X-Object-Meta-Color', 'Red'), headers)
def test_head_symlink_with_account(self):
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2',
'X-Object-Meta-Color': 'Red'})
req = Request.blank('/v1/a/c/symlink?symlink=get', method='HEAD')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('X-Symlink-Target', 'c1/o'), headers)
self.assertIn(('X-Symlink-Target-Account', 'a2'), headers)
self.assertIn(('X-Object-Meta-Color', 'Red'), headers)
def test_head_target_object(self):
        # this test also validates that the symlink metadata is not
        # returned, but the target object metadata is
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2',
'X-Object-Meta-Color': 'Red'})
self.app.register('HEAD', '/v1/a2/c1/o', swob.HTTPOk,
{'X-Object-Meta-Color': 'Green'})
req_headers = {'X-Newest': 'True', 'X-Backend-Something': 'future'}
req = Request.blank('/v1/a/c/symlink', method='HEAD',
headers=req_headers)
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertNotIn(('X-Object-Meta-Color', 'Red'), headers)
self.assertIn(('X-Object-Meta-Color', 'Green'), headers)
self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)
calls = self.app.calls_with_headers
req_headers.update({
'Host': 'localhost:80',
'X-Backend-Ignore-Range-If-Metadata-Present':
'x-object-sysmeta-symlink-target',
'X-Backend-Storage-Policy-Index': '2',
})
self.assertEqual(req_headers, calls[0].headers)
req_headers['User-Agent'] = 'Swift'
self.assertEqual(req_headers, calls[1].headers)
self.assertFalse(calls[2:])
def test_get_symlink_to_reserved_object(self):
cont = get_reserved_name('versioned')
obj = get_reserved_name('symlink', '9999998765.99999')
symlink_target = "%s/%s" % (cont, obj)
version_path = '/v1/a/%s' % symlink_target
self.app.register('GET', '/v1/a/versioned/symlink', swob.HTTPOk, {
symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: symlink_target,
symlink.ALLOW_RESERVED_NAMES: 'true',
'x-object-sysmeta-symlink-target-etag': MD5_OF_EMPTY_STRING,
'x-object-sysmeta-symlink-target-bytes': '0',
})
self.app.register('GET', version_path, swob.HTTPOk, {})
req = Request.blank('/v1/a/versioned/symlink', headers={
'Range': 'foo', 'If-Match': 'bar'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('Content-Location', version_path), headers)
self.assertEqual(len(self.authorized), 1)
self.assertNotIn('X-Backend-Allow-Reserved-Names',
self.app.calls_with_headers[0])
call_headers = self.app.calls_with_headers[1].headers
self.assertEqual('true', call_headers[
'X-Backend-Allow-Reserved-Names'])
self.assertEqual('foo', call_headers['Range'])
self.assertEqual('bar', call_headers['If-Match'])
def test_get_symlink_to_reserved_symlink(self):
cont = get_reserved_name('versioned')
obj = get_reserved_name('symlink', '9999998765.99999')
symlink_target = "%s/%s" % (cont, obj)
version_path = '/v1/a/%s' % symlink_target
self.app.register('GET', '/v1/a/versioned/symlink', swob.HTTPOk, {
symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: symlink_target,
symlink.ALLOW_RESERVED_NAMES: 'true',
'x-object-sysmeta-symlink-target-etag': MD5_OF_EMPTY_STRING,
'x-object-sysmeta-symlink-target-bytes': '0',
})
self.app.register('GET', version_path, swob.HTTPOk, {
symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: 'unversioned/obj',
'ETag': MD5_OF_EMPTY_STRING,
})
self.app.register('GET', '/v1/a/unversioned/obj', swob.HTTPOk, {
})
req = Request.blank('/v1/a/versioned/symlink')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('Content-Location', '/v1/a/unversioned/obj'), headers)
self.assertEqual(len(self.authorized), 2)
def test_symlink_too_deep(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym1'})
self.app.register('GET', '/v1/a/c/sym1', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym2'})
self.app.register('GET', '/v1/a/c/sym2', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/o'})
req = Request.blank('/v1/a/c/symlink', method='HEAD')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertEqual(body, b'')
req = Request.blank('/v1/a/c/symlink')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertEqual(body, b'Too many levels of symbolic links, '
b'maximum allowed is 2')
def test_symlink_change_symloopmax(self):
        # similar to test_symlink_too_deep, but with the limit raised to 3
self.sym = symlink.filter_factory({
'symloop_max': '3',
})(self.app)
self.sym.logger = self.app.logger
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym1'})
self.app.register('HEAD', '/v1/a/c/sym1', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym2'})
self.app.register('HEAD', '/v1/a/c/sym2', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/o',
'X-Object-Meta-Color': 'Red'})
self.app.register('HEAD', '/v1/a/c/o', swob.HTTPOk,
{'X-Object-Meta-Color': 'Green'})
req = Request.blank('/v1/a/c/symlink', method='HEAD')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
# assert that the correct metadata was returned
self.assertNotIn(('X-Object-Meta-Color', 'Red'), headers)
self.assertIn(('X-Object-Meta-Color', 'Green'), headers)
def test_sym_to_sym_to_target(self):
        # this test also validates that the symlink metadata is not
        # returned, but the target object metadata is
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym1',
'X-Object-Meta-Color': 'Red'})
self.app.register('HEAD', '/v1/a/c/sym1', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Meta-Color': 'Yellow'})
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk,
{'X-Object-Meta-Color': 'Green'})
req = Request.blank('/v1/a/c/symlink', method='HEAD')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertNotIn(('X-Symlink-Target', 'c1/o'), headers)
self.assertNotIn(('X-Symlink-Target-Account', 'a2'), headers)
self.assertNotIn(('X-Object-Meta-Color', 'Red'), headers)
self.assertNotIn(('X-Object-Meta-Color', 'Yellow'), headers)
self.assertIn(('X-Object-Meta-Color', 'Green'), headers)
self.assertIn(('Content-Location', '/v1/a/c1/o'), headers)
def test_symlink_post(self):
self.app.register('POST', '/v1/a/c/symlink', swob.HTTPAccepted,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o'})
req = Request.blank('/v1/a/c/symlink', method='POST',
headers={'X-Object-Meta-Color': 'Red'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '307 Temporary Redirect')
self.assertEqual(
body,
b'The requested POST was applied to a symlink. POST '
b'directly to the target to apply requested metadata.')
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Meta-Color')
self.assertEqual(val, 'Red')
def test_non_symlink_post(self):
self.app.register('POST', '/v1/a/c/o', swob.HTTPAccepted, {})
req = Request.blank('/v1/a/c/o', method='POST',
headers={'X-Object-Meta-Color': 'Red'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '202 Accepted')
def test_set_symlink_POST_fail(self):
# Setting a link with a POST request is not allowed
req = Request.blank('/v1/a/c/o', method='POST',
headers={'X-Symlink-Target': 'c1/regular_obj'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '400 Bad Request')
self.assertEqual(body, b"A PUT request is required to set a symlink "
b"target")
def test_symlink_post_but_fail_at_server(self):
self.app.register('POST', '/v1/a/c/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/o', method='POST',
headers={'X-Object-Meta-Color': 'Red'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '404 Not Found')
def test_validate_and_prep_request_headers(self):
def do_test(headers):
req = Request.blank('/v1/a/c/o', method='PUT',
headers=headers)
symlink._validate_and_prep_request_headers(req)
# normal cases
do_test({'X-Symlink-Target': 'c1/o1'})
do_test({'X-Symlink-Target': 'c1/sub/o1'})
do_test({'X-Symlink-Target': 'c1%2Fo1'})
# specify account
do_test({'X-Symlink-Target': 'c1/o1',
'X-Symlink-Target-Account': 'another'})
# URL encoded is safe
do_test({'X-Symlink-Target': 'c1%2Fo1'})
# URL encoded + multibytes is also safe
target = u'\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'
target = swob.bytes_to_wsgi(target.encode('utf8'))
do_test({'X-Symlink-Target': target})
do_test({'X-Symlink-Target': swob.wsgi_quote(target)})
target = swob.bytes_to_wsgi(u'\u30b0\u30e9\u30d6\u30eb'.encode('utf8'))
do_test(
{'X-Symlink-Target': 'cont/obj',
'X-Symlink-Target-Account': target})
do_test(
{'X-Symlink-Target': 'cont/obj',
'X-Symlink-Target-Account': swob.wsgi_quote(target)})
def test_validate_and_prep_request_headers_invalid_format(self):
def do_test(headers, status, err_msg):
req = Request.blank('/v1/a/c/o', method='PUT',
headers=headers)
with self.assertRaises(swob.HTTPException) as cm:
symlink._validate_and_prep_request_headers(req)
self.assertEqual(cm.exception.status, status)
self.assertEqual(cm.exception.body, err_msg)
do_test({'X-Symlink-Target': '/c1/o1'},
'412 Precondition Failed',
b'X-Symlink-Target header must be of the '
b'form <container name>/<object name>')
do_test({'X-Symlink-Target': 'c1o1'},
'412 Precondition Failed',
b'X-Symlink-Target header must be of the '
b'form <container name>/<object name>')
do_test({'X-Symlink-Target': 'c1/o1',
'X-Symlink-Target-Account': '/another'},
'412 Precondition Failed',
b'Account name cannot contain slashes')
do_test({'X-Symlink-Target': 'c1/o1',
'X-Symlink-Target-Account': 'an/other'},
'412 Precondition Failed',
b'Account name cannot contain slashes')
# url encoded case
do_test({'X-Symlink-Target': '%2Fc1%2Fo1'},
'412 Precondition Failed',
b'X-Symlink-Target header must be of the '
b'form <container name>/<object name>')
do_test({'X-Symlink-Target': 'c1/o1',
'X-Symlink-Target-Account': '%2Fanother'},
'412 Precondition Failed',
b'Account name cannot contain slashes')
do_test({'X-Symlink-Target': 'c1/o1',
'X-Symlink-Target-Account': 'an%2Fother'},
'412 Precondition Failed',
b'Account name cannot contain slashes')
# with multi-bytes
target = u'/\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'
target = swob.bytes_to_wsgi(target.encode('utf8'))
do_test(
{'X-Symlink-Target': target},
'412 Precondition Failed',
b'X-Symlink-Target header must be of the '
b'form <container name>/<object name>')
do_test(
{'X-Symlink-Target': swob.wsgi_quote(target)},
'412 Precondition Failed',
b'X-Symlink-Target header must be of the '
b'form <container name>/<object name>')
account = u'\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'
account = swob.bytes_to_wsgi(account.encode('utf8'))
do_test(
{'X-Symlink-Target': 'c/o',
'X-Symlink-Target-Account': account},
'412 Precondition Failed',
b'Account name cannot contain slashes')
do_test(
{'X-Symlink-Target': 'c/o',
'X-Symlink-Target-Account': swob.wsgi_quote(account)},
'412 Precondition Failed',
b'Account name cannot contain slashes')
def test_validate_and_prep_request_headers_points_to_itself(self):
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'X-Symlink-Target': 'c/o'})
with self.assertRaises(swob.HTTPException) as cm:
symlink._validate_and_prep_request_headers(req)
self.assertEqual(cm.exception.status, '400 Bad Request')
self.assertEqual(cm.exception.body, b'Symlink cannot target itself')
# Even if set account to itself, it will fail as well
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'X-Symlink-Target': 'c/o',
'X-Symlink-Target-Account': 'a'})
with self.assertRaises(swob.HTTPException) as cm:
symlink._validate_and_prep_request_headers(req)
self.assertEqual(cm.exception.status, '400 Bad Request')
self.assertEqual(cm.exception.body, b'Symlink cannot target itself')
# sanity, the case to another account is safe
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'X-Symlink-Target': 'c/o',
'X-Symlink-Target-Account': 'a1'})
symlink._validate_and_prep_request_headers(req)
def test_symloop_max_config(self):
self.app = FakeSwift()
# sanity
self.sym = symlink.filter_factory({
'symloop_max': '1',
})(self.app)
self.assertEqual(self.sym.symloop_max, 1)
        # a value < 1 falls back to the default
self.sym = symlink.filter_factory({
'symloop_max': '-1',
})(self.app)
self.assertEqual(self.sym.symloop_max, symlink.DEFAULT_SYMLOOP_MAX)
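    # A side note (illustrative, not part of the original tests): outside of
    # unit tests the symloop_max knob exercised above typically comes from the
    # proxy pipeline config rather than a dict, e.g. something like
    #
    #     [filter:symlink]
    #     use = egg:swift#symlink
    #     symloop_max = 3
    #
    # which is equivalent to the in-test construction
    # symlink.filter_factory({'symloop_max': '3'})(app).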
class SymlinkCopyingTestCase(TestSymlinkMiddlewareBase):
# verify interaction of copy and symlink middlewares
def setUp(self):
self.app = FakeSwift()
conf = {'symloop_max': '2'}
self.sym = symlink.filter_factory(conf)(self.app)
self.sym.logger = self.app.logger
self.copy = copy.filter_factory({})(self.sym)
def call_copy(self, req, **kwargs):
return self.call_app(req, app=self.copy, **kwargs)
def test_copy_symlink_target(self):
req = Request.blank('/v1/a/src_cont/symlink', method='COPY',
headers={'Destination': 'tgt_cont/tgt_obj'})
self._test_copy_symlink_target(req)
req = Request.blank('/v1/a/tgt_cont/tgt_obj', method='PUT',
headers={'X-Copy-From': 'src_cont/symlink'})
self._test_copy_symlink_target(req)
def _test_copy_symlink_target(self, req):
self.app.register('GET', '/v1/a/src_cont/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('GET', '/v1/a2/c1/o', swob.HTTPOk, {}, 'resp_body')
self.app.register('PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated,
{}, 'resp_body')
status, headers, body = self.call_copy(req)
method, path, hdrs = self.app.calls_with_headers[0]
self.assertEqual(method, 'GET')
self.assertEqual(path, '/v1/a/src_cont/symlink')
self.assertEqual('/src_cont/symlink', hdrs.get('X-Copy-From'))
method, path, hdrs = self.app.calls_with_headers[1]
self.assertEqual(method, 'GET')
self.assertEqual(path, '/v1/a2/c1/o')
self.assertEqual('/src_cont/symlink', hdrs.get('X-Copy-From'))
method, path, hdrs = self.app.calls_with_headers[2]
self.assertEqual(method, 'PUT')
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
# this is raw object copy
self.assertEqual(val, None)
self.assertEqual(status, '201 Created')
def test_copy_symlink(self):
req = Request.blank(
'/v1/a/src_cont/symlink?symlink=get', method='COPY',
headers={'Destination': 'tgt_cont/tgt_obj'})
self._test_copy_symlink(req)
req = Request.blank(
'/v1/a/tgt_cont/tgt_obj?symlink=get', method='PUT',
headers={'X-Copy-From': 'src_cont/symlink'})
self._test_copy_symlink(req)
def _test_copy_symlink(self, req):
self.app.register('GET', '/v1/a/src_cont/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated,
{'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Account': 'a2'})
status, headers, body = self.call_copy(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
self.assertEqual(method, 'GET')
self.assertEqual(path, '/v1/a/src_cont/symlink?symlink=get')
self.assertEqual('/src_cont/symlink', hdrs.get('X-Copy-From'))
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertEqual(
hdrs.get('X-Object-Sysmeta-Symlink-Target-Account'), 'a2')
def test_copy_symlink_new_target(self):
req = Request.blank(
'/v1/a/src_cont/symlink?symlink=get', method='COPY',
headers={'Destination': 'tgt_cont/tgt_obj',
'X-Symlink-Target': 'new_cont/new_obj',
'X-Symlink-Target-Account': 'new_acct'})
self._test_copy_symlink_new_target(req)
req = Request.blank(
'/v1/a/tgt_cont/tgt_obj?symlink=get', method='PUT',
headers={'X-Copy-From': 'src_cont/symlink',
'X-Symlink-Target': 'new_cont/new_obj',
'X-Symlink-Target-Account': 'new_acct'})
self._test_copy_symlink_new_target(req)
def _test_copy_symlink_new_target(self, req):
self.app.register('GET', '/v1/a/src_cont/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated,
{'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Account': 'a2'})
status, headers, body = self.call_copy(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
self.assertEqual(method, 'GET')
self.assertEqual(path, '/v1/a/src_cont/symlink?symlink=get')
self.assertEqual('/src_cont/symlink', hdrs.get('X-Copy-From'))
method, path, hdrs = self.app.calls_with_headers[1]
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/v1/a/tgt_cont/tgt_obj?symlink=get')
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'new_cont/new_obj')
self.assertEqual(hdrs.get('X-Object-Sysmeta-Symlink-Target-Account'),
'new_acct')
def test_copy_symlink_with_slo_query(self):
req = Request.blank(
'/v1/a/src_cont/symlink?multipart-manifest=get&symlink=get',
method='COPY', headers={'Destination': 'tgt_cont/tgt_obj'})
self._test_copy_symlink_with_slo_query(req)
req = Request.blank(
'/v1/a/tgt_cont/tgt_obj?multipart-manifest=get&symlink=get',
method='PUT', headers={'X-Copy-From': 'src_cont/symlink'})
self._test_copy_symlink_with_slo_query(req)
def _test_copy_symlink_with_slo_query(self, req):
self.app.register('GET', '/v1/a/src_cont/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated,
{'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Account': 'a2'})
status, headers, body = self.call_copy(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
self.assertEqual(method, 'GET')
path, query = path.split('?')
query_dict = parse_qs(query)
self.assertEqual(
path, '/v1/a/src_cont/symlink')
self.assertEqual(
query_dict,
{'multipart-manifest': ['get'], 'symlink': ['get'],
'format': ['raw']})
self.assertEqual('/src_cont/symlink', hdrs.get('X-Copy-From'))
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertEqual(
hdrs.get('X-Object-Sysmeta-Symlink-Target-Account'), 'a2')
def test_static_link_to_new_slo_manifest(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'X-Static-Large-Object': 'True',
'Etag': 'manifest-etag',
'X-Object-Sysmeta-Slo-Size': '1048576',
'X-Object-Sysmeta-Slo-Etag': 'this-is-not-used',
'Content-Length': 42,
'Content-Type': 'application/big-data',
'X-Object-Sysmeta-Container-Update-Override-Etag':
'956859738870e5ca6aa17eeda58e4df0; '
'slo_etag=71e938d37c1d06dc634dd24660255a88',
})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'manifest-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
method, path, hdrs = self.app.calls_with_headers[-1]
self.assertEqual('application/big-data', hdrs['Content-Type'])
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target'], 'c1/o')
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Etag'],
'manifest-etag')
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Bytes'],
'1048576')
self.assertEqual(
hdrs['X-Object-Sysmeta-Container-Update-Override-Etag'],
'd41d8cd98f00b204e9800998ecf8427e; '
'slo_etag=71e938d37c1d06dc634dd24660255a88; '
'symlink_target=c1/o; '
'symlink_target_etag=manifest-etag; '
'symlink_target_bytes=1048576')
def test_static_link_to_old_slo_manifest(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'X-Static-Large-Object': 'True',
'Etag': 'manifest-etag',
'X-Object-Sysmeta-Slo-Size': '1048576',
'X-Object-Sysmeta-Slo-Etag': '71e938d37c1d06dc634dd24660255a88',
'Content-Length': 42,
'Content-Type': 'application/big-data',
})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'manifest-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
method, path, hdrs = self.app.calls_with_headers[-1]
self.assertEqual('application/big-data', hdrs['Content-Type'])
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target'], 'c1/o')
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Etag'],
'manifest-etag')
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Bytes'],
'1048576')
self.assertEqual(
hdrs['X-Object-Sysmeta-Container-Update-Override-Etag'],
'd41d8cd98f00b204e9800998ecf8427e; '
'slo_etag=71e938d37c1d06dc634dd24660255a88; '
'symlink_target=c1/o; '
'symlink_target_etag=manifest-etag; '
'symlink_target_bytes=1048576')
def test_static_link_to_really_old_slo_manifest(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'X-Static-Large-Object': 'True',
'Etag': 'manifest-etag',
'Content-Length': 42,
'Content-Type': 'application/big-data',
})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'manifest-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
method, path, hdrs = self.app.calls_with_headers[-1]
self.assertEqual('application/big-data', hdrs['Content-Type'])
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target'], 'c1/o')
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Etag'],
'manifest-etag')
        # the symlink middleware is doing a HEAD; it won't read the manifest
        # body and sum up the bytes, so we just use the manifest size
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Bytes'],
'42')
# no slo_etag, and target_bytes is manifest
self.assertEqual(
hdrs['X-Object-Sysmeta-Container-Update-Override-Etag'],
'd41d8cd98f00b204e9800998ecf8427e; '
'symlink_target=c1/o; '
'symlink_target_etag=manifest-etag; '
'symlink_target_bytes=42')
def test_static_link_to_slo_manifest_slo_etag(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'manifest-etag',
'X-Object-Sysmeta-Slo-Etag': 'slo-etag',
'Content-Length': 42,
})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
# unquoted slo-etag doesn't match
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'slo-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
# the quoted slo-etag is tolerated, but still doesn't match
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': '"slo-etag"',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
class SymlinkVersioningTestCase(TestSymlinkMiddlewareBase):
# verify interaction of versioned_writes and symlink middlewares
def setUp(self):
self.app = FakeSwift()
conf = {'symloop_max': '2'}
self.sym = symlink.filter_factory(conf)(self.app)
self.sym.logger = self.app.logger
vw_conf = {'allow_versioned_writes': 'true'}
self.vw = versioned_writes.filter_factory(vw_conf)(self.sym)
def call_vw(self, req, **kwargs):
return self.call_app(req, app=self.vw, **kwargs)
def assertRequestEqual(self, req, other):
self.assertEqual(req.method, other.method)
self.assertEqual(req.path, other.path)
def test_new_symlink_version_success(self):
self.app.register(
'PUT', '/v1/a/c/symlink', swob.HTTPCreated,
{'X-Symlink-Target': 'new_cont/new_tgt',
'X-Symlink-Target-Account': 'a'}, None)
self.app.register(
'GET', '/v1/a/c/symlink', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT',
'X-Object-Sysmeta-Symlink-Target': 'old_cont/old_tgt',
'X-Object-Sysmeta-Symlink-Target-Account': 'a'},
'')
self.app.register(
'PUT', '/v1/a/ver_cont/007symlink/0000000001.00000',
swob.HTTPCreated,
{'X-Symlink-Target': 'old_cont/old_tgt',
'X-Symlink-Target-Account': 'a'}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/symlink',
headers={'X-Symlink-Target': 'new_cont/new_tgt'},
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '0',
'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '201 Created')
        # authorized twice because versioned_writes now makes a check on PUT
self.assertEqual(len(self.authorized), 2)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(['VW', 'VW', None], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
calls = self.app.calls_with_headers
method, path, req_headers = calls[2]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/symlink', path)
self.assertEqual(
'new_cont/new_tgt',
req_headers['X-Object-Sysmeta-Symlink-Target'])
def test_delete_latest_version_no_marker_success(self):
self.app.register(
'GET',
'/v1/a/ver_cont?prefix=003sym/&marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "y", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 0, '
'"name": "003sym/2", '
'"content_type": "text/plain"}, '
'{"hash": "x", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 0, '
'"name": "003sym/1", '
'"content_type": "text/plain"}]')
self.app.register(
'GET', '/v1/a/ver_cont/003sym/2', swob.HTTPCreated,
{'content-length': '0',
'X-Object-Sysmeta-Symlink-Target': 'c/tgt'}, None)
self.app.register(
'PUT', '/v1/a/c/sym', swob.HTTPCreated,
{'X-Symlink-Target': 'c/tgt', 'X-Symlink-Target-Account': 'a'},
None)
self.app.register(
'DELETE', '/v1/a/ver_cont/003sym/2', swob.HTTPOk,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/sym',
headers={'X-If-Delete-At': 1},
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(4, self.app.call_count)
self.assertEqual(['VW', 'VW', 'VW', 'VW'], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
calls = self.app.calls_with_headers
method, path, req_headers = calls[2]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/sym', path)
self.assertEqual(
'c/tgt',
req_headers['X-Object-Sysmeta-Symlink-Target'])
class TestSymlinkContainerContext(TestSymlinkMiddlewareBase):
def setUp(self):
super(TestSymlinkContainerContext, self).setUp()
self.context = symlink.SymlinkContainerContext(
self.sym.app, self.sym.logger)
def test_extract_symlink_path_json_simple_etag(self):
obj_dict = {"bytes": 6,
"last_modified": "1",
"hash": "etag",
"name": "obj",
"content_type": "application/octet-stream"}
obj_dict = self.context._extract_symlink_path_json(
obj_dict, 'v1', 'AUTH_a')
self.assertEqual(obj_dict['hash'], 'etag')
self.assertNotIn('symlink_path', obj_dict)
def test_extract_symlink_path_json_symlink_path(self):
obj_dict = {"bytes": 6,
"last_modified": "1",
"hash": "etag; symlink_target=c/o; something_else=foo; "
"symlink_target_etag=tgt_etag; symlink_target_bytes=8",
"name": "obj",
"content_type": "application/octet-stream"}
obj_dict = self.context._extract_symlink_path_json(
obj_dict, 'v1', 'AUTH_a')
self.assertEqual(obj_dict['hash'], 'etag; something_else=foo')
self.assertEqual(obj_dict['symlink_path'], '/v1/AUTH_a/c/o')
self.assertEqual(obj_dict['symlink_etag'], 'tgt_etag')
self.assertEqual(obj_dict['symlink_bytes'], 8)
def test_extract_symlink_path_json_symlink_path_and_account(self):
obj_dict = {
"bytes": 6,
"last_modified": "1",
"hash": "etag; symlink_target=c/o; symlink_target_account=AUTH_a2",
"name": "obj",
"content_type": "application/octet-stream"}
obj_dict = self.context._extract_symlink_path_json(
obj_dict, 'v1', 'AUTH_a')
self.assertEqual(obj_dict['hash'], 'etag')
self.assertEqual(obj_dict['symlink_path'], '/v1/AUTH_a2/c/o')
def test_extract_symlink_path_json_extra_key(self):
obj_dict = {"bytes": 6,
"last_modified": "1",
"hash": "etag; symlink_target=c/o; extra_key=value",
"name": "obj",
"content_type": "application/octet-stream"}
obj_dict = self.context._extract_symlink_path_json(
obj_dict, 'v1', 'AUTH_a')
self.assertEqual(obj_dict['hash'], 'etag; extra_key=value')
self.assertEqual(obj_dict['symlink_path'], '/v1/AUTH_a/c/o')
def test_get_container_simple(self):
self.app.register(
'GET',
'/v1/a/c',
swob.HTTPOk, {},
json.dumps(
[{"hash": "etag; symlink_target=c/o;",
"last_modified": "2014-11-21T14:23:02.206740",
"bytes": 0,
"name": "sym_obj",
"content_type": "text/plain"},
{"hash": "etag2",
"last_modified": "2014-11-21T14:14:27.409100",
"bytes": 32,
"name": "normal_obj",
"content_type": "text/plain"}]))
req = Request.blank(path='/v1/a/c')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
obj_list = json.loads(body)
self.assertIn('symlink_path', obj_list[0])
self.assertIn(obj_list[0]['symlink_path'], '/v1/a/c/o')
self.assertNotIn('symlink_path', obj_list[1])
def test_get_container_with_subdir(self):
self.app.register(
'GET',
'/v1/a/c?delimiter=/',
swob.HTTPOk, {},
json.dumps([{"subdir": "photos/"}]))
req = Request.blank(path='/v1/a/c?delimiter=/')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
obj_list = json.loads(body)
self.assertEqual(len(obj_list), 1)
self.assertEqual(obj_list[0]['subdir'], 'photos/')
def test_get_container_error_cases(self):
        # No effect for error cases
for error in (swob.HTTPNotFound, swob.HTTPUnauthorized,
swob.HTTPServiceUnavailable,
swob.HTTPInternalServerError):
self.app.register('GET', '/v1/a/c', error, {}, '')
req = Request.blank(path='/v1/a/c')
status, headers, body = self.call_sym(req)
self.assertEqual(status, error().status)
def test_no_affect_for_account_request(self):
with mock.patch.object(self.sym, 'app') as mock_app:
mock_app.return_value = (b'ok',)
req = Request.blank(path='/v1/a')
status, headers, body = self.call_sym(req)
self.assertEqual(body, b'ok')
def test_get_container_simple_with_listing_format(self):
self.app.register(
'GET',
'/v1/a/c?format=json',
swob.HTTPOk, {},
json.dumps(
[{"hash": "etag; symlink_target=c/o;",
"last_modified": "2014-11-21T14:23:02.206740",
"bytes": 0,
"name": "sym_obj",
"content_type": "text/plain"},
{"hash": "etag2",
"last_modified": "2014-11-21T14:14:27.409100",
"bytes": 32,
"name": "normal_obj",
"content_type": "text/plain"}]))
self.lf = listing_formats.filter_factory({})(self.sym)
req = Request.blank(path='/v1/a/c?format=json')
status, headers, body = self.call_app(req, app=self.lf)
self.assertEqual(status, '200 OK')
obj_list = json.loads(body)
self.assertIn('symlink_path', obj_list[0])
self.assertIn(obj_list[0]['symlink_path'], '/v1/a/c/o')
self.assertNotIn('symlink_path', obj_list[1])
def test_get_container_simple_with_listing_format_xml(self):
self.app.register(
'GET',
'/v1/a/c?format=json',
swob.HTTPOk, {'Content-Type': 'application/json'},
json.dumps(
[{"hash": "etag; symlink_target=c/o;",
"last_modified": "2014-11-21T14:23:02.206740",
"bytes": 0,
"name": "sym_obj",
"content_type": "text/plain"},
{"hash": "etag2",
"last_modified": "2014-11-21T14:14:27.409100",
"bytes": 32,
"name": "normal_obj",
"content_type": "text/plain"}]))
self.lf = listing_formats.filter_factory({})(self.sym)
req = Request.blank(path='/v1/a/c?format=xml')
status, headers, body = self.call_app(req, app=self.lf)
self.assertEqual(status, '200 OK')
self.assertEqual(body.split(b'\n'), [
b'<?xml version="1.0" encoding="UTF-8"?>',
b'<container name="c"><object><name>sym_obj</name>'
b'<hash>etag</hash><bytes>0</bytes>'
b'<content_type>text/plain</content_type>'
b'<last_modified>2014-11-21T14:23:02.206740</last_modified>'
b'</object>'
b'<object><name>normal_obj</name><hash>etag2</hash>'
b'<bytes>32</bytes><content_type>text/plain</content_type>'
b'<last_modified>2014-11-21T14:14:27.409100</last_modified>'
b'</object></container>'])
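    # For orientation (an illustrative note, not part of the original tests):
    # the middleware encodes symlink details into the container listing
    # "hash" value, e.g.
    #
    #     "etag; symlink_target=c/o; symlink_target_etag=tgt_etag; "
    #     "symlink_target_bytes=8"
    #
    # and _extract_symlink_path_json() splits that back into the
    # 'symlink_path', 'symlink_etag' and 'symlink_bytes' keys asserted above,
    # leaving any unrecognised parameters attached to the reported hash.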
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OpenStack Client interface. Handles the REST calls and responses.
"""
from __future__ import print_function
import logging
import os
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from eventlet import sleep
except ImportError:
from time import sleep
try:
import json
except ImportError:
import simplejson as json
# Python 2.5 compat fix
if not hasattr(urlparse, 'parse_qsl'):
import cgi
urlparse.parse_qsl = cgi.parse_qsl
import requests
from troveclient.openstack.common.apiclient import exceptions
from troveclient import service_catalog
from troveclient import utils
from troveclient.openstack.common.apiclient import client
class HTTPClient(object):
USER_AGENT = 'python-troveclient'
def __init__(self, user, password, projectid, auth_url, insecure=False,
timeout=None, tenant_id=None, proxy_tenant_id=None,
proxy_token=None, region_name=None,
endpoint_type='publicURL', service_type=None,
service_name=None, database_service_name=None, retries=None,
http_log_debug=False, cacert=None):
self.user = user
self.password = password
self.projectid = projectid
self.tenant_id = tenant_id
self.auth_url = auth_url.rstrip('/')
self.version = 'v1'
self.region_name = region_name
self.endpoint_type = endpoint_type
self.service_type = service_type
self.service_name = service_name
self.database_service_name = database_service_name
self.retries = int(retries or 0)
self.http_log_debug = http_log_debug
self.management_url = None
self.auth_token = None
self.proxy_token = proxy_token
self.proxy_tenant_id = proxy_tenant_id
self.timeout = timeout
if insecure:
self.verify_cert = False
else:
if cacert:
self.verify_cert = cacert
else:
self.verify_cert = True
self._logger = logging.getLogger(__name__)
if self.http_log_debug and not self._logger.handlers:
ch = logging.StreamHandler()
self._logger.setLevel(logging.DEBUG)
self._logger.addHandler(ch)
if hasattr(requests, 'logging'):
requests.logging.getLogger(requests.__name__).addHandler(ch)
def http_log_req(self, args, kwargs):
if not self.http_log_debug:
return
string_parts = ['curl -i']
for element in args:
if element in ('GET', 'POST', 'DELETE', 'PUT'):
string_parts.append(' -X %s' % element)
else:
string_parts.append(' %s' % element)
for element in kwargs['headers']:
header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
string_parts.append(header)
if 'data' in kwargs:
string_parts.append(" -d '%s'" % (kwargs['data']))
self._logger.debug("\nREQ: %s\n" % "".join(string_parts))
def http_log_resp(self, resp):
if not self.http_log_debug:
return
self._logger.debug(
"RESP: [%s] %s\nRESP BODY: %s\n",
resp.status_code,
resp.headers,
resp.text)
def request(self, url, method, **kwargs):
kwargs.setdefault('headers', kwargs.get('headers', {}))
kwargs['headers']['User-Agent'] = self.USER_AGENT
kwargs['headers']['Accept'] = 'application/json'
if 'body' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = json.dumps(kwargs['body'])
del kwargs['body']
if self.timeout:
kwargs.setdefault('timeout', self.timeout)
self.http_log_req((url, method,), kwargs)
resp = requests.request(
method,
url,
verify=self.verify_cert,
**kwargs)
self.http_log_resp(resp)
if resp.text:
try:
body = json.loads(resp.text)
except ValueError:
                body = None
else:
body = None
if resp.status_code >= 400:
raise exceptions.from_response(resp, body, url)
return resp, body
def _cs_request(self, url, method, **kwargs):
auth_attempts = 0
attempts = 0
backoff = 1
while True:
attempts += 1
if not self.management_url or not self.auth_token:
self.authenticate()
kwargs.setdefault('headers', {})['X-Auth-Token'] = self.auth_token
if self.projectid:
kwargs['headers']['X-Auth-Project-Id'] = self.projectid
try:
resp, body = self.request(self.management_url + url, method,
**kwargs)
return resp, body
except exceptions.BadRequest as e:
if attempts > self.retries:
raise
except exceptions.Unauthorized:
if auth_attempts > 0:
raise
self._logger.debug("Unauthorized, reauthenticating.")
self.management_url = self.auth_token = None
# First reauth. Discount this attempt.
attempts -= 1
auth_attempts += 1
continue
except exceptions.ClientException as e:
if attempts > self.retries:
raise
if 500 <= e.code <= 599:
pass
else:
raise
except requests.exceptions.ConnectionError as e:
# Catch a connection refused from requests.request
self._logger.debug("Connection refused: %s" % e)
msg = 'Unable to establish connection: %s' % e
raise exceptions.ConnectionError(msg)
self._logger.debug(
"Failed attempt(%s of %s), retrying in %s seconds" %
(attempts, self.retries, backoff))
sleep(backoff)
backoff *= 2
def get(self, url, **kwargs):
return self._cs_request(url, 'GET', **kwargs)
def post(self, url, **kwargs):
return self._cs_request(url, 'POST', **kwargs)
def put(self, url, **kwargs):
return self._cs_request(url, 'PUT', **kwargs)
def delete(self, url, **kwargs):
return self._cs_request(url, 'DELETE', **kwargs)
def _extract_service_catalog(self, url, resp, body, extract_token=True):
"""See what the auth service told us and process the response.
We may get redirected to another site, fail or actually get
back a service catalog with a token and our endpoints.
"""
        if resp.status_code == 200:  # content must always be present
try:
self.auth_url = url
self.service_catalog = \
service_catalog.ServiceCatalog(body)
if extract_token:
self.auth_token = self.service_catalog.get_token()
management_url = self.service_catalog.url_for(
attr='region',
filter_value=self.region_name,
endpoint_type=self.endpoint_type,
service_type=self.service_type,
service_name=self.service_name,
database_service_name=self.database_service_name)
self.management_url = management_url.rstrip('/')
return None
except exceptions.AmbiguousEndpoints:
print("Found more than one valid endpoint. Use a more "
"restrictive filter")
raise
except KeyError:
raise exceptions.AuthorizationFailure()
except exceptions.EndpointNotFound:
print("Could not find any suitable endpoint. Correct region?")
raise
elif resp.status_code == 305:
            return resp.headers['location']
else:
raise exceptions.from_response(resp, body, url)
def _fetch_endpoints_from_auth(self, url):
"""We have a token, but don't know the final endpoint for
the region. We have to go back to the auth service and
ask again. This request requires an admin-level token
        to work. The proxy token supplied could be from a low-level end user.
We can't get this from the keystone service endpoint, we have to use
the admin endpoint.
This will overwrite our admin token with the user token.
"""
# GET ...:5001/v2.0/tokens/#####/endpoints
url = '/'.join([url, 'tokens', '%s?belongsTo=%s'
% (self.proxy_token, self.proxy_tenant_id)])
self._logger.debug("Using Endpoint URL: %s" % url)
resp, body = self.request(url, "GET",
headers={'X-Auth-Token': self.auth_token})
return self._extract_service_catalog(url, resp, body,
extract_token=False)
def authenticate(self):
magic_tuple = urlparse.urlsplit(self.auth_url)
scheme, netloc, path, query, frag = magic_tuple
port = magic_tuple.port
if port is None:
port = 80
path_parts = path.split('/')
for part in path_parts:
if len(part) > 0 and part[0] == 'v':
self.version = part
break
# TODO(sandy): Assume admin endpoint is 35357 for now.
        # Ideally this will have to be provided by the service catalog.
new_netloc = netloc.replace(':%d' % port, ':%d' % (35357,))
admin_url = urlparse.urlunsplit((scheme, new_netloc,
path, query, frag))
auth_url = self.auth_url
if self.version == "v2.0":
while auth_url:
if "TROVE_RAX_AUTH" in os.environ:
auth_url = self._rax_auth(auth_url)
else:
auth_url = self._v2_auth(auth_url)
# Are we acting on behalf of another user via an
# existing token? If so, our actual endpoints may
# be different than that of the admin token.
if self.proxy_token:
self._fetch_endpoints_from_auth(admin_url)
# Since keystone no longer returns the user token
# with the endpoints any more, we need to replace
# our service account token with the user token.
self.auth_token = self.proxy_token
else:
try:
while auth_url:
auth_url = self._v1_auth(auth_url)
            # In some configurations trove redirects to the v2.0 keystone
            # endpoint, and the new location contains only the hostname and
            # port, not the real endpoint.
except exceptions.AuthorizationFailure:
if auth_url.find('v2.0') < 0:
auth_url = auth_url + '/v2.0'
self._v2_auth(auth_url)
def _v1_auth(self, url):
if self.proxy_token:
raise exceptions.NoTokenLookupException()
headers = {'X-Auth-User': self.user,
'X-Auth-Key': self.password}
if self.projectid:
headers['X-Auth-Project-Id'] = self.projectid
resp, body = self.request(url, 'GET', headers=headers)
if resp.status_code in (200, 204): # in some cases we get No Content
try:
mgmt_header = 'x-server-management-url'
self.management_url = resp.headers[mgmt_header].rstrip('/')
self.auth_token = resp.headers['x-auth-token']
self.auth_url = url
except (KeyError, TypeError):
raise exceptions.AuthorizationFailure()
elif resp.status_code == 305:
return resp.headers['location']
else:
raise exceptions.from_response(resp, body, url)
def _v2_auth(self, url):
"""Authenticate against a v2.0 auth service."""
body = {"auth": {
"passwordCredentials": {"username": self.user,
"password": self.password}}}
if self.projectid:
body['auth']['tenantName'] = self.projectid
elif self.tenant_id:
body['auth']['tenantId'] = self.tenant_id
self._authenticate(url, body)
def _rax_auth(self, url):
"""Authenticate against the Rackspace auth service."""
body = {"auth": {
"RAX-KSKEY:apiKeyCredentials": {
"username": self.user,
"apiKey": self.password,
"tenantName": self.projectid}}}
self._authenticate(url, body)
def _authenticate(self, url, body):
"""Authenticate and extract the service catalog."""
token_url = url + "/tokens"
# Make sure we follow redirects when trying to reach Keystone
resp, body = self.request(
token_url,
"POST",
body=body,
allow_redirects=True)
return self._extract_service_catalog(url, resp, body)
def get_database_api_version_from_endpoint(self):
magic_tuple = urlparse.urlsplit(self.management_url)
scheme, netloc, path, query, frag = magic_tuple
v = path.split("/")[1]
valid_versions = ['v1.0']
if v not in valid_versions:
msg = "Invalid client version '%s'. must be one of: %s" % (
(v, ', '.join(valid_versions)))
raise exceptions.UnsupportedVersion(msg)
return v[1:]
def get_version_map():
return {
'1.0': 'troveclient.v1.client.Client',
}
def Client(version, *args, **kwargs):
version_map = get_version_map()
client_class = client.BaseClient.get_class('database',
version, version_map)
return client_class(*args, **kwargs)
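# Usage sketch (illustrative only, not part of the original module). The
# credentials and URL below are hypothetical; HTTPClient authenticates lazily
# from _cs_request() on the first call:
#
#     http = HTTPClient(user='admin', password='secret', projectid='demo',
#                       auth_url='http://127.0.0.1:5000/v2.0',
#                       service_type='database')
#     resp, body = http.get('/instances')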
|
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright 2012 Aaron Morton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cStringIO
import errno
import json
import logging
import os.path
import pkg_resources
import shutil
import time
import boto
from boto.s3 import key
from cassback import dt_util, file_util
# ============================================================================
#
def create_from_args(args):
endpoint_name = args.endpoint
for entry_point in pkg_resources.iter_entry_points("cassback.endpoints"):
endpoint_class = entry_point.load()
if endpoint_class.name == endpoint_name:
return endpoint_class(args)
raise RuntimeError("Unknown endpoint name %s" % (endpoint_name,))
def validate_args(args):
endpoint_name = args.endpoint
for entry_point in pkg_resources.iter_entry_points("cassback.endpoints"):
endpoint_class = entry_point.load()
if endpoint_class.name == endpoint_name:
endpoint_class.validate_args(args)
return
raise RuntimeError("Unknown endpoint name %s" % (endpoint_name,))
# ============================================================================
#
class EndpointBase(object):
"""Base for all endpoints."""
name = None
"""Endpoint name, used in command line to identifity it.
"""
@classmethod
def add_arg_group(cls, main_parser):
"""
"""
pass
@classmethod
def validate_args(cls, args):
pass
def backup_file(self, backup_file):
"""Store the SSTable ``backup_file``.
Returns the fully qualified path to the file in the backup.
"""
raise NotImplementedError()
def read_backup_file(self, path):
"""Creates a :cls:`cassandra.BackupFile` from the meta for the file
at ``path``
"""
raise NotImplementedError()
def backup_keyspace(self, ks_backup):
raise NotImplementedError()
def read_keyspace(self, path):
raise NotImplementedError()
def restore_file(self, backup_file, dest_prefix):
"""Restore the ``backup_file`` under the ``dest_prefix``.
Returns the fully qualified backup path.
"""
raise NotImplementedError()
def exists(self, relative_path):
"""Returns ``True`` if the file at ``relative_path`` exists. False
otherwise.
"""
raise NotImplementedError()
def validate_checksum(self, relative_path, expected_md5_hex):
"""Validates that the MD5 checksum of the file in the backup at
``relative_path`` matches ``expected_md5_hex``.
"""
raise NotImplementedError()
def iter_dir(self, relative_path, include_files=True,
include_dirs=False, recursive=False):
raise NotImplementedError()
def remove_file(self, relative_path, dry_run=False):
"""Removes the file at the ``relative_path``.
If ``dry_run`` the file is not deleted.
        Returns the full path to the file in the backup."""
raise NotImplementedError()
def remove_file_with_meta(self, relative_path, dry_run):
"""Removes the file at the ``relative_path`` that is expected to
have meta data.
If ``dry_run`` the file is not deleted.
        Returns the full path to the file in the backup."""
raise NotImplementedError()
class TransferTiming(object):
def __init__(self, logger, path, size):
self.log = logger
self.path = path
self.start_ms = int(time.time() * 1000)
self.size = size # bytes
# number of boto callbacks we should ask for.
        mb = 1024 ** 2
pattern = [
(1 * mb, 0), # 1MB, none
(10 * mb, 1), # 10MB, 1
(100 * mb, 2), # 100MB, 2
(1024 * mb, 5), # 1GB , 5
(10 * 1024 * mb, 10), # 10GB , 10
]
self.num_callbacks = 20
for i, j in pattern:
if self.size < i:
self.num_callbacks = j
break
def progress(self, progress, total):
"""Boto progress callback function.
Logs the progress.
"""
path = self.path
elapsed_ms = int(time.time() * 1000) - self.start_ms
throughput = ((progress * 1.0) / (1024**2)) / ((elapsed_ms / 1000) or 1)
if progress == total:
pattern = "Transfered file {path} in {elapsed_ms:d} ms size "\
"{total} at {throughput:f} MB/sec"
else:
pattern = "Progress transfering file {path} elapsed "\
"{elapsed_ms:d} ms, transferred "\
"{progress} bytes at {throughput:f} MB/sec {total} "\
"total"
self.log.info(pattern.format(**vars()))
return
def __enter__(self):
"""Entry function when used as a context."""
# Nothing to do.
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is not None:
# There was an error, let's just get out of here.
return False
# report 100% progress.
self.progress(self.size, self.size)
return False
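# Usage sketch (illustrative only): TransferTiming is meant to be handed to
# boto as a progress callback while also acting as a context manager that
# logs 100% progress on a clean exit. The key object and path below are
# hypothetical:
#
#     size = os.path.getsize("/tmp/ks-cf-ia-1-Data.db")
#     timing = TransferTiming(logging.getLogger(__name__),
#                             "/tmp/ks-cf-ia-1-Data.db", size)
#     with timing:
#         s3_key.set_contents_from_filename("/tmp/ks-cf-ia-1-Data.db",
#                                           cb=timing.progress,
#                                           num_cb=timing.num_callbacks)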
|
|
#
# Copyright 2015 Paul Osborne <[email protected]>
# Copyright (c) 2021 Chris Reed
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
# Sentinel value for lookup where None might be a valid value
NOT_PRESENT = object()
TO_DICT_SKIP_KEYS = {"_register_arrays", "parent"}
REGISTER_PROPERTY_KEYS = {"size", "access", "protection", "reset_value", "reset_mask"}
LIST_TYPE_KEYS = {"register_arrays", "registers", "fields", "peripherals", "interrupts"}
def _check_type(value, expected_type):
"""@brief Perform type checking on the provided value
This is a helper that will raise ``TypeError`` if the provided value is
not an instance of the provided type. This method should be used sparingly
but can be good for preventing problems earlier when you want to restrict
duck typing to make the types of fields more obvious.
If the value passed the type check it will be returned from the call.
"""
if not isinstance(value, expected_type):
raise TypeError("Value {value!r} has unexpected type {actual_type!r}, expected {expected_type!r}".format(
value=value,
expected_type=expected_type,
actual_type=type(value),
))
return value
def _none_as_empty(v):
if v is not None:
for e in v:
yield e
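# Small illustrative examples of the helpers above (not part of the original
# module):
#
#     _check_type(8, int)            # -> 8
#     _check_type("8", int)          # raises TypeError
#     list(_none_as_empty(None))     # -> []
#     list(_none_as_empty([1, 2]))   # -> [1, 2]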
class SVDJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, SVDElement):
eldict = {}
for k, v in obj.__dict__.items():
if k in TO_DICT_SKIP_KEYS:
continue
if k.startswith("_"):
pubkey = k[1:]
eldict[pubkey] = getattr(obj, pubkey)
else:
eldict[k] = v
return eldict
else:
return json.JSONEncoder.default(self, obj)
class SVDElement(object):
"""@brief Base class for all SVD Elements"""
def __init__(self):
self.parent = None
def _lookup_possibly_derived_attribute(self, attr):
derived_from = self.get_derived_from()
        # see if there is an attribute with the same name but a leading
        # underscore
try:
value_self = object.__getattribute__(self, "_{}".format(attr))
except AttributeError:
value_self = NOT_PRESENT
# if not, then this is an attribute error
if value_self is NOT_PRESENT:
raise AttributeError("Requested missing key")
# if there is a non-None value, that is what we want to use
elif value_self is not None:
return value_self # if there is a non-None value, use it
# if there is a derivedFrom, check there first
elif derived_from is not None:
derived_value = getattr(derived_from, "_{}".format(attr), NOT_PRESENT)
if (derived_value is not NOT_PRESENT) and (derived_value is not None):
return derived_value
# for some attributes, try to grab from parent
if attr in REGISTER_PROPERTY_KEYS:
value = getattr(self.parent, attr, value_self)
else:
value = value_self
# if value is None and this is a list type, transform to empty list
if value is None and attr in LIST_TYPE_KEYS:
value = []
return value
def get_derived_from(self):
pass # override in children
def to_dict(self):
# This is a little convoluted but it works and ensures a
# json-compatible dictionary representation (at the cost of
# some computational overhead)
encoder = SVDJSONEncoder()
return json.loads(encoder.encode(self))
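# Serialization sketch (illustrative only): any parsed SVDElement instance,
# say a hypothetical `some_field`, can be dumped either through the encoder
# or the helper above:
#
#     json.dumps(some_field, cls=SVDJSONEncoder)
#     some_field.to_dict()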
class SVDEnumeratedValue(SVDElement):
def __init__(self, name, description, value, is_default):
SVDElement.__init__(self)
self.name = name
self.description = description
self.value = value
self.is_default = is_default
class SVDField(SVDElement):
def __init__(self, name, derived_from, description, bit_offset, bit_width, access, enumerated_values, modified_write_values, read_action):
SVDElement.__init__(self)
self.name = name
self.derived_from = derived_from
self.description = description
self.bit_offset = bit_offset
self.bit_width = bit_width
self.access = access
self.enumerated_values = enumerated_values
self.modified_write_values = modified_write_values
self.read_action = read_action
def __getattr__(self, attr):
return self._lookup_possibly_derived_attribute(attr)
def get_derived_from(self):
# TODO: add support for dot notation derivedFrom
if self.derived_from is None:
return None
for field in self.parent.fields:
if field.name == self.derived_from:
return field
raise KeyError("Unable to find derived_from: %r" % self.derived_from)
@property
def is_enumerated_type(self):
"""@brief Return True if the field is an enumerated type"""
return self.enumerated_values is not None
@property
def is_reserved(self):
return self.name.lower() == "reserved"
class SVDRegisterArray(SVDElement):
"""@brief Represent a register array in the tree"""
def __init__(self, name, derived_from, description, address_offset, size,
access, protection, reset_value, reset_mask, fields,
display_name, alternate_group, modified_write_values,
read_action, dim, dim_indices, dim_increment):
SVDElement.__init__(self)
# When deriving a register, it is mandatory to specify at least the name, the description,
# and the addressOffset
self.derived_from = derived_from
self.name = name
self.description = description
self.address_offset = address_offset
self.dim = dim
self.dim_indices = dim_indices
self.dim_increment = dim_increment
self._read_action = read_action
self._modified_write_values = modified_write_values
self._display_name = display_name
self._alternate_group = alternate_group
self._size = size
self._access = access
self._protection = protection
self._reset_value = reset_value
self._reset_mask = reset_mask
self._fields = fields
# make parent association
for field in self._fields:
field.parent = self
def __getattr__(self, attr):
return self._lookup_possibly_derived_attribute(attr)
@property
def registers(self):
for i in range(self.dim):
reg = SVDRegister(
name=self.name % self.dim_indices[i],
fields=self._fields,
derived_from=self.derived_from,
description=self.description,
address_offset=self.address_offset + self.dim_increment * i,
size=self._size,
access=self._access,
protection=self._protection,
reset_value=self._reset_value,
reset_mask=self._reset_mask,
display_name=self._display_name,
alternate_group=self._alternate_group,
modified_write_values=self._modified_write_values,
read_action=self._read_action,
)
reg.parent = self.parent
yield reg
def get_derived_from(self):
# TODO: add support for dot notation derivedFrom
if self.derived_from is None:
return None
for register in self.parent.registers:
if register.name == self.derived_from:
return register
raise KeyError("Unable to find derived_from: %r" % self.derived_from)
def is_reserved(self):
return 'reserved' in self.name.lower()
class SVDRegister(SVDElement):
def __init__(self, name, derived_from, description, address_offset, size, access, protection, reset_value, reset_mask,
fields, display_name, alternate_group, modified_write_values, read_action):
SVDElement.__init__(self)
# When deriving a register, it is mandatory to specify at least the name, the description,
# and the addressOffset
self.derived_from = derived_from
self.name = name
self.description = description
self.address_offset = address_offset
self._read_action = read_action
self._modified_write_values = modified_write_values
self._display_name = display_name
self._alternate_group = alternate_group
self._size = size
self._access = access
self._protection = protection
self._reset_value = reset_value
self._reset_mask = reset_mask
self._fields = fields
# make parent association
for field in self._fields:
field.parent = self
def __getattr__(self, attr):
return self._lookup_possibly_derived_attribute(attr)
def get_derived_from(self):
# TODO: add support for dot notation derivedFrom
if self.derived_from is None:
return None
for register in self.parent.registers:
if register.name == self.derived_from:
return register
raise KeyError("Unable to find derived_from: %r" % self.derived_from)
def is_reserved(self):
return 'reserved' in self.name.lower()
class SVDRegisterCluster(SVDElement):
"""@brief Represent a register cluster in the tree"""
def __init__(self, name, derived_from, description, address_offset, size,
alternate_cluster, header_struct_name,
access, protection, reset_value, reset_mask, register,
cluster):
SVDElement.__init__(self)
# When deriving a register, it is mandatory to specify at least the name, the description,
# and the addressOffset
self.derived_from = derived_from
self.name = name
self.description = description
self.address_offset = address_offset
self._alternate_cluster = alternate_cluster
self._header_struct_name = header_struct_name
self._size = size
self._access = access
self._protection = protection
self._reset_value = reset_value
self._reset_mask = reset_mask
self._register = register
self._cluster = cluster
# make parent association
for cluster in self._cluster:
cluster.parent = self
def __getattr__(self, attr):
return self._lookup_possibly_derived_attribute(attr)
def updated_register(self, reg, clu):
new_reg = SVDRegister(
name="{}_{}".format(clu.name, reg.name),
fields=reg.fields,
derived_from=reg.derived_from,
description=reg.description,
address_offset=clu.address_offset + reg.address_offset,
size=reg.size,
access=reg.access,
protection=reg.protection,
reset_value=reg.reset_value,
reset_mask=reg.reset_mask,
display_name=reg.display_name,
alternate_group=reg.alternate_group,
modified_write_values=reg.modified_write_values,
read_action=reg.read_action,
)
new_reg.parent = self
return new_reg
@property
def registers(self):
for reg in self._register:
yield self.updated_register(reg, self)
for cluster in self._cluster:
for reg in cluster.registers:
yield self.updated_register(reg, self)
def get_derived_from(self):
# TODO: add support for dot notation derivedFrom
if self.derived_from is None:
return None
for register in self.parent.registers:
if register.name == self.derived_from:
return register
raise KeyError("Unable to find derived_from: %r" % self.derived_from)
def is_reserved(self):
return 'reserved' in self.name.lower()
class SVDRegisterClusterArray(SVDElement):
"""@brief Represent a register cluster in the tree"""
def __init__(self, name, derived_from, description, address_offset, size,
alternate_cluster, header_struct_name,
dim, dim_indices, dim_increment,
access, protection, reset_value, reset_mask, register,
cluster):
SVDElement.__init__(self)
# When deriving a register, it is mandatory to specify at least the name, the description,
# and the addressOffset
self.derived_from = derived_from
self.name = name
self.description = description
self.address_offset = address_offset
self.dim = dim
self.dim_indices = dim_indices
self.dim_increment = dim_increment
self._alternate_cluster = alternate_cluster
self._header_struct_name = header_struct_name
self._size = size
self._access = access
self._protection = protection
self._reset_value = reset_value
self._reset_mask = reset_mask
self._register = register
self._cluster = cluster
# make parent association
for register in self._register:
register.parent = self
for cluster in self._cluster:
cluster.parent = self
def __getattr__(self, attr):
return self._lookup_possibly_derived_attribute(attr)
def updated_register(self, reg, clu, i):
new_reg = SVDRegister(
name="{}_{}".format(clu.name % i, reg.name),
fields=reg.fields,
derived_from=reg.derived_from,
description=reg.description,
address_offset=clu.address_offset + reg.address_offset + i*clu.dim_increment,
size=reg.size,
access=reg.access,
protection=reg.protection,
reset_value=reg.reset_value,
reset_mask=reg.reset_mask,
display_name=reg.display_name,
alternate_group=reg.alternate_group,
modified_write_values=reg.modified_write_values,
read_action=reg.read_action,
)
new_reg.parent = self
return new_reg
@property
def registers(self):
for i in range(self.dim):
for reg in self._register:
yield self.updated_register(reg, self, i)
for cluster in self._cluster:
for reg in cluster.registers:
yield self.updated_register(reg, cluster, i)
def get_derived_from(self):
# TODO: add support for dot notation derivedFrom
if self.derived_from is None:
return None
for register in self.parent.registers:
if register.name == self.derived_from:
return register
raise KeyError("Unable to find derived_from: %r" % self.derived_from)
def is_reserved(self):
return 'reserved' in self.name.lower()
class SVDAddressBlock(SVDElement):
def __init__(self, offset, size, usage):
SVDElement.__init__(self)
self.offset = offset
self.size = size
self.usage = usage
class SVDInterrupt(SVDElement):
def __init__(self, name, value, description):
SVDElement.__init__(self)
self.name = name
self.value = _check_type(value, int)
self.description = description
class SVDPeripheral(SVDElement):
def __init__(self, name, version, derived_from, description,
prepend_to_name, base_address, address_block,
interrupts, registers, register_arrays, size, access,
protection, reset_value, reset_mask,
group_name, append_to_name, disable_condition,
clusters):
SVDElement.__init__(self)
# items with underscore are potentially derived
self.name = name
self._version = version
self._derived_from = derived_from
self._description = description
self._prepend_to_name = prepend_to_name
self._base_address = base_address
self._address_block = address_block
self._interrupts = interrupts
self._registers = registers
self._register_arrays = register_arrays
self._size = size # Defines the default bit-width of any register contained in the device (implicit inheritance).
self._access = access # Defines the default access rights for all registers.
self._protection = protection # Defines extended access protection for all registers.
self._reset_value = reset_value # Defines the default value for all registers at RESET.
self._reset_mask = reset_mask # Identifies which register bits have a defined reset value.
self._group_name = group_name
self._append_to_name = append_to_name
self._disable_condition = disable_condition
self._clusters = clusters
# make parent association for complex node types
for i in _none_as_empty(self._interrupts):
i.parent = self
for r in _none_as_empty(self._registers):
r.parent = self
def __getattr__(self, attr):
return self._lookup_possibly_derived_attribute(attr)
@property
def registers(self):
regs = []
for reg in self._lookup_possibly_derived_attribute('registers'):
regs.append(reg)
for arr in self._lookup_possibly_derived_attribute('register_arrays'):
regs.extend(arr.registers)
for cluster in self._lookup_possibly_derived_attribute('clusters'):
regs.extend(cluster.registers)
return regs
def get_derived_from(self):
if self._derived_from is None:
return None
# find the peripheral with this name in the tree
try:
return [p for p in self.parent.peripherals if p.name == self._derived_from][0]
except IndexError:
return None
class SVDCpu(SVDElement):
def __init__(self, name, revision, endian, mpu_present, fpu_present, fpu_dp, icache_present,
dcache_present, itcm_present, dtcm_present, vtor_present, nvic_prio_bits,
vendor_systick_config, device_num_interrupts, sau_num_regions, sau_regions_config):
SVDElement.__init__(self)
self.name = name
self.revision = revision
self.endian = endian
self.mpu_present = mpu_present
self.fpu_present = fpu_present
self.fpu_dp = fpu_dp
        self.icache_present = icache_present
        self.dcache_present = dcache_present
        self.itcm_present = itcm_present
        self.dtcm_present = dtcm_present
self.vtor_present = vtor_present
self.nvic_prio_bits = nvic_prio_bits
self.vendor_systick_config = vendor_systick_config
self.device_num_interrupts = device_num_interrupts
self.sau_num_regions = sau_num_regions
self.sau_regions_config = sau_regions_config
class SVDDevice(SVDElement):
def __init__(self, vendor, vendor_id, name, version, description, cpu, address_unit_bits, width,
peripherals, size, access, protection, reset_value, reset_mask):
SVDElement.__init__(self)
self.vendor = vendor
self.vendor_id = vendor_id
self.name = name
self.version = version
self.description = description
self.cpu = cpu
self.address_unit_bits = _check_type(address_unit_bits, int)
self.width = _check_type(width, int)
self.peripherals = peripherals
self.size = size # Defines the default bit-width of any register contained in the device (implicit inheritance).
self.access = access # Defines the default access rights for all registers.
self.protection = protection # Defines extended access protection for all registers.
self.reset_value = reset_value # Defines the default value for all registers at RESET.
self.reset_mask = reset_mask # Identifies which register bits have a defined reset value.
# set up parent relationship
if self.cpu:
self.cpu.parent = self
for p in _none_as_empty(self.peripherals):
p.parent = self
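# Usage sketch: the classes above form a tree (device -> peripherals ->
# registers -> fields) that is normally populated by an SVD parser rather
# than built by hand. A minimal sketch, assuming a cmsis-svd style
# ``SVDParser`` is available and that an SVD file exists locally (the file
# name below is hypothetical):
if __name__ == "__main__":
    from cmsis_svd.parser import SVDParser

    parser = SVDParser.for_xml_file("STM32F103xx.svd")
    device = parser.get_device()
    for peripheral in device.peripherals:
        print(peripheral.name, hex(peripheral.base_address))
        for register in peripheral.registers:
            # address_offset is relative to the peripheral base address
            print("    %s @ +0x%x" % (register.name, register.address_offset))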
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy Pretrained Vision Model from MxNet on VTA
================================================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
This tutorial provides an end-to-end demo of running ImageNet classification
inference on the VTA accelerator design. It showcases Relay as a front-end
compiler that can perform quantization (VTA only supports int8/32 inference)
as well as graph packing (to enable tensorization in the core) in order to
massage the compute graph for the hardware target.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user mxnet requests pillow
#
# Now return to the python code. Import packages.
from __future__ import absolute_import, print_function
import argparse, json, os, requests, sys, time
from io import BytesIO
from os.path import join, isfile
from PIL import Image
from mxnet.gluon.model_zoo import vision
import numpy as np
from matplotlib import pyplot as plt
import tvm
from tvm import rpc, autotvm, relay
from tvm.contrib import graph_runtime, util, download
from tvm.contrib.debugger import debug_runtime
from tvm.relay import transform
import vta
from vta.testing import simulator
from vta.top import graph_pack
# Make sure that TVM was compiled with RPC=1
assert tvm.module.enabled("rpc")
######################################################################
# Define the platform and model targets
# -------------------------------------
# Execute on CPU vs. VTA, and define the model.
# Load VTA parameters from the vta/config/vta_config.json file
env = vta.get_env()
# Set ``device=arm_cpu`` to run inference on the CPU
# or ``device=vta`` to run inference on the FPGA.
device = "vta"
target = env.target if device == "vta" else env.target_vta_cpu
# Dictionary lookup for when to start/end bit packing
pack_dict = {
"resnet18_v1": ["nn.max_pool2d", "nn.global_avg_pool2d"],
"resnet34_v1": ["nn.max_pool2d", "nn.global_avg_pool2d"],
"resnet18_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
"resnet34_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
"resnet50_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
"resnet101_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
}
# Name of Gluon model to compile
# The ``start_pack`` and ``stop_pack`` labels indicate where
# to start and end the graph packing relay pass: in other words
# where to start and finish offloading to VTA.
model = "resnet18_v1"
assert model in pack_dict
######################################################################
# Obtain an execution remote
# ---------------------------------
# When target is 'pynq', reconfigure FPGA and runtime.
# Otherwise, if target is 'sim', execute locally.
if env.TARGET not in ["sim", "tsim"]:
# Get remote from tracker node if environment variable is set.
# To set up the tracker, you'll need to follow the "Auto-tuning
# a convolutional network for VTA" tutorial.
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
# Otherwise if you have a device you want to program directly from
# the host, make sure you've set the variables below to the IP of
# your board.
device_host = os.environ.get("VTA_PYNQ_RPC_HOST", "192.168.2.99")
device_port = os.environ.get("VTA_PYNQ_RPC_PORT", "9091")
if not tracker_host or not tracker_port:
remote = rpc.connect(device_host, int(device_port))
else:
remote = autotvm.measure.request_remote(env.TARGET, tracker_host, int(tracker_port), timeout=10000)
# Reconfigure the JIT runtime and FPGA.
# You can program the FPGA with your own custom bitstream
# by passing the path to the bitstream file instead of None.
reconfig_start = time.time()
vta.reconfig_runtime(remote)
vta.program_fpga(remote, bitstream=None)
reconfig_time = time.time() - reconfig_start
print("Reconfigured FPGA and RPC runtime in {0:.2f}s!".format(reconfig_time))
# In simulation mode, host the RPC server locally.
else:
remote = rpc.LocalSession()
# Get execution context from remote
ctx = remote.ext_dev(0) if device == "vta" else remote.cpu(0)
######################################################################
# Build the inference graph runtime
# ---------------------------------
# Grab vision model from Gluon model zoo and compile with Relay.
# The compilation steps are:
# 1) Front end translation from MxNet into Relay module.
# 2) Apply 8-bit quantization: here we skip the first conv layer and the
#    dense layer, which will both be executed in fp32 on the CPU.
# 3) Perform graph packing to alter the data layout for tensorization.
# 4) Perform constant folding to reduce number of operators (e.g. eliminate
# batch norm multiply).
# 5) Perform relay build to object file.
# 6) Load the object file onto remote (FPGA device).
# 7) Generate graph runtime, `m`.
# Load pre-configured AutoTVM schedules
with autotvm.tophub.context(target):
# Populate the shape and data type dictionary for ImageNet classifier input
dtype_dict = {"data": 'float32'}
shape_dict = {"data": (env.BATCH, 3, 224, 224)}
# Get off the shelf gluon model, and convert to relay
gluon_model = vision.get_model(model, pretrained=True)
# Measure build start time
build_start = time.time()
# Start front end compilation
mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
# Update shape and type dictionary
shape_dict.update({k: v.shape for k, v in params.items()})
dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
if target.device_name == "vta":
# Perform quantization in Relay
with relay.quantize.qconfig(global_scale=8.0,
skip_conv_layers=[0]):
relay_prog = relay.quantize.quantize(mod["main"], params=params)
# Perform graph packing and constant folding for VTA target
assert env.BLOCK_IN == env.BLOCK_OUT
relay_prog = graph_pack(
relay_prog,
env.BATCH,
env.BLOCK_OUT,
env.WGT_WIDTH,
start_name=pack_dict[model][0],
stop_name=pack_dict[model][1])
else:
relay_prog = mod["main"]
# Compile Relay program with AlterOpLayout disabled
with relay.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
if target.device_name != "vta":
graph, lib, params = relay.build(
relay_prog, target=target,
params=params, target_host=env.target_host)
else:
with vta.build_config():
graph, lib, params = relay.build(
relay_prog, target=target,
params=params, target_host=env.target_host)
# Measure Relay build time
build_time = time.time() - build_start
print(model + " inference graph built in {0:.2f}s!".format(build_time))
# Send the inference library over to the remote RPC server
temp = util.tempdir()
lib.save(temp.relpath("graphlib.o"))
remote.upload(temp.relpath("graphlib.o"))
lib = remote.load_module("graphlib.o")
# Graph runtime
m = graph_runtime.create(graph, lib, ctx)
######################################################################
# Perform image classification inference
# --------------------------------------
# We run classification on an image sample from ImageNet.
# We just need to download the categories file, `synset.txt`,
# and an input test image.
# Download ImageNet categories
categ_url = "https://github.com/uwsaml/web-data/raw/master/vta/models/"
categ_fn = "synset.txt"
download.download(join(categ_url, categ_fn), categ_fn)
synset = eval(open(categ_fn).read())
# Download test image
image_url = 'https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg'
response = requests.get(image_url)
# Prepare test image for inference
image = Image.open(BytesIO(response.content)).resize((224, 224))
plt.imshow(image)
plt.show()
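# Normalize with the standard ImageNet channel means/stds, reorder to
# channels-first (NCHW), and repeat the sample along the batch dimension
# so that it matches env.BATCH.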
image = np.array(image) - np.array([123., 117., 104.])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
image = np.repeat(image, env.BATCH, axis=0)
# Set the network parameters and inputs
m.set_input(**params)
m.set_input('data', image)
# Perform inference and gather execution statistics
# More on: https://docs.tvm.ai/api/python/module.html#tvm.module.Module.time_evaluator
num = 4 # number of times we run module for a single measurement
rep = 3 # number of measurements (we derive std dev from this)
timer = m.module.time_evaluator("run", ctx, number=num, repeat=rep)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
timer()
sim_stats = simulator.stats()
print("\nExecution statistics:")
for k, v in sim_stats.items():
# Since we execute the workload many times, we need to normalize stats
# Note that there is always one warm up run
# Therefore we divide the overall stats by (num * rep + 1)
print("\t{:<16}: {:>16}".format(k, v // (num * rep + 1)))
else:
tcost = timer()
std = np.std(tcost.results) * 1000
mean = tcost.mean * 1000
print("\nPerformed inference in %.2fms (std = %.2f) for %d samples" % (mean, std, env.BATCH))
print("Average per sample inference time: %.2fms" % (mean/env.BATCH))
# Get classification results
tvm_output = m.get_output(0, tvm.nd.empty((env.BATCH, 1000), "float32", remote.cpu(0)))
for b in range(env.BATCH):
top_categories = np.argsort(tvm_output.asnumpy()[b])
# Report top-5 classification results
print("\n{} prediction for sample {}".format(model, b))
print("\t#1:", synset[top_categories[-1]])
print("\t#2:", synset[top_categories[-2]])
print("\t#3:", synset[top_categories[-3]])
print("\t#4:", synset[top_categories[-4]])
print("\t#5:", synset[top_categories[-5]])
# This just checks that one of the 5 top categories
# is one variety of cat; this is by no means an accurate
# assessment of how quantization affects classification
# accuracy but is meant to catch changes to the
# quantization pass that would degrade accuracy in the CI.
cat_detected = False
for k in top_categories[-5:]:
if "cat" in synset[k]:
cat_detected = True
assert(cat_detected)
import functools
import warnings
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango21Warning,
)
from django.utils.encoding import force_text
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.six.moves.urllib.parse import urlparse, urlunparse
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
def deprecate_current_app(func):
"""
Handle deprecation of the current_app parameter of the views.
"""
@functools.wraps(func)
def inner(*args, **kwargs):
if 'current_app' in kwargs:
warnings.warn(
"Passing `current_app` as a keyword argument is deprecated. "
"Instead the caller of `{0}` should set "
"`request.current_app`.".format(func.__name__),
RemovedInDjango20Warning
)
current_app = kwargs.pop('current_app')
request = kwargs.get('request', None)
if request and current_app is not None:
request.current_app = current_app
return func(*args, **kwargs)
return inner
class LoginView(FormView):
"""
Displays the login form and handles the login action.
"""
form_class = AuthenticationForm
authentication_form = None
redirect_field_name = REDIRECT_FIELD_NAME
template_name = 'registration/login.html'
redirect_authenticated_user = False
extra_context = None
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
if self.redirect_authenticated_user and self.request.user.is_authenticated:
redirect_to = self.get_success_url()
if redirect_to == self.request.path:
raise ValueError(
"Redirection loop for authenticated user detected. Check that "
"your LOGIN_REDIRECT_URL doesn't point to a login page."
)
return HttpResponseRedirect(redirect_to)
return super(LoginView, self).dispatch(request, *args, **kwargs)
def get_success_url(self):
"""Ensure the user-originating redirection URL is safe."""
redirect_to = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name, '')
)
if not is_safe_url(url=redirect_to, host=self.request.get_host()):
return resolve_url(settings.LOGIN_REDIRECT_URL)
return redirect_to
def get_form_class(self):
return self.authentication_form or self.form_class
def form_valid(self, form):
"""Security check complete. Log the user in."""
auth_login(self.request, form.get_user())
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super(LoginView, self).get_context_data(**kwargs)
current_site = get_current_site(self.request)
context.update({
self.redirect_field_name: self.get_success_url(),
'site': current_site,
'site_name': current_site.name,
})
if self.extra_context is not None:
context.update(self.extra_context)
return context
@deprecate_current_app
def login(request, *args, **kwargs):
warnings.warn(
'The login() view is superseded by the class-based LoginView().',
RemovedInDjango21Warning, stacklevel=2
)
return LoginView.as_view(**kwargs)(request, *args, **kwargs)
class LogoutView(TemplateView):
"""
Logs out the user and displays 'You are logged out' message.
"""
next_page = None
redirect_field_name = REDIRECT_FIELD_NAME
template_name = 'registration/logged_out.html'
extra_context = None
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
auth_logout(request)
next_page = self.get_next_page()
if next_page:
# Redirect to this page until the session has been cleared.
return HttpResponseRedirect(next_page)
return super(LogoutView, self).dispatch(request, *args, **kwargs)
def get_next_page(self):
if self.next_page is not None:
next_page = resolve_url(self.next_page)
elif settings.LOGOUT_REDIRECT_URL:
next_page = resolve_url(settings.LOGOUT_REDIRECT_URL)
else:
next_page = self.next_page
if (self.redirect_field_name in self.request.POST or
self.redirect_field_name in self.request.GET):
next_page = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name)
)
# Security check -- don't allow redirection to a different host.
if not is_safe_url(url=next_page, host=self.request.get_host()):
next_page = self.request.path
return next_page
def get_context_data(self, **kwargs):
context = super(LogoutView, self).get_context_data(**kwargs)
current_site = get_current_site(self.request)
context.update({
'site': current_site,
'site_name': current_site.name,
'title': _('Logged out'),
})
if self.extra_context is not None:
context.update(self.extra_context)
return context
@deprecate_current_app
def logout(request, *args, **kwargs):
warnings.warn(
'The logout() view is superseded by the class-based LogoutView().',
RemovedInDjango21Warning, stacklevel=2
)
return LogoutView.as_view(**kwargs)(request, *args, **kwargs)
@deprecate_current_app
def logout_then_login(request, login_url=None, extra_context=None):
"""
Logs out the user if they are logged in. Then redirects to the log-in page.
"""
if not login_url:
login_url = settings.LOGIN_URL
login_url = resolve_url(login_url)
return logout(request, login_url, extra_context=extra_context)
def redirect_to_login(next, login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
"""
Redirects the user to the login page, passing the given 'next' page
"""
resolved_url = resolve_url(login_url or settings.LOGIN_URL)
login_url_parts = list(urlparse(resolved_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(login_url_parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
@deprecate_current_app
@csrf_protect
def password_reset(request,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=PasswordResetForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
extra_context=None,
html_email_template_name=None,
extra_email_context=None):
warnings.warn("The password_reset() view is superseded by the "
"class-based PasswordResetView().",
RemovedInDjango21Warning, stacklevel=2)
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_done')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'subject_template_name': subject_template_name,
'request': request,
'html_email_template_name': html_email_template_name,
'extra_email_context': extra_email_context,
}
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
'title': _('Password reset'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_done(request,
template_name='registration/password_reset_done.html',
extra_context=None):
warnings.warn("The password_reset_done() view is superseded by the "
"class-based PasswordResetDoneView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'title': _('Password reset sent'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
@deprecate_current_app
def password_reset_confirm(request, uidb64=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=SetPasswordForm,
post_reset_redirect=None,
extra_context=None):
"""
View that checks the hash in a password reset link and presents a
form for entering a new password.
"""
warnings.warn("The password_reset_confirm() view is superseded by the "
"class-based PasswordResetConfirmView().",
RemovedInDjango21Warning, stacklevel=2)
UserModel = get_user_model()
assert uidb64 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_complete')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
try:
# urlsafe_base64_decode() decodes to bytestring on Python 3
uid = force_text(urlsafe_base64_decode(uidb64))
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
title = _('Enter new password')
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_reset_redirect)
else:
form = set_password_form(user)
else:
validlink = False
form = None
title = _('Password reset unsuccessful')
context = {
'form': form,
'title': title,
'validlink': validlink,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_complete(request,
template_name='registration/password_reset_complete.html',
extra_context=None):
warnings.warn("The password_reset_complete() view is superseded by the "
"class-based PasswordResetCompleteView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'login_url': resolve_url(settings.LOGIN_URL),
'title': _('Password reset complete'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
# Class-based password reset views
# - PasswordResetView sends the mail
# - PasswordResetDoneView shows a success message for the above
# - PasswordResetConfirmView checks the link the user clicked and
# prompts for a new password
# - PasswordResetCompleteView shows a success message for the above
class PasswordContextMixin(object):
extra_context = None
def get_context_data(self, **kwargs):
context = super(PasswordContextMixin, self).get_context_data(**kwargs)
context['title'] = self.title
if self.extra_context is not None:
context.update(self.extra_context)
return context
class PasswordResetView(PasswordContextMixin, FormView):
email_template_name = 'registration/password_reset_email.html'
extra_email_context = None
form_class = PasswordResetForm
from_email = None
html_email_template_name = None
subject_template_name = 'registration/password_reset_subject.txt'
success_url = reverse_lazy('password_reset_done')
template_name = 'registration/password_reset_form.html'
title = _('Password reset')
token_generator = default_token_generator
@method_decorator(csrf_protect)
def dispatch(self, *args, **kwargs):
return super(PasswordResetView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
opts = {
'use_https': self.request.is_secure(),
'token_generator': self.token_generator,
'from_email': self.from_email,
'email_template_name': self.email_template_name,
'subject_template_name': self.subject_template_name,
'request': self.request,
'html_email_template_name': self.html_email_template_name,
'extra_email_context': self.extra_email_context,
}
form.save(**opts)
return super(PasswordResetView, self).form_valid(form)
class PasswordResetDoneView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_reset_done.html'
title = _('Password reset sent')
class PasswordResetConfirmView(PasswordContextMixin, FormView):
form_class = SetPasswordForm
success_url = reverse_lazy('password_reset_complete')
template_name = 'registration/password_reset_confirm.html'
title = _('Enter new password')
token_generator = default_token_generator
@method_decorator(sensitive_post_parameters())
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
assert 'uidb64' in kwargs and 'token' in kwargs
return super(PasswordResetConfirmView, self).dispatch(*args, **kwargs)
def get_user(self, uidb64):
UserModel = get_user_model()
try:
# urlsafe_base64_decode() decodes to bytestring on Python 3
uid = force_text(urlsafe_base64_decode(uidb64))
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
return user
def get_form_kwargs(self):
kwargs = super(PasswordResetConfirmView, self).get_form_kwargs()
kwargs['user'] = self.get_user(self.kwargs['uidb64'])
return kwargs
def form_valid(self, form):
form.save()
return super(PasswordResetConfirmView, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(PasswordResetConfirmView, self).get_context_data(**kwargs)
user = context['form'].user
if user is not None and self.token_generator.check_token(user, self.kwargs['token']):
context['validlink'] = True
else:
context.update({
'form': None,
'title': _('Password reset unsuccessful'),
'validlink': False,
})
return context
class PasswordResetCompleteView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_reset_complete.html'
title = _('Password reset complete')
def get_context_data(self, **kwargs):
context = super(PasswordResetCompleteView, self).get_context_data(**kwargs)
context['login_url'] = resolve_url(settings.LOGIN_URL)
return context
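# Illustrative URLconf wiring for the class-based password reset flow above.
# This is a sketch of what a project's urls.py might contain, not part of
# this module's API: the URL names must match the ``reverse_lazy()`` defaults
# used by the views ('password_reset_done', 'password_reset_complete') and
# the confirm pattern must capture ``uidb64`` and ``token``.
def _example_password_reset_urlpatterns():
    from django.conf.urls import url
    return [
        url(r'^password_reset/$',
            PasswordResetView.as_view(), name='password_reset'),
        url(r'^password_reset/done/$',
            PasswordResetDoneView.as_view(), name='password_reset_done'),
        url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/'
            r'(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
            PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
        url(r'^reset/done/$',
            PasswordResetCompleteView.as_view(), name='password_reset_complete'),
    ]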
@sensitive_post_parameters()
@csrf_protect
@login_required
@deprecate_current_app
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=PasswordChangeForm,
extra_context=None):
warnings.warn("The password_change() view is superseded by the "
"class-based PasswordChangeView().",
RemovedInDjango21Warning, stacklevel=2)
if post_change_redirect is None:
post_change_redirect = reverse('password_change_done')
else:
post_change_redirect = resolve_url(post_change_redirect)
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
# Updating the password logs out all other sessions for the user
# except the current one.
update_session_auth_hash(request, form.user)
return HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
'title': _('Password change'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@login_required
@deprecate_current_app
def password_change_done(request,
template_name='registration/password_change_done.html',
extra_context=None):
warnings.warn("The password_change_done() view is superseded by the "
"class-based PasswordChangeDoneView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'title': _('Password change successful'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
class PasswordChangeView(PasswordContextMixin, FormView):
form_class = PasswordChangeForm
success_url = reverse_lazy('password_change_done')
template_name = 'registration/password_change_form.html'
title = _('Password change')
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(PasswordChangeView, self).dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super(PasswordChangeView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
# Updating the password logs out all other sessions for the user
# except the current one.
update_session_auth_hash(self.request, form.user)
return super(PasswordChangeView, self).form_valid(form)
class PasswordChangeDoneView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_change_done.html'
title = _('Password change successful')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(PasswordChangeDoneView, self).dispatch(*args, **kwargs)
"""
Contingency table functions (:mod:`scipy.stats.contingency`)
============================================================
Functions for creating and analyzing contingency tables.
.. currentmodule:: scipy.stats.contingency
.. autosummary::
:toctree: generated/
chi2_contingency
relative_risk
crosstab
association
expected_freq
margins
"""
from functools import reduce
import math
import numpy as np
from ._stats_py import power_divergence
from ._relative_risk import relative_risk
from ._crosstab import crosstab
__all__ = ['margins', 'expected_freq', 'chi2_contingency', 'crosstab',
'association', 'relative_risk']
def margins(a):
"""Return a list of the marginal sums of the array `a`.
Parameters
----------
a : ndarray
The array for which to compute the marginal sums.
Returns
-------
margsums : list of ndarrays
A list of length `a.ndim`. `margsums[k]` is the result
of summing `a` over all axes except `k`; it has the same
number of dimensions as `a`, but the length of each axis
except axis `k` will be 1.
Examples
--------
>>> a = np.arange(12).reshape(2, 6)
>>> a
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11]])
>>> from scipy.stats.contingency import margins
>>> m0, m1 = margins(a)
>>> m0
array([[15],
[51]])
>>> m1
array([[ 6, 8, 10, 12, 14, 16]])
>>> b = np.arange(24).reshape(2,3,4)
>>> m0, m1, m2 = margins(b)
>>> m0
array([[[ 66]],
[[210]]])
>>> m1
array([[[ 60],
[ 92],
[124]]])
>>> m2
array([[[60, 66, 72, 78]]])
"""
margsums = []
ranged = list(range(a.ndim))
for k in ranged:
marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k])
margsums.append(marg)
return margsums
def expected_freq(observed):
"""
Compute the expected frequencies from a contingency table.
Given an n-dimensional contingency table of observed frequencies,
compute the expected frequencies for the table based on the marginal
sums under the assumption that the groups associated with each
dimension are independent.
Parameters
----------
observed : array_like
The table of observed frequencies. (While this function can handle
a 1-D array, that case is trivial. Generally `observed` is at
least 2-D.)
Returns
-------
expected : ndarray of float64
The expected frequencies, based on the marginal sums of the table.
Same shape as `observed`.
Examples
--------
>>> from scipy.stats.contingency import expected_freq
>>> observed = np.array([[10, 10, 20],[20, 20, 20]])
>>> expected_freq(observed)
array([[ 12., 12., 16.],
[ 18., 18., 24.]])
"""
# Typically `observed` is an integer array. If `observed` has a large
# number of dimensions or holds large values, some of the following
# computations may overflow, so we first switch to floating point.
observed = np.asarray(observed, dtype=np.float64)
# Create a list of the marginal sums.
margsums = margins(observed)
# Create the array of expected frequencies. The shapes of the
# marginal sums returned by apply_over_axes() are just what we
# need for broadcasting in the following product.
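    # For a 2-D table this reduces to the familiar
    #     expected[i, j] = rowsum[i] * colsum[j] / total
    # and in general the product of the n marginal sums is divided by
    # total**(n - 1).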
d = observed.ndim
expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1)
return expected
def chi2_contingency(observed, correction=True, lambda_=None):
"""Chi-square test of independence of variables in a contingency table.
This function computes the chi-square statistic and p-value for the
hypothesis test of independence of the observed frequencies in the
contingency table [1]_ `observed`. The expected frequencies are computed
based on the marginal sums under the assumption of independence; see
`scipy.stats.contingency.expected_freq`. The number of degrees of
freedom is (expressed using numpy functions and attributes)::
dof = observed.size - sum(observed.shape) + observed.ndim - 1
Parameters
----------
observed : array_like
The contingency table. The table contains the observed frequencies
(i.e. number of occurrences) in each category. In the two-dimensional
case, the table is often described as an "R x C table".
correction : bool, optional
If True, *and* the degrees of freedom is 1, apply Yates' correction
for continuity. The effect of the correction is to adjust each
observed value by 0.5 towards the corresponding expected value.
lambda_ : float or str, optional
By default, the statistic computed in this test is Pearson's
chi-squared statistic [2]_. `lambda_` allows a statistic from the
Cressie-Read power divergence family [3]_ to be used instead. See
`scipy.stats.power_divergence` for details.
Returns
-------
chi2 : float
The test statistic.
p : float
The p-value of the test
dof : int
Degrees of freedom
expected : ndarray, same shape as `observed`
The expected frequencies, based on the marginal sums of the table.
See Also
--------
scipy.stats.contingency.expected_freq
scipy.stats.fisher_exact
scipy.stats.chisquare
scipy.stats.power_divergence
scipy.stats.barnard_exact
scipy.stats.boschloo_exact
Notes
-----
An often quoted guideline for the validity of this calculation is that
the test should be used only if the observed and expected frequencies
in each cell are at least 5.
This is a test for the independence of different categories of a
population. The test is only meaningful when the dimension of
`observed` is two or more. Applying the test to a one-dimensional
table will always result in `expected` equal to `observed` and a
chi-square statistic equal to 0.
This function does not handle masked arrays, because the calculation
does not make sense with missing values.
Like stats.chisquare, this function computes a chi-square statistic;
the convenience this function provides is to figure out the expected
frequencies and degrees of freedom from the given contingency table.
If these were already known, and if the Yates' correction was not
required, one could use stats.chisquare. That is, if one calls::
chi2, p, dof, ex = chi2_contingency(obs, correction=False)
then the following is true::
(chi2, p) == stats.chisquare(obs.ravel(), f_exp=ex.ravel(),
ddof=obs.size - 1 - dof)
The `lambda_` argument was added in version 0.13.0 of scipy.
References
----------
.. [1] "Contingency table",
https://en.wikipedia.org/wiki/Contingency_table
.. [2] "Pearson's chi-squared test",
https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
.. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
A two-way example (2 x 3):
>>> from scipy.stats import chi2_contingency
>>> obs = np.array([[10, 10, 20], [20, 20, 20]])
>>> chi2_contingency(obs)
(2.7777777777777777,
0.24935220877729619,
2,
array([[ 12., 12., 16.],
[ 18., 18., 24.]]))
Perform the test using the log-likelihood ratio (i.e. the "G-test")
instead of Pearson's chi-squared statistic.
>>> g, p, dof, expctd = chi2_contingency(obs, lambda_="log-likelihood")
>>> g, p
(2.7688587616781319, 0.25046668010954165)
A four-way example (2 x 2 x 2 x 2):
>>> obs = np.array(
... [[[[12, 17],
... [11, 16]],
... [[11, 12],
... [15, 16]]],
... [[[23, 15],
... [30, 22]],
... [[14, 17],
... [15, 16]]]])
>>> chi2_contingency(obs)
(8.7584514426741897,
0.64417725029295503,
11,
array([[[[ 14.15462386, 14.15462386],
[ 16.49423111, 16.49423111]],
[[ 11.2461395 , 11.2461395 ],
[ 13.10500554, 13.10500554]]],
[[[ 19.5591166 , 19.5591166 ],
[ 22.79202844, 22.79202844]],
[[ 15.54012004, 15.54012004],
[ 18.10873492, 18.10873492]]]]))
"""
observed = np.asarray(observed)
if np.any(observed < 0):
raise ValueError("All values in `observed` must be nonnegative.")
if observed.size == 0:
raise ValueError("No data; `observed` has size 0.")
expected = expected_freq(observed)
if np.any(expected == 0):
# Include one of the positions where expected is zero in
# the exception message.
zeropos = list(zip(*np.nonzero(expected == 0)))[0]
raise ValueError("The internally computed table of expected "
"frequencies has a zero element at %s." % (zeropos,))
# The degrees of freedom
dof = expected.size - sum(expected.shape) + expected.ndim - 1
if dof == 0:
# Degenerate case; this occurs when `observed` is 1D (or, more
# generally, when it has only one nontrivial dimension). In this
# case, we also have observed == expected, so chi2 is 0.
chi2 = 0.0
p = 1.0
else:
if dof == 1 and correction:
# Adjust `observed` according to Yates' correction for continuity.
# Magnitude of correction no bigger than difference; see gh-13875
diff = expected - observed
direction = np.sign(diff)
magnitude = np.minimum(0.5, np.abs(diff))
observed = observed + magnitude * direction
chi2, p = power_divergence(observed, expected,
ddof=observed.size - 1 - dof, axis=None,
lambda_=lambda_)
return chi2, p, dof, expected
def association(observed, method="cramer", correction=False, lambda_=None):
"""Calculates degree of association between two nominal variables.
The function provides the option for computing one of three measures of
association between two nominal variables from the data given in a 2d
contingency table: Tschuprow's T, Pearson's Contingency Coefficient
and Cramer's V.
Parameters
----------
observed : array-like
The array of observed values
method : {"cramer", "tschuprow", "pearson"} (default = "cramer")
The association test statistic.
correction : bool, optional
Inherited from `scipy.stats.contingency.chi2_contingency()`
lambda_ : float or str, optional
Inherited from `scipy.stats.contingency.chi2_contingency()`
Returns
-------
statistic : float
Value of the test statistic
Notes
-----
    Cramer's V, Tschuprow's T and Pearson's Contingency Coefficient all
    measure the degree to which two nominal or ordinal variables are related,
    or the level of their association. This differs from correlation, although
    many often mistakenly consider them equivalent. Correlation measures in
    what way two variables are related, whereas association measures how
    related the variables are. As such, association does not subsume
    independent variables, and is rather a test of independence. A value of
1.0 indicates perfect association, and 0.0 means the variables have no
association.
Both the Cramer's V and Tschuprow's T are extensions of the phi
coefficient. Moreover, due to the close relationship between the
Cramer's V and Tschuprow's T the returned values can often be similar
or even equivalent. They are likely to diverge more as the array shape
diverges from a 2x2.
References
----------
.. [1] "Tschuprow's T",
https://en.wikipedia.org/wiki/Tschuprow's_T
.. [2] Tschuprow, A. A. (1939)
Principles of the Mathematical Theory of Correlation;
translated by M. Kantorowitsch. W. Hodge & Co.
.. [3] "Cramer's V", https://en.wikipedia.org/wiki/Cramer's_V
.. [4] "Nominal Association: Phi and Cramer's V",
http://www.people.vcu.edu/~pdattalo/702SuppRead/MeasAssoc/NominalAssoc.html
.. [5] Gingrich, Paul, "Association Between Variables",
http://uregina.ca/~gingrich/ch11a.pdf
Examples
--------
An example with a 4x2 contingency table:
>>> from scipy.stats.contingency import association
>>> obs4x2 = np.array([[100, 150], [203, 322], [420, 700], [320, 210]])
Pearson's contingency coefficient
>>> association(obs4x2, method="pearson")
0.18303298140595667
Cramer's V
>>> association(obs4x2, method="cramer")
0.18617813077483678
Tschuprow's T
>>> association(obs4x2, method="tschuprow")
0.14146478765062995
"""
arr = np.asarray(observed)
if not np.issubdtype(arr.dtype, np.integer):
raise ValueError("`observed` must be an integer array.")
if len(arr.shape) != 2:
raise ValueError("method only accepts 2d arrays")
chi2_stat = chi2_contingency(arr, correction=correction,
lambda_=lambda_)
phi2 = chi2_stat[0] / arr.sum()
n_rows, n_cols = arr.shape
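    # phi^2 = chi^2 / n.  The three statistics differ only in how phi^2 is
    # scaled before taking the square root:
    #   Cramer's V:                        sqrt(phi^2 / min(r - 1, c - 1))
    #   Tschuprow's T:                     sqrt(phi^2 / sqrt((r - 1) * (c - 1)))
    #   Pearson's contingency coefficient: sqrt(phi^2 / (1 + phi^2))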
if method == "cramer":
value = phi2 / min(n_cols - 1, n_rows - 1)
elif method == "tschuprow":
value = phi2 / math.sqrt((n_rows - 1) * (n_cols - 1))
elif method == 'pearson':
value = phi2 / (1 + phi2)
else:
raise ValueError("Invalid argument value: 'method' argument must "
"be 'cramer', 'tschuprow', or 'pearson'")
return math.sqrt(value)
from itertools import product as cartes
from sympy import (
limit, exp, oo, log, sqrt, Limit, sin, floor, cos, ceiling,
    atan, gamma, Symbol, S, pi, Integral, cot, Rational, I, zoo,
    tan, integrate, Sum, sign, Function)
from sympy.series.limits import heuristics
from sympy.series.order import Order
from sympy.abc import x, y, z
from sympy.utilities.pytest import XFAIL, raises
def test_basic1():
assert limit(x, x, oo) == oo
assert limit(x, x, -oo) == -oo
assert limit(-x, x, oo) == -oo
assert limit(x**2, x, -oo) == oo
assert limit(-x**2, x, oo) == -oo
assert limit(x*log(x), x, 0, dir="+") == 0
assert limit(1/x, x, oo) == 0
assert limit(exp(x), x, oo) == oo
assert limit(-exp(x), x, oo) == -oo
assert limit(exp(x)/x, x, oo) == oo
assert limit(1/x - exp(-x), x, oo) == 0
assert limit(x + 1/x, x, oo) == oo
assert limit(x - x**2, x, oo) == -oo
assert limit((1 + x)**(1 + sqrt(2)), x, 0) == 1
assert limit((1 + x)**oo, x, 0) == oo
assert limit((1 + x)**oo, x, 0, dir='-') == 0
assert limit((1 + x + y)**oo, x, 0, dir='-') == (1 + y)**(oo)
assert limit(y/x/log(x), x, 0) == -oo*sign(y)
assert limit(cos(x + y)/x, x, 0) == sign(cos(y))*oo
raises(NotImplementedError, lambda: limit(Sum(1/x, (x, 1, y)) -
log(y), y, oo))
raises(NotImplementedError, lambda: limit(Sum(1/x, (x, 1, y)) - 1/y, y, oo))
assert limit(gamma(1/x + 3), x, oo) == 2
assert limit(S.NaN, x, -oo) == S.NaN
assert limit(Order(2)*x, x, S.NaN) == S.NaN
assert limit(1/(x - 1), x, 1, dir="+") == oo
assert limit(1/(x - 1), x, 1, dir="-") == -oo
assert limit(1/(5 - x)**3, x, 5, dir="+") == -oo
assert limit(1/(5 - x)**3, x, 5, dir="-") == oo
assert limit(1/sin(x), x, pi, dir="+") == -oo
assert limit(1/sin(x), x, pi, dir="-") == oo
assert limit(1/cos(x), x, pi/2, dir="+") == -oo
assert limit(1/cos(x), x, pi/2, dir="-") == oo
assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="+") == oo
assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="-") == -oo
assert limit(1/cot(x)**3, x, (3*pi/2), dir="+") == -oo
assert limit(1/cot(x)**3, x, (3*pi/2), dir="-") == oo
# approaching 0
# from dir="+"
assert limit(1 + 1/x, x, 0) == oo
# from dir='-'
# Add
assert limit(1 + 1/x, x, 0, dir='-') == -oo
# Pow
assert limit(x**(-2), x, 0, dir='-') == oo
assert limit(x**(-3), x, 0, dir='-') == -oo
assert limit(1/sqrt(x), x, 0, dir='-') == (-oo)*I
assert limit(x**2, x, 0, dir='-') == 0
assert limit(sqrt(x), x, 0, dir='-') == 0
assert limit(x**-pi, x, 0, dir='-') == oo*sign((-1)**(-pi))
assert limit((1 + cos(x))**oo, x, 0) == oo
def test_basic2():
assert limit(x**x, x, 0, dir="+") == 1
assert limit((exp(x) - 1)/x, x, 0) == 1
assert limit(1 + 1/x, x, oo) == 1
assert limit(-exp(1/x), x, oo) == -1
assert limit(x + exp(-x), x, oo) == oo
assert limit(x + exp(-x**2), x, oo) == oo
assert limit(x + exp(-exp(x)), x, oo) == oo
assert limit(13 + 1/x - exp(-x), x, oo) == 13
def test_basic3():
assert limit(1/x, x, 0, dir="+") == oo
assert limit(1/x, x, 0, dir="-") == -oo
def test_basic4():
assert limit(2*x + y*x, x, 0) == 0
assert limit(2*x + y*x, x, 1) == 2 + y
assert limit(2*x**8 + y*x**(-3), x, -2) == 512 - y/8
assert limit(sqrt(x + 1) - sqrt(x), x, oo) == 0
assert integrate(1/(x**3 + 1), (x, 0, oo)) == 2*pi*sqrt(3)/9
def test_basic5():
class my(Function):
@classmethod
def eval(cls, arg):
if arg is S.Infinity:
return S.NaN
assert limit(my(x), x, oo) == Limit(my(x), x, oo)
def test_issue_3885():
assert limit(x*y + x*z, z, 2) == x*y + 2*x
def test_Limit():
assert Limit(sin(x)/x, x, 0) != 1
assert Limit(sin(x)/x, x, 0).doit() == 1
def test_floor():
assert limit(floor(x), x, -2, "+") == -2
assert limit(floor(x), x, -2, "-") == -3
assert limit(floor(x), x, -1, "+") == -1
assert limit(floor(x), x, -1, "-") == -2
assert limit(floor(x), x, 0, "+") == 0
assert limit(floor(x), x, 0, "-") == -1
assert limit(floor(x), x, 1, "+") == 1
assert limit(floor(x), x, 1, "-") == 0
assert limit(floor(x), x, 2, "+") == 2
assert limit(floor(x), x, 2, "-") == 1
assert limit(floor(x), x, 248, "+") == 248
assert limit(floor(x), x, 248, "-") == 247
def test_floor_requires_robust_assumptions():
assert limit(floor(sin(x)), x, 0, "+") == 0
assert limit(floor(sin(x)), x, 0, "-") == -1
assert limit(floor(cos(x)), x, 0, "+") == 0
assert limit(floor(cos(x)), x, 0, "-") == 0
assert limit(floor(5 + sin(x)), x, 0, "+") == 5
assert limit(floor(5 + sin(x)), x, 0, "-") == 4
assert limit(floor(5 + cos(x)), x, 0, "+") == 5
assert limit(floor(5 + cos(x)), x, 0, "-") == 5
def test_ceiling():
assert limit(ceiling(x), x, -2, "+") == -1
assert limit(ceiling(x), x, -2, "-") == -2
assert limit(ceiling(x), x, -1, "+") == 0
assert limit(ceiling(x), x, -1, "-") == -1
assert limit(ceiling(x), x, 0, "+") == 1
assert limit(ceiling(x), x, 0, "-") == 0
assert limit(ceiling(x), x, 1, "+") == 2
assert limit(ceiling(x), x, 1, "-") == 1
assert limit(ceiling(x), x, 2, "+") == 3
assert limit(ceiling(x), x, 2, "-") == 2
assert limit(ceiling(x), x, 248, "+") == 249
assert limit(ceiling(x), x, 248, "-") == 248
def test_ceiling_requires_robust_assumptions():
assert limit(ceiling(sin(x)), x, 0, "+") == 1
assert limit(ceiling(sin(x)), x, 0, "-") == 0
assert limit(ceiling(cos(x)), x, 0, "+") == 1
assert limit(ceiling(cos(x)), x, 0, "-") == 1
assert limit(ceiling(5 + sin(x)), x, 0, "+") == 6
assert limit(ceiling(5 + sin(x)), x, 0, "-") == 5
assert limit(ceiling(5 + cos(x)), x, 0, "+") == 6
assert limit(ceiling(5 + cos(x)), x, 0, "-") == 6
def test_atan():
x = Symbol("x", real=True)
assert limit(atan(x)*sin(1/x), x, 0) == 0
assert limit(atan(x) + sqrt(x + 1) - sqrt(x), x, oo) == pi/2
def test_abs():
assert limit(abs(x), x, 0) == 0
assert limit(abs(sin(x)), x, 0) == 0
assert limit(abs(cos(x)), x, 0) == 1
assert limit(abs(sin(x + 1)), x, 0) == sin(1)
def test_heuristic():
x = Symbol("x", real=True)
assert heuristics(sin(1/x) + atan(x), x, 0, '+') == sin(oo)
assert limit(log(2 + sqrt(atan(x))*sqrt(sin(1/x))), x, 0) == log(2)
def test_issue_3871():
z = Symbol("z", positive=True)
f = -1/z*exp(-z*x)
assert limit(f, x, oo) == 0
assert f.limit(x, oo) == 0
def test_exponential():
n = Symbol('n')
x = Symbol('x', real=True)
assert limit((1 + x/n)**n, n, oo) == exp(x)
assert limit((1 + x/(2*n))**n, n, oo) == exp(x/2)
assert limit((1 + x/(2*n + 1))**n, n, oo) == exp(x/2)
assert limit(((x - 1)/(x + 1))**x, x, oo) == exp(-2)
assert limit(1 + (1 + 1/x)**x, x, oo) == 1 + S.Exp1
@XFAIL
def test_exponential2():
n = Symbol('n')
assert limit((1 + x/(n + sin(n)))**n, n, oo) == exp(x)
def test_doit():
f = Integral(2 * x, x)
l = Limit(f, x, oo)
assert l.doit() == oo
@XFAIL
def test_doit2():
f = Integral(2 * x, x)
l = Limit(f, x, oo)
# limit() breaks on the contained Integral.
assert l.doit(deep=False) == l
def test_issue_3792():
assert limit( (1 - cos(x))/x**2, x, S(1)/2) == 4 - 4*cos(S(1)/2)
assert limit(sin(sin(x + 1) + 1), x, 0) == sin(1 + sin(1))
assert limit(abs(sin(x + 1) + 1), x, 0) == 1 + sin(1)
def test_issue_4090():
assert limit(1/(x + 3), x, 2) == S(1)/5
assert limit(1/(x + pi), x, 2) == S(1)/(2 + pi)
assert limit(log(x)/(x**2 + 3), x, 2) == log(2)/7
assert limit(log(x)/(x**2 + pi), x, 2) == log(2)/(4 + pi)
def test_issue_4547():
assert limit(cot(x), x, 0, dir='+') == oo
assert limit(cot(x), x, pi/2, dir='+') == 0
def test_issue_5164():
assert limit(x**0.5, x, oo) == oo**0.5 == oo
assert limit(x**0.5, x, 16) == S(16)**0.5
assert limit(x**0.5, x, 0) == 0
assert limit(x**(-0.5), x, oo) == 0
assert limit(x**(-0.5), x, 4) == S(4)**(-0.5)
def test_issue_5183():
# using list(...) so py.test can recalculate values
tests = list(cartes([x, -x],
[-1, 1],
[2, 3, Rational(1, 2), Rational(2, 3)],
['-', '+']))
results = (oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3), oo,
0, 0, 0, 0, 0, 0, 0, 0,
oo, oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3),
0, 0, 0, 0, 0, 0, 0, 0)
assert len(tests) == len(results)
for i, (args, res) in enumerate(zip(tests, results)):
y, s, e, d = args
eq = y**(s*e)
try:
assert limit(eq, x, 0, dir=d) == res
except AssertionError:
if 0: # change to 1 if you want to see the failing tests
print()
print(i, res, eq, d, limit(eq, x, 0, dir=d))
else:
assert None
def test_issue_5184():
assert limit(sin(x)/x, x, oo) == 0
assert limit(atan(x), x, oo) == pi/2
assert limit(gamma(x), x, oo) == oo
assert limit(cos(x)/x, x, oo) == 0
assert limit(gamma(x), x, Rational(1, 2)) == sqrt(pi)
r = Symbol('r', real=True, finite=True)
assert limit(r*sin(1/r), r, 0) == 0
def test_issue_5229():
assert limit((1 + y)**(1/y) - S.Exp1, y, 0) == 0
def test_issue_4546():
# using list(...) so py.test can recalculate values
tests = list(cartes([cot, tan],
[-pi/2, 0, pi/2, pi, 3*pi/2],
['-', '+']))
results = (0, 0, -oo, oo, 0, 0, -oo, oo, 0, 0,
oo, -oo, 0, 0, oo, -oo, 0, 0, oo, -oo)
assert len(tests) == len(results)
for i, (args, res) in enumerate(zip(tests, results)):
f, l, d = args
eq = f(x)
try:
assert limit(eq, x, l, dir=d) == res
except AssertionError:
if 0: # change to 1 if you want to see the failing tests
print()
print(i, res, eq, l, d, limit(eq, x, l, dir=d))
else:
assert None
def test_issue_3934():
assert limit((1 + x**log(3))**(1/x), x, 0) == 1
assert limit((5**(1/x) + 3**(1/x))**x, x, 0) == 5
def test_calculate_series():
# needs gruntz calculate_series to go to n = 32
assert limit(x**(S(77)/3)/(1 + x**(S(77)/3)), x, oo) == 1
# needs gruntz calculate_series to go to n = 128
assert limit(x**101.1/(1 + x**101.1), x, oo) == 1
def test_issue_5955():
assert limit((x**16)/(1 + x**16), x, oo) == 1
assert limit((x**100)/(1 + x**100), x, oo) == 1
assert limit((x**1885)/(1 + x**1885), x, oo) == 1
assert limit((x**1000/((x + 1)**1000 + exp(-x))), x, oo) == 1
def test_newissue():
assert limit(exp(1/sin(x))/exp(cot(x)), x, 0) == 1
def test_extended_real_line():
assert limit(x - oo, x, oo) == -oo
assert limit(oo - x, x, -oo) == oo
assert limit(x**2/(x - 5) - oo, x, oo) == -oo
assert limit(1/(x + sin(x)) - oo, x, 0) == -oo
assert limit(oo/x, x, oo) == oo
assert limit(x - oo + 1/x, x, oo) == -oo
assert limit(x - oo + 1/x, x, 0) == -oo
@XFAIL
def test_order_oo():
from sympy import C
x = Symbol('x', positive=True, finite=True)
assert C.Order(x)*oo != C.Order(1, x)
assert limit(oo/(x**2 - 4), x, oo) == oo
def test_issue_5436():
raises(NotImplementedError, lambda: limit(exp(x*y), x, oo))
raises(NotImplementedError, lambda: limit(exp(-x*y), x, oo))
def test_Limit_dir():
raises(TypeError, lambda: Limit(x, x, 0, dir=0))
raises(ValueError, lambda: Limit(x, x, 0, dir='0'))
def test_polynomial():
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, oo) == 1
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, -oo) == 1
def test_rational():
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, oo) == (z - 1)/(y*z)
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, -oo) == (z - 1)/(y*z)
def test_issue_5740():
assert limit(log(x)*z - log(2*x)*y, x, 0) == oo*sign(y - z)
def test_issue_6366():
n = Symbol('n', integer=True, positive=True)
r = (n + 1)*x**(n + 1)/(x**(n + 1) - 1) - x/(x - 1)
assert limit(r, x, 1).simplify() == n/2
def test_factorial():
from sympy import factorial, E
f = factorial(x)
assert limit(f, x, oo) == oo
assert limit(x/f, x, oo) == 0
# see Stirling's approximation:
# http://en.wikipedia.org/wiki/Stirling's_approximation
assert limit(f/(sqrt(2*pi*x)*(x/E)**x), x, oo) == 1
assert limit(f, x, -oo) == factorial(-oo)
assert limit(f, x, x**2) == factorial(x**2)
assert limit(f, x, -x**2) == factorial(-x**2)
def test_issue_6560():
e = 5*x**3/4 - 3*x/4 + (y*(3*x**2/2 - S(1)/2) + \
35*x**4/8 - 15*x**2/4 + S(3)/8)/(2*(y + 1))
assert limit(e, y, oo) == (5*x**3 + 3*x**2 - 3*x - 1)/4
def test_issue_5172():
n = Symbol('n')
r = Symbol('r', positive=True)
c = Symbol('c')
p = Symbol('p', positive=True)
m = Symbol('m', negative=True)
expr = ((2*n*(n - r + 1)/(n + r*(n - r + 1)))**c + \
(r - 1)*(n*(n - r + 2)/(n + r*(n - r + 1)))**c - n)/(n**c - n)
expr = expr.subs(c, c + 1)
raises(NotImplementedError, lambda: limit(expr, n, oo))
assert limit(expr.subs(c, m), n, oo) == 1
assert limit(expr.subs(c, p), n, oo).simplify() == \
(2**(p + 1) + r - 1)/(r + 1)**(p + 1)
def test_issue_7088():
a = Symbol('a')
assert limit(sqrt(x/(x + a)), x, oo) == 1
def test_issue_6364():
a = Symbol('a')
e = z/(1 - sqrt(1 + z)*sin(a)**2 - sqrt(1 - z)*cos(a)**2)
assert limit(e, z, 0).simplify() == 2/cos(2*a)
def test_issue_4099():
a = Symbol('a')
assert limit(a/x, x, 0) == oo*sign(a)
assert limit(-a/x, x, 0) == -oo*sign(a)
assert limit(-a*x, x, oo) == -oo*sign(a)
assert limit(a*x, x, oo) == oo*sign(a)
def test_issue_4503():
dx = Symbol('dx')
assert limit((sqrt(1 + exp(x + dx)) - sqrt(1 + exp(x)))/dx, dx, 0) == \
exp(x)/(2*sqrt(exp(x) + 1))
|
|
import contextlib
import os
import tempfile
import unittest
import numpy
import chainer
from chainer import links
from chainer import testing
def _decorrelated_batch_normalization(x, mean, projection, groups):
xs = numpy.split(x, groups, axis=1)
assert mean.shape[0] == groups
assert projection.shape[0] == groups
ys = [
_decorrelated_batch_normalization_1group(xi, m, p)
for (xi, m, p) in zip(xs, mean, projection)]
return numpy.concatenate(ys, axis=1)
def _decorrelated_batch_normalization_1group(x, mean, projection):
spatial_ndim = len(x.shape[2:])
spatial_axis = tuple(range(2, 2 + spatial_ndim))
b, C = x.shape[:2]
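    # Flatten batch and spatial dims per channel, then whiten: y_hat = P (x_hat - mean).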
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
y_hat = projection.dot(x_hat - mean[:, None])
y = y_hat.reshape((C, b) + x.shape[2:]).transpose(
(1, 0) + spatial_axis)
return y
def _calc_projection(x, mean, eps, groups):
xs = numpy.split(x, groups, axis=1)
assert mean.shape[0] == groups
projections = [
_calc_projection_1group(xi, m, eps)
for (xi, m) in zip(xs, mean)]
return numpy.concatenate([p[None] for p in projections])
def _calc_projection_1group(x, mean, eps):
spatial_ndim = len(x.shape[2:])
spatial_axis = tuple(range(2, 2 + spatial_ndim))
b, C = x.shape[:2]
m = b
for i in spatial_axis:
m *= x.shape[i]
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
mean = x_hat.mean(axis=1)
x_hat = x_hat - mean[:, None]
cov = x_hat.dot(x_hat.T) / m + eps * numpy.eye(C, dtype=x.dtype)
eigvals, eigvectors = numpy.linalg.eigh(cov)
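    # ZCA-style whitening matrix: projection = cov^(-1/2), built from the
    # eigendecomposition of the regularized covariance.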
projection = eigvectors.dot(numpy.diag(eigvals ** -0.5)).dot(eigvectors.T)
return projection
def _calc_mean(x, groups):
axis = (0,) + tuple(range(2, x.ndim))
return x.mean(axis=axis).reshape(groups, -1)
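# Illustrative (not executed) sanity check of the helpers above: whitening with
# the computed projection should give a covariance close to the identity within
# each group. The shapes used here are an assumption for the sketch only.
#
#   x = numpy.random.randn(16, 8, 4, 4).astype(numpy.float32)
#   mean = _calc_mean(x, groups=2)                          # shape (2, 4)
#   proj = _calc_projection(x, mean, eps=2e-5, groups=2)    # shape (2, 4, 4)
#   y = _decorrelated_batch_normalization(x, mean, proj, groups=2)
#   # numpy.cov(y.transpose(1, 0, 2, 3).reshape(8, -1)) is ~identity within each group block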
@testing.parameterize(*(testing.product({
'n_channels': [8],
'groups': [1, 2],
'eps': [2e-5, 5e-1],
'test': [True, False],
'ndim': [0, 2],
# NOTE(crcrpar): np.linalg.eigh does not support float16
'dtype': [numpy.float32, numpy.float64],
})))
@testing.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class DecorrelatedBatchNormalizationTest(testing.LinkTestCase):
param_names = ()
def setUp(self):
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_backward_options = {'atol': 5e-3, 'rtol': 1e-3}
if self.dtype == numpy.float32:
self.check_backward_options = {'atol': 5e-2, 'rtol': 5e-2}
def generate_params(self):
C = self.n_channels // self.groups
# TODO(ecastill) mean and projection are not
# parameters inside the link, just plain arrays
mean = numpy.random.uniform(
-1, 1, (self.groups, C)).astype(self.dtype)
projection = numpy.random.uniform(
0.5, 1, (self.groups, C, C)).astype(
self.dtype)
return mean, projection
def create_link(self, initializers):
mean, projection = initializers
link = links.DecorrelatedBatchNormalization(
self.n_channels, groups=self.groups, eps=self.eps,
dtype=self.dtype)
link.cleargrads()
if self.test:
link.avg_mean[...] = mean
link.avg_projection[...] = projection
return link
def generate_inputs(self):
dtype = self.dtype
ndim = self.ndim
shape = (5, self.n_channels) + (2,) * ndim
m = 5 * 2 ** ndim
# NOTE(kataoka): The current implementation uses linalg.eigh. Small
# eigenvalues of the correlation matrix, which can be as small as
# eps=2e-5, cannot be computed with good *relative* accuracy, but
        # the eigenvalues are used later as `eigvals ** -0.5`. Require that the
        # following be sufficiently large:
# min(eigvals[:k]) == min(singular_vals ** 2 / m + eps)
min_singular_value = 0.1
# NOTE(kataoka): Decorrelated batch normalization should be free from
# "stochastic axis swapping". Requiring a gap between singular values
# just hides mistakes in implementations.
min_singular_value_gap = 0.001
g = self.groups
zca_shape = g, self.n_channels // g, m
x = numpy.random.uniform(-1, 1, zca_shape)
mean = x.mean(axis=2, keepdims=True)
a = x - mean
u, s, vh = numpy.linalg.svd(a, full_matrices=False)
# Decrement the latter dim because of the constraint `sum(_) == 0`
k = min(zca_shape[1], zca_shape[2] - 1)
s[:, :k] += (
min_singular_value
+ min_singular_value_gap * numpy.arange(k)
)[::-1]
a = numpy.einsum('bij,bj,bjk->bik', u, s, vh)
x = a + mean
x = x.reshape((self.n_channels, shape[0]) + shape[2:]).swapaxes(0, 1)
x = x.astype(dtype)
return x,
def forward_expected(self, link, inputs):
x, = inputs
if self.test:
mean = link.avg_mean
projection = link.avg_projection
else:
mean = _calc_mean(x, self.groups)
projection = _calc_projection(x, mean,
link.eps, self.groups)
y_expect = _decorrelated_batch_normalization(
x, mean, projection, self.groups)
return y_expect,
def forward(self, link, inputs, backend_config):
x, = inputs
with chainer.using_config('train', not self.test):
y = link(x)
return y,
# TODO(kataoka) Use `contextlib.nullcontext` if Python 3.7 or higher is assumed
@contextlib.contextmanager
def nullcontext():
yield
@testing.parameterize(*(testing.product({
'n_channels': [8],
'groups': [1, 2],
'dtype': [numpy.float32, numpy.float64],
})))
@testing.inject_backend_tests([
'test_model_compatibility_npz', 'test_model_compatibility_hdf5',
], [
{},
{'use_cuda': True},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestDecorrelatedBatchNormalizationCompat(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
def tearDown(self):
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def check_model_compatibility(self, backend_config, save, load):
C = self.n_channels // self.groups
old_model = {
'avg_mean': numpy.random.uniform(
-1, 1, (C,)).astype(self.dtype),
'avg_projection': numpy.random.uniform(
0.5, 1, (C, C)).astype(self.dtype),
'N': numpy.array(0)
}
save(self.temp_file_path, old_model)
model = links.DecorrelatedBatchNormalization(
self.n_channels, groups=self.groups, dtype=self.dtype)
model.to_device(backend_config.device)
with (
testing.assert_warns(UserWarning) if self.groups != 1
else nullcontext()):
load(self.temp_file_path, model)
x = numpy.random.rand(5, self.n_channels, 2).astype(self.dtype)
x = backend_config.get_array(x)
with chainer.using_config('train', False):
model(x)
model(x)
def test_model_compatibility_npz(self, backend_config):
self.check_model_compatibility(
backend_config,
chainer.serializers.save_npz,
chainer.serializers.load_npz,
)
@testing.with_requires('h5py')
def test_model_compatibility_hdf5(self, backend_config):
self.check_model_compatibility(
backend_config,
chainer.serializers.save_hdf5,
chainer.serializers.load_hdf5,
)
testing.run_module(__name__, __file__)
|
|
"""
The dataset for the pforest.
GNU GENERAL PUBLIC LICENSE Version 2
Created on Tue Oct 14 18:52:01 2014
@author: Wasit
"""
import numpy as np
import os
#from PIL import Image
#from scipy.ndimage import filters
try:
import json
except ImportError:
import simplejson as json
#1800
#num_img=100
#spi=5
#
#rootdir="dataset"
#mrec=64
#mtran=64
#margin=mrec+mtran
class dataset:
"""
    A class that represents a dataset of the pforest.
"""
def __init__(self,index=0,n_proposal=100,_prefix='train'):
"""
To create and initialise
        self.theta_dim--(m) dimension of theta; theta is a column vector
        self.size-------(n) number of samples in the root bag
        self.I----------preprocessed data
        self.samples----the matrix which has size of [(p+1)xn],
            where p is the size of the vector that identifies the location
            of a sample in self.I.
            Note that the first row of self.samples is the label
"""
#1 self.cmax: maximum number of classes
#2 self.spi: number of samples per image [removed]
#3 self.theta_dim: the number of elements in a theta (a number of parameter in theta)
#4 self.size: number of all samples in the root bag
#5 self.I: the data
#6 self.samples: samples[x]=[class]
#7 self.theta_range: range of theta for generating value in getParam()
'''
        Example: In order to extract an LBP feature, a possible setup is theta_dim=5,
        where 4 dimensions are used to indicate the 2 corners of a rectangular window.
        The last dimension represents the bin of the LBP histogram.
        Then we can set theta=[r1, c1, r2, c2, bin]^T.
        In this particular case (|theta| = 5), the theta dimension is called "theta_dim".
        In getParam() the random proposals are generated by a random function within a certain range, which is called "theta_range".
#3 self.theta_dim:
# r1,r2 {margin~rmax-margin},
# c1,c2 {margin~cmax-margin},
# bin {0~3}
# L1(r1c1)----L2(r1c2)
# | |
# L3(r2c1)----L4(r2c2)
'''
import pickle
self.n_proposal=n_proposal
self.index=index
self.path='%s/dataset%02d.pic'%(_prefix,self.index)
pickleFile = open(self.path, 'rb')
self.clmax,self.theta_dim,self.theta_range,self.size,self.samples,self.I,pos = pickle.load(pickleFile)
        if self.samples is None:
            self.samples = np.zeros(self.I.shape[0])
        # astype() returns a new array; assign it back so the cast actually takes effect
        self.samples = self.samples.astype(np.uint8)
        pickleFile.close()
def __str__(self):
"""
Return string that describes the dataset.
Almost all attributes are included.
"""
        return '\tdataset_pickle: path=./"%s" cmax=%d, theta_dim=%d, theta_range=%d \n\
\tsize=%d, label.shape=%s, I.shape=%s'\
%(self.path,self.clmax,self.theta_dim,self.theta_range,self.size,self.samples.shape,self.I.shape)
def __del__(self):
del self.clmax
del self.theta_dim
del self.theta_range
del self.size
del self.samples#samples contains only label
del self.I
def getX(self):
"""
Return the indices of data in a dataset in randomized order.
input:
void
output:
[1D ndarray dtype=np.uint32]
"""
return np.random.permutation(self.size)
def getL(self,x):
"""
Return the label associated with indices x.
input:
[1D ndarray dtype=np.uint32]
output:
[1D ndarray dtype=np.uint32]
"""
return self.samples[x]
def setL(self,x,L):
"""
Set the label to associate with the indices x.
input:
x: [1D ndarray dtype=np.uint32]
L: [1D ndarray dtype=np.uint32]
"""
self.samples[x]=L
###here
def getIs(self,thetas,x):
"""
input:
thetas: [2D ndarray float]
x: [1D ndarray dtype=np.uint32]
output:
[1D ndarray dtype=float]
Description:
            In the spiral case, only the first row of the thetas is used.
"""
#dataset.getParam() calls this
#theta and x have same number of column
#3 self.theta_dim: [0_r1, 1_c1, 2_r2, 3_c2, 4_bin]^T
# r1,r2 {margin~rmax-margin},
# c1,c2 {margin~cmax-margin},
# bin {0~3}
# L1(r1c1)----L2(r1c2)
# | |
# L3(r2c1)----L4(r2c2)
##########
#6 self.samples: samples[x]=[0_class, 1_img, 2_row, 3_column]^T
# r1=self.samples[2,x]+thetas[0,:]
# c1=self.samples[3,x]+thetas[1,:]
# r2=self.samples[2,x]+thetas[2,:]
# c2=self.samples[3,x]+thetas[3,:]
# bins=thetas[self.theta_dim-1,:]
# f=np.zeros(len(x))
# for i,ix in enumerate(x):
# img=self.samples[1,ix]
# L1=self.I[img][r1[i],c1[i],bins[i]]
# L2=self.I[img][r1[i],c2[i],bins[i]]
# L3=self.I[img][r2[i],c1[i],bins[i]]
# L4=self.I[img][r2[i],c2[i],bins[i]]
# f[i]=float(L4+L1-L2-L3)
##need to check
f=np.zeros(len(x))
for i in xrange(len(x)):
f[i]=self.I[x[i],thetas[i,0]]
return f
def getI(self,theta,x):
"""
input:
theta: [1D ndarray float]
x: [1D ndarray dtype=np.uint32]
output:
[1D ndarray dtype=float]
Description:
            In the spiral case, only the first row of the thetas is used.
"""
#engine.getQH() call this
##original
# r1=self.samples[2,x]+theta[0]
# c1=self.samples[3,x]+theta[1]
# r2=self.samples[2,x]+theta[2]
# c2=self.samples[3,x]+theta[3]
# bins=theta[self.theta_dim-1]
# f=np.zeros(len(x))
# for i,ix in enumerate(x):
# img=self.samples[1,ix]
# L1=self.I[img][r1[i],c1[i],bins]
# L2=self.I[img][r1[i],c2[i],bins]
# L3=self.I[img][r2[i],c1[i],bins]
# L4=self.I[img][r2[i],c2[i],bins]
# f[i]=float(L4+L1-L2-L3)
# return f
f=np.zeros(len(x))
f=self.I[x[:],theta[0]]
return f
def getParam(self,x):
"""
Extract thetas and taus from a bag x.
        This is called by the engine and the results are gathered by
the master node.
input:
x: [1D ndarray dtype=np.uint32] - A bag.
output:
thetas: [2D ndarray float] rmax=len(x), cmax=theta_dim
taus: [1D ndarray dtype=np.uint32]
Description:
            In the spiral case, only the first row of the thetas is used.
"""
#3 self.theta_dim: [0_r1, 1_c1, 2_r2, 3_c2, 4_bin]
#6 self.samples: samples[x]=[0_class, 1_img, 2_row, 3_column]^T
# N=len(x)//1 #divided by minbagsize
N=len(x)
if N>self.n_proposal:
x=np.random.permutation(x)[:self.n_proposal]
# else:
# x=np.random.permutation(x)[:N]
# print x
#ux=np.random.randint(-mtran,mtran,size=len(x))
#uy=np.random.randint(-mtran,mtran,size=len(x))
#hx=np.random.randint(8,mrec,size=len(x))
#hy=np.random.randint(8,mrec,size=len(x))
#bins=np.random.randint(0,self.dim_bin,size=len(x))
thetas=np.zeros((len(x),self.theta_dim))
thetas[:,0]=np.random.randint(0,self.theta_range,size=len(x))
        thetas = thetas.astype(int)
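        # The candidate thresholds (taus) are the feature responses of the
        # sampled points under the proposed thetas.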
taus = self.getIs(thetas, x)
return thetas,taus
def show(self):
"""Output the dataset to standard output."""
#show dataset
print self.samples
if __name__ == '__main__':
# import matplotlib.pyplot as plt
dset=dataset()
print dset
x=dset.getX()
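    # A minimal (hypothetical) round trip with the methods above:
    #   thetas, taus = dset.getParam(x)   # random feature proposals and their responses
    #   labels = dset.getL(x)             # labels of the sampled indices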
# print("number of images: {}".format(len(dset.I)))
# markers=['ko','ro','go','bo','po']
# for i in xrange(len(dset.jsonfiles)):
# f=open(dset.jsonfiles[i],"r")
# js=json.loads(f.read())
# f.close()
# img_path= rootdir + js['path'][1:]
# print(img_path)
# im=np.array(Image.open(img_path).convert('L'))
# plt.hold(False)
# plt.imshow(im)
# plt.hold(True)
# for j in range(dset.size):
# #samples[x]=[0_class,1_img, 2_row, 3_column]^T
# if dset.samples[1,j]==i:
# plt.plot(dset.samples[3,j],dset.samples[2,j],markers[dset.samples[0,j]])
# plt.set_cmap('gray')
# plt.show()
# plt.ginput()
# plt.close('all')
#--
|
|
""" Lin G. et al "`RefineNet: Multi-Path Refinement Networks for High-Resolution Semantic Segmentation
<https://arxiv.org/abs/1611.06612>`_"
"""
import tensorflow as tf
from .layers import conv_block
from . import TFModel
from .resnet import ResNet, ResNet101
class RefineNet(TFModel):
""" RefineNet
**Configuration**
inputs : dict
dict with 'images' and 'masks' (see :meth:`~.TFModel._make_inputs`)
body : dict
encoder : dict
base_class : TFModel
a model implementing ``make_encoder`` method which returns tensors
with encoded representation of the inputs
other args
parameters for base class ``make_encoder`` method
filters : list of int
number of filters in each decoder block (default=[512, 256, 256, 256])
upsample : dict
:meth:`~.TFModel.upsample` parameters to use in each decoder block
head : dict
num_classes : int
number of semantic classes
"""
@classmethod
def default_config(cls):
""" Define model defaults. See :meth: `~.TFModel.default_config` """
config = TFModel.default_config()
filters = 64 # number of filters in the first block
config['initial_block'] += dict(layout='cna cna', filters=filters, kernel_size=3,
strides=1, pool_strides=1)
config['body']['encoder'] = dict(base_class=ResNet101)
config['body']['filters'] = [512, 256, 256, 256]
config['body']['upsample'] = dict(layout='b', factor=2)
config['loss'] = 'ce'
return config
def build_config(self, names=None):
""" Define model's architecture configuration. See :meth: `~.TFModel.build_config` """
config = super().build_config(names)
if config.get('head/num_classes') is None:
config['head/num_classes'] = self.num_classes('targets')
return config
@classmethod
def body(cls, inputs, name='body', **kwargs):
""" Base layers
Parameters
----------
inputs : tf.Tensor
input tensor
filters : tuple of int
number of filters in decoder blocks
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body', **kwargs)
encoder = kwargs.pop('encoder')
filters = kwargs.pop('filters')
with tf.variable_scope(name):
encoder_outputs = cls.encoder(inputs, filters=filters[::-1],
**encoder, **kwargs)
x = None
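            # Walk encoder outputs from deepest to shallowest, fusing each one
            # with the output of the previous decoder block.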
for i, tensor in enumerate(encoder_outputs[1:][::-1]):
decoder_inputs = tensor if x is None else (tensor, x)
x = cls.decoder_block(decoder_inputs, filters=filters[i], name='decoder-'+str(i), **kwargs)
return x
@classmethod
def head(cls, inputs, targets, num_classes, layout='c', kernel_size=1, name='head', **kwargs):
""" The last network layers which produce predictions. See :meth: `~.TFModel.head` """
with tf.variable_scope(name):
x, inputs = inputs, None
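            # Crop to the spatial size of the targets, then apply the final
            # classification convolution (1x1 by default).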
x = cls.crop(x, targets, kwargs['data_format'])
x = conv_block(x, layout=layout, filters=num_classes, kernel_size=kernel_size, **kwargs)
return x
@classmethod
def encoder(cls, inputs, base_class, name='encoder', **kwargs):
""" Create encoder from a base_class model
Parameters
----------
inputs : tf.Tensor
input tensor
base_class : TFModel
a model class (default=ResNet101).
Should implement ``make_encoder`` method.
name : str
scope name
kwargs : dict
parameters for ``make_encoder`` method
Returns
-------
tf.Tensor
"""
x = base_class.make_encoder(inputs, name=name, **kwargs)
return x
@classmethod
def block(cls, inputs, filters=None, name='block', **kwargs):
""" RefineNet block with Residual Conv Unit, Multi-resolution fusion and Chained-residual pooling.
Parameters
----------
inputs : tuple of tf.Tensor
input tensors (the first should have the largest spatial dimension)
filters : int
the number of output filters for all convolutions within the block
name : str
scope name
kwargs : dict
upsample : dict
upsample params
Returns
-------
tf.Tensor
"""
upsample_args = cls.pop('upsample', kwargs)
upsample_args = {**kwargs, **upsample_args}
with tf.variable_scope(name):
# filters = min([cls.num_channels(t, data_format=kwargs['data_format']) for t in inputs])
# Residual Conv Unit
after_rcu = []
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
for i, tensor in enumerate(inputs):
x = ResNet.double_block(tensor, filters=filters, layout='acac',
bottleneck=False, downsample=False,
name='rcu-%d' % i, **kwargs)
after_rcu.append(x)
inputs = None
# Multi-resolution fusion
with tf.variable_scope('mrf'):
after_mrf = 0
for i, tensor in enumerate(after_rcu):
x = conv_block(tensor, layout='ac', filters=filters, kernel_size=3,
name='conv-%d' % i, **kwargs)
if i != 0:
x = cls.upsample(x, name='upsample-%d' % i, **upsample_args)
after_mrf += x
# free memory
x, after_mrf = after_mrf, None
after_rcu = None
# Chained-residual pooling
x = tf.nn.relu(x)
after_crp = x
num_pools = 4
for i in range(num_pools):
x = conv_block(x, layout='pc', filters=filters, kernel_size=3, strides=1,
pool_size=5, pool_strides=1, name='rcp-%d' % i, **kwargs)
after_crp += x
x, after_crp = after_crp, None
x = ResNet.double_block(x, layout='ac ac', filters=filters, bottleneck=False, downsample=False,
name='rcu-last', **kwargs)
x = tf.identity(x, name='output')
return x
@classmethod
def decoder_block(cls, inputs, filters, name, **kwargs):
""" Call RefineNet block
Parameters
----------
inputs : tf.Tensor
input tensor
filters : int
number of output filters
name : str
scope name
Returns
-------
tf.Tensor
"""
return cls.block(inputs, filters=filters, name=name, **kwargs)
|
|
import unittest
import tkinter
import os
from tkinter import ttk
from test.support import requires, run_unittest
import tkinter.test.support as support
from tkinter.test.test_ttk.test_functions import MockTclObj, MockStateSpec
requires('gui')
class WidgetTest(unittest.TestCase):
"""Tests methods available in every ttk widget."""
def setUp(self):
support.root_deiconify()
self.widget = ttk.Button(width=0, text="Text")
self.widget.pack()
self.widget.wait_visibility()
def tearDown(self):
self.widget.destroy()
support.root_withdraw()
def test_identify(self):
self.widget.update_idletasks()
self.assertEqual(self.widget.identify(
int(self.widget.winfo_width() / 2),
int(self.widget.winfo_height() / 2)
), "label")
self.assertEqual(self.widget.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.widget.identify, None, 5)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, None)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, '')
def test_widget_state(self):
# XXX not sure about the portability of all these tests
self.assertEqual(self.widget.state(), ())
self.assertEqual(self.widget.instate(['!disabled']), True)
# changing from !disabled to disabled
self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))
# no state change
self.assertEqual(self.widget.state(['disabled']), ())
        # change back to !disabled but also active
self.assertEqual(self.widget.state(['!disabled', 'active']),
('!active', 'disabled'))
# no state changes, again
self.assertEqual(self.widget.state(['!disabled', 'active']), ())
self.assertEqual(self.widget.state(['active', '!disabled']), ())
def test_cb(arg1, **kw):
return arg1, kw
self.assertEqual(self.widget.instate(['!disabled'],
test_cb, "hi", **{"msg": "there"}),
('hi', {'msg': 'there'}))
# attempt to set invalid statespec
currstate = self.widget.state()
self.assertRaises(tkinter.TclError, self.widget.instate,
['badstate'])
self.assertRaises(tkinter.TclError, self.widget.instate,
['disabled', 'badstate'])
# verify that widget didn't change its state
self.assertEqual(currstate, self.widget.state())
# ensuring that passing None as state doesn't modify current state
self.widget.state(['active', '!disabled'])
self.assertEqual(self.widget.state(), ('active', ))
class ButtonTest(unittest.TestCase):
def test_invoke(self):
success = []
btn = ttk.Button(command=lambda: success.append(1))
btn.invoke()
self.assertTrue(success)
class CheckbuttonTest(unittest.TestCase):
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
cbtn = ttk.Checkbutton(command=cb_test)
# the variable automatically created by ttk.Checkbutton is actually
# undefined till we invoke the Checkbutton
self.assertEqual(cbtn.state(), ('alternate', ))
self.assertRaises(tkinter.TclError, cbtn.tk.globalgetvar,
cbtn['variable'])
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(cbtn['onvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertTrue(success)
cbtn['command'] = ''
res = cbtn.invoke()
self.assertEqual(res, '')
self.assertFalse(len(success) > 1)
self.assertEqual(cbtn['offvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
class ComboboxTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.combo = ttk.Combobox()
def tearDown(self):
self.combo.destroy()
support.root_withdraw()
def _show_drop_down_listbox(self):
width = self.combo.winfo_width()
self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
self.combo.update_idletasks()
def test_virtual_event(self):
success = []
self.combo['values'] = [1]
self.combo.bind('<<ComboboxSelected>>',
lambda evt: success.append(True))
self.combo.pack()
self.combo.wait_visibility()
height = self.combo.winfo_height()
self._show_drop_down_listbox()
self.combo.update()
self.combo.event_generate('<Return>')
self.combo.update()
self.assertTrue(success)
def test_postcommand(self):
success = []
self.combo['postcommand'] = lambda: success.append(True)
self.combo.pack()
self.combo.wait_visibility()
self._show_drop_down_listbox()
self.assertTrue(success)
# testing postcommand removal
self.combo['postcommand'] = ''
self._show_drop_down_listbox()
self.assertEqual(len(success), 1)
def test_values(self):
def check_get_current(getval, currval):
self.assertEqual(self.combo.get(), getval)
self.assertEqual(self.combo.current(), currval)
check_get_current('', -1)
self.combo['values'] = ['a', 1, 'c']
self.combo.set('c')
check_get_current('c', 2)
self.combo.current(0)
check_get_current('a', 0)
self.combo.set('d')
check_get_current('d', -1)
# testing values with empty string
self.combo.set('')
self.combo['values'] = (1, 2, '', 3)
check_get_current('', 2)
# testing values with empty string set through configure
self.combo.configure(values=[1, '', 2])
self.assertEqual(self.combo['values'], ('1', '', '2'))
# out of range
self.assertRaises(tkinter.TclError, self.combo.current,
len(self.combo['values']))
# it expects an integer (or something that can be converted to int)
self.assertRaises(tkinter.TclError, self.combo.current, '')
# testing creating combobox with empty string in values
combo2 = ttk.Combobox(values=[1, 2, ''])
self.assertEqual(combo2['values'], ('1', '2', ''))
combo2.destroy()
class EntryTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.entry = ttk.Entry()
def tearDown(self):
self.entry.destroy()
support.root_withdraw()
def test_bbox(self):
self.assertEqual(len(self.entry.bbox(0)), 4)
for item in self.entry.bbox(0):
self.assertTrue(isinstance(item, int))
self.assertRaises(tkinter.TclError, self.entry.bbox, 'noindex')
self.assertRaises(tkinter.TclError, self.entry.bbox, None)
def test_identify(self):
self.entry.pack()
self.entry.wait_visibility()
self.entry.update_idletasks()
self.assertEqual(self.entry.identify(5, 5), "textarea")
self.assertEqual(self.entry.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.entry.identify, None, 5)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, None)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, '')
def test_validation_options(self):
success = []
test_invalid = lambda: success.append(True)
self.entry['validate'] = 'none'
self.entry['validatecommand'] = lambda: False
self.entry['invalidcommand'] = test_invalid
self.entry.validate()
self.assertTrue(success)
self.entry['invalidcommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['invalidcommand'] = test_invalid
self.entry['validatecommand'] = lambda: True
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = True
self.assertRaises(tkinter.TclError, self.entry.validate)
def test_validation(self):
validation = []
def validate(to_insert):
if not 'a' <= to_insert.lower() <= 'z':
validation.append(False)
return False
validation.append(True)
return True
self.entry['validate'] = 'key'
self.entry['validatecommand'] = self.entry.register(validate), '%S'
self.entry.insert('end', 1)
self.entry.insert('end', 'a')
self.assertEqual(validation, [False, True])
self.assertEqual(self.entry.get(), 'a')
def test_revalidation(self):
def validate(content):
for letter in content:
if not 'a' <= letter.lower() <= 'z':
return False
return True
self.entry['validatecommand'] = self.entry.register(validate), '%P'
self.entry.insert('end', 'avocado')
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
self.entry.delete(0, 'end')
self.assertEqual(self.entry.get(), '')
self.entry.insert('end', 'a1b')
self.assertEqual(self.entry.validate(), False)
self.assertEqual(self.entry.state(), ('invalid', ))
self.entry.delete(1)
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
class PanedwindowTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.paned = ttk.Panedwindow()
def tearDown(self):
self.paned.destroy()
support.root_withdraw()
def test_add(self):
# attempt to add a child that is not a direct child of the paned window
label = ttk.Label(self.paned)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
label.destroy()
child.destroy()
# another attempt
label = ttk.Label()
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
child.destroy()
label.destroy()
good_child = ttk.Label()
self.paned.add(good_child)
# re-adding a child is not accepted
self.assertRaises(tkinter.TclError, self.paned.add, good_child)
other_child = ttk.Label(self.paned)
self.paned.add(other_child)
self.assertEqual(self.paned.pane(0), self.paned.pane(1))
self.assertRaises(tkinter.TclError, self.paned.pane, 2)
good_child.destroy()
other_child.destroy()
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.paned.forget, None)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
self.paned.add(ttk.Label())
self.paned.forget(0)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
def test_insert(self):
self.assertRaises(tkinter.TclError, self.paned.insert, None, 0)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, None)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, 0)
child = ttk.Label()
child2 = ttk.Label()
child3 = ttk.Label()
self.assertRaises(tkinter.TclError, self.paned.insert, 0, child)
self.paned.insert('end', child2)
self.paned.insert(0, child)
self.assertEqual(self.paned.panes(), (str(child), str(child2)))
self.paned.insert(0, child2)
self.assertEqual(self.paned.panes(), (str(child2), str(child)))
self.paned.insert('end', child3)
self.assertEqual(self.paned.panes(),
(str(child2), str(child), str(child3)))
# reinserting a child should move it to its current position
panes = self.paned.panes()
self.paned.insert('end', child3)
self.assertEqual(panes, self.paned.panes())
# moving child3 to child2 position should result in child2 ending up
# in previous child position and child ending up in previous child3
# position
self.paned.insert(child2, child3)
self.assertEqual(self.paned.panes(),
(str(child3), str(child2), str(child)))
def test_pane(self):
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
child = ttk.Label()
self.paned.add(child)
self.assertTrue(isinstance(self.paned.pane(0), dict))
self.assertEqual(self.paned.pane(0, weight=None), 0)
# newer form for querying a single option
self.assertEqual(self.paned.pane(0, 'weight'), 0)
self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))
self.assertRaises(tkinter.TclError, self.paned.pane, 0,
badoption='somevalue')
def test_sashpos(self):
self.assertRaises(tkinter.TclError, self.paned.sashpos, None)
self.assertRaises(tkinter.TclError, self.paned.sashpos, '')
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child = ttk.Label(self.paned, text='a')
self.paned.add(child, weight=1)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child2 = ttk.Label(self.paned, text='b')
self.paned.add(child2)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 1)
self.paned.pack(expand=True, fill='both')
self.paned.wait_visibility()
curr_pos = self.paned.sashpos(0)
self.paned.sashpos(0, 1000)
self.assertTrue(curr_pos != self.paned.sashpos(0))
self.assertTrue(isinstance(self.paned.sashpos(0), int))
class RadiobuttonTest(unittest.TestCase):
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
myvar = tkinter.IntVar()
cbtn = ttk.Radiobutton(command=cb_test, variable=myvar, value=0)
cbtn2 = ttk.Radiobutton(command=cb_test, variable=myvar, value=1)
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(cbtn['value'], myvar.get())
self.assertEqual(myvar.get(),
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertTrue(success)
cbtn2['command'] = ''
res = cbtn2.invoke()
self.assertEqual(res, '')
self.assertFalse(len(success) > 1)
self.assertEqual(cbtn2['value'], myvar.get())
self.assertEqual(myvar.get(),
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
class ScaleTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.scale = ttk.Scale()
self.scale.pack()
self.scale.update()
def tearDown(self):
self.scale.destroy()
support.root_withdraw()
def test_custom_event(self):
failure = [1, 1, 1] # will need to be empty
funcid = self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())
self.scale['from'] = 10
self.scale['from_'] = 10
self.scale['to'] = 3
self.assertFalse(failure)
failure = [1, 1, 1]
self.scale.configure(from_=2, to=5)
self.scale.configure(from_=0, to=-2)
self.scale.configure(to=10)
self.assertFalse(failure)
def test_get(self):
scale_width = self.scale.winfo_width()
self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])
self.assertEqual(self.scale.get(0, 0), self.scale['from'])
self.assertEqual(self.scale.get(), self.scale['value'])
self.scale['value'] = 30
self.assertEqual(self.scale.get(), self.scale['value'])
self.assertRaises(tkinter.TclError, self.scale.get, '', 0)
self.assertRaises(tkinter.TclError, self.scale.get, 0, '')
def test_set(self):
# set restricts the max/min values according to the current range
max = self.scale['to']
new_max = max + 10
self.scale.set(new_max)
self.assertEqual(self.scale.get(), max)
min = self.scale['from']
self.scale.set(min - 1)
self.assertEqual(self.scale.get(), min)
        # changing the variable directly doesn't impose this limitation, though
var = tkinter.DoubleVar()
self.scale['variable'] = var
var.set(max + 5)
self.assertEqual(self.scale.get(), var.get())
self.assertEqual(self.scale.get(), max + 5)
del var
# the same happens with the value option
self.scale['value'] = max + 10
self.assertEqual(self.scale.get(), max + 10)
self.assertEqual(self.scale.get(), self.scale['value'])
# nevertheless, note that the max/min values we can get specifying
# x, y coords are the ones according to the current range
self.assertEqual(self.scale.get(0, 0), min)
self.assertEqual(self.scale.get(self.scale.winfo_width(), 0), max)
self.assertRaises(tkinter.TclError, self.scale.set, None)
class NotebookTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.nb = ttk.Notebook(padding=0)
self.child1 = ttk.Label()
self.child2 = ttk.Label()
self.nb.add(self.child1, text='a')
self.nb.add(self.child2, text='b')
def tearDown(self):
self.child1.destroy()
self.child2.destroy()
self.nb.destroy()
support.root_withdraw()
def test_tab_identifiers(self):
self.nb.forget(0)
self.nb.hide(self.child2)
self.assertRaises(tkinter.TclError, self.nb.tab, self.child1)
self.assertEqual(self.nb.index('end'), 1)
self.nb.add(self.child2)
self.assertEqual(self.nb.index('end'), 1)
self.nb.select(self.child2)
self.assertTrue(self.nb.tab('current'))
self.nb.add(self.child1, text='a')
self.nb.pack()
self.nb.wait_visibility()
self.assertEqual(self.nb.tab('@5,5'), self.nb.tab('current'))
for i in range(5, 100, 5):
if self.nb.tab('@%d, 5' % i, text=None) == 'a':
break
else:
self.fail("Tab with text 'a' not found")
def test_add_and_hidden(self):
self.assertRaises(tkinter.TclError, self.nb.hide, -1)
self.assertRaises(tkinter.TclError, self.nb.hide, 'hi')
self.assertRaises(tkinter.TclError, self.nb.hide, None)
self.assertRaises(tkinter.TclError, self.nb.add, None)
self.assertRaises(tkinter.TclError, self.nb.add, ttk.Label(),
unknown='option')
tabs = self.nb.tabs()
self.nb.hide(self.child1)
self.nb.add(self.child1)
self.assertEqual(self.nb.tabs(), tabs)
child = ttk.Label()
self.nb.add(child, text='c')
tabs = self.nb.tabs()
curr = self.nb.index('current')
# verify that the tab gets readded at its previous position
child2_index = self.nb.index(self.child2)
self.nb.hide(self.child2)
self.nb.add(self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.assertEqual(self.nb.index(self.child2), child2_index)
self.assertTrue(str(self.child2) == self.nb.tabs()[child2_index])
# but the tab next to it (not hidden) is the one selected now
self.assertEqual(self.nb.index('current'), curr + 1)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.nb.forget, -1)
self.assertRaises(tkinter.TclError, self.nb.forget, 'hi')
self.assertRaises(tkinter.TclError, self.nb.forget, None)
tabs = self.nb.tabs()
child1_index = self.nb.index(self.child1)
self.nb.forget(self.child1)
self.assertFalse(str(self.child1) in self.nb.tabs())
self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))
self.nb.add(self.child1)
self.assertEqual(self.nb.index(self.child1), 1)
self.assertFalse(child1_index == self.nb.index(self.child1))
def test_index(self):
self.assertRaises(tkinter.TclError, self.nb.index, -1)
self.assertRaises(tkinter.TclError, self.nb.index, None)
self.assertTrue(isinstance(self.nb.index('end'), int))
self.assertEqual(self.nb.index(self.child1), 0)
self.assertEqual(self.nb.index(self.child2), 1)
self.assertEqual(self.nb.index('end'), 2)
def test_insert(self):
# moving tabs
tabs = self.nb.tabs()
self.nb.insert(1, tabs[0])
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert(self.child1, self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert('end', self.child1)
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert('end', 0)
self.assertEqual(self.nb.tabs(), tabs)
# bad moves
self.assertRaises(tkinter.TclError, self.nb.insert, 2, tabs[0])
self.assertRaises(tkinter.TclError, self.nb.insert, -1, tabs[0])
# new tab
child3 = ttk.Label()
self.nb.insert(1, child3)
self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
self.nb.forget(child3)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert(self.child1, child3)
self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
self.nb.forget(child3)
self.assertRaises(tkinter.TclError, self.nb.insert, 2, child3)
self.assertRaises(tkinter.TclError, self.nb.insert, -1, child3)
# bad inserts
self.assertRaises(tkinter.TclError, self.nb.insert, 'end', None)
self.assertRaises(tkinter.TclError, self.nb.insert, None, 0)
self.assertRaises(tkinter.TclError, self.nb.insert, None, None)
def test_select(self):
self.nb.pack()
self.nb.wait_visibility()
success = []
tab_changed = []
self.child1.bind('<Unmap>', lambda evt: success.append(True))
self.nb.bind('<<NotebookTabChanged>>',
lambda evt: tab_changed.append(True))
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.select(self.child2)
self.assertTrue(success)
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.update()
self.assertTrue(tab_changed)
def test_tab(self):
self.assertRaises(tkinter.TclError, self.nb.tab, -1)
self.assertRaises(tkinter.TclError, self.nb.tab, 'notab')
self.assertRaises(tkinter.TclError, self.nb.tab, None)
self.assertTrue(isinstance(self.nb.tab(self.child1), dict))
self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
# newer form for querying a single option
self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
self.nb.tab(self.child1, text='abc')
self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')
def test_tabs(self):
self.assertEqual(len(self.nb.tabs()), 2)
self.nb.forget(self.child1)
self.nb.forget(self.child2)
self.assertEqual(self.nb.tabs(), ())
def test_traversal(self):
self.nb.pack()
self.nb.wait_visibility()
self.nb.select(0)
support.simulate_mouse_click(self.nb, 5, 5)
self.nb.focus_force()
self.nb.event_generate('<Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.tab(self.child1, text='a', underline=0)
self.nb.enable_traversal()
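        # after enable_traversal(), Alt + the underlined character selects that tab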
self.nb.focus_force()
support.simulate_mouse_click(self.nb, 5, 5)
self.nb.event_generate('<Alt-a>')
self.assertEqual(self.nb.select(), str(self.child1))
class TreeviewTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.tv = ttk.Treeview(padding=0)
def tearDown(self):
self.tv.destroy()
support.root_withdraw()
def test_bbox(self):
self.tv.pack()
self.assertEqual(self.tv.bbox(''), '')
self.tv.wait_visibility()
self.tv.update()
item_id = self.tv.insert('', 'end')
children = self.tv.get_children()
self.assertTrue(children)
bbox = self.tv.bbox(children[0])
self.assertEqual(len(bbox), 4)
self.assertTrue(isinstance(bbox, tuple))
for item in bbox:
if not isinstance(item, int):
self.fail("Invalid bounding box: %s" % bbox)
break
# compare width in bboxes
self.tv['columns'] = ['test']
self.tv.column('test', width=50)
bbox_column0 = self.tv.bbox(children[0], 0)
root_width = self.tv.column('#0', width=None)
self.assertEqual(bbox_column0[0], bbox[0] + root_width)
# verify that bbox of a closed item is the empty string
child1 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.bbox(child1), '')
def test_children(self):
# no children yet, should get an empty tuple
self.assertEqual(self.tv.get_children(), ())
item_id = self.tv.insert('', 'end')
self.assertTrue(isinstance(self.tv.get_children(), tuple))
self.assertEqual(self.tv.get_children()[0], item_id)
# add item_id and child3 as children of child2
child2 = self.tv.insert('', 'end')
child3 = self.tv.insert('', 'end')
self.tv.set_children(child2, item_id, child3)
self.assertEqual(self.tv.get_children(child2), (item_id, child3))
        # child3 has child2 as parent, thus trying to set child2 as a child
        # of child3 should result in an error
self.assertRaises(tkinter.TclError,
self.tv.set_children, child3, child2)
# remove child2 children
self.tv.set_children(child2)
self.assertEqual(self.tv.get_children(child2), ())
# remove root's children
self.tv.set_children('')
self.assertEqual(self.tv.get_children(), ())
def test_column(self):
# return a dict with all options/values
self.assertTrue(isinstance(self.tv.column('#0'), dict))
# return a single value of the given option
self.assertTrue(isinstance(self.tv.column('#0', width=None), int))
# set a new value for an option
self.tv.column('#0', width=10)
# testing new way to get option value
self.assertEqual(self.tv.column('#0', 'width'), 10)
self.assertEqual(self.tv.column('#0', width=None), 10)
# check read-only option
self.assertRaises(tkinter.TclError, self.tv.column, '#0', id='X')
self.assertRaises(tkinter.TclError, self.tv.column, 'invalid')
invalid_kws = [
{'unknown_option': 'some value'}, {'stretch': 'wrong'},
{'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
]
for kw in invalid_kws:
self.assertRaises(tkinter.TclError, self.tv.column, '#0',
**kw)
def test_delete(self):
self.assertRaises(tkinter.TclError, self.tv.delete, '#0')
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
self.tv.delete(item_id)
self.assertFalse(self.tv.get_children())
# reattach should fail
self.assertRaises(tkinter.TclError,
self.tv.reattach, item_id, '', 'end')
# test multiple item delete
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
self.assertEqual(self.tv.get_children(), (item1, item2))
self.tv.delete(item1, item2)
self.assertFalse(self.tv.get_children())
def test_detach_reattach(self):
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
# calling detach without items is valid, although it does nothing
prev = self.tv.get_children()
self.tv.detach() # this should do nothing
self.assertEqual(prev, self.tv.get_children())
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
# detach item with children
self.tv.detach(item_id)
self.assertFalse(self.tv.get_children())
# reattach item with children
self.tv.reattach(item_id, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
        # move a child to the root
self.tv.move(item2, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, item2))
self.assertEqual(self.tv.get_children(item_id), ())
# bad values
self.assertRaises(tkinter.TclError,
self.tv.reattach, 'nonexistent', '', 'end')
self.assertRaises(tkinter.TclError,
self.tv.detach, 'nonexistent')
self.assertRaises(tkinter.TclError,
self.tv.reattach, item2, 'otherparent', 'end')
self.assertRaises(tkinter.TclError,
self.tv.reattach, item2, '', 'invalid')
# multiple detach
self.tv.detach(item_id, item2)
self.assertEqual(self.tv.get_children(), ())
self.assertEqual(self.tv.get_children(item_id), ())
def test_exists(self):
self.assertEqual(self.tv.exists('something'), False)
self.assertEqual(self.tv.exists(''), True)
self.assertEqual(self.tv.exists({}), False)
# the following will make a tk.call equivalent to
# tk.call(treeview, "exists") which should result in an error
# in the tcl interpreter since tk requires an item.
self.assertRaises(tkinter.TclError, self.tv.exists, None)
def test_focus(self):
# nothing is focused right now
self.assertEqual(self.tv.focus(), '')
item1 = self.tv.insert('', 'end')
self.tv.focus(item1)
self.assertEqual(self.tv.focus(), item1)
self.tv.delete(item1)
self.assertEqual(self.tv.focus(), '')
# try focusing inexistent item
self.assertRaises(tkinter.TclError, self.tv.focus, 'hi')
def test_heading(self):
# check a dict is returned
self.assertTrue(isinstance(self.tv.heading('#0'), dict))
# check a value is returned
self.tv.heading('#0', text='hi')
self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
self.assertEqual(self.tv.heading('#0', text=None), 'hi')
# invalid option
self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
background=None)
# invalid value
self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
anchor=1)
# XXX skipping for now; should be fixed to work with newer ttk
@unittest.skip
def test_heading_callback(self):
def simulate_heading_click(x, y):
support.simulate_mouse_click(self.tv, x, y)
self.tv.update_idletasks()
success = [] # no success for now
self.tv.pack()
self.tv.wait_visibility()
self.tv.heading('#0', command=lambda: success.append(True))
self.tv.column('#0', width=100)
self.tv.update()
# assuming that the coords (5, 5) fall into heading #0
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
success = []
commands = self.tv.master._tclCommands
self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
self.assertEqual(commands, self.tv.master._tclCommands)
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
# XXX The following raises an error in a tcl interpreter, but not in
# Python
#self.tv.heading('#0', command='I dont exist')
#simulate_heading_click(5, 5)
def test_index(self):
# item 'what' doesn't exist
self.assertRaises(tkinter.TclError, self.tv.index, 'what')
self.assertEqual(self.tv.index(''), 0)
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
c1 = self.tv.insert(item1, 'end')
c2 = self.tv.insert(item1, 'end')
self.assertEqual(self.tv.index(item1), 0)
self.assertEqual(self.tv.index(c1), 0)
self.assertEqual(self.tv.index(c2), 1)
self.assertEqual(self.tv.index(item2), 1)
self.tv.move(item2, '', 0)
self.assertEqual(self.tv.index(item2), 0)
self.assertEqual(self.tv.index(item1), 1)
# check that index still works even after its parent and siblings
# have been detached
self.tv.detach(item1)
self.assertEqual(self.tv.index(c2), 1)
self.tv.detach(c1)
self.assertEqual(self.tv.index(c2), 0)
# but it fails after item has been deleted
self.tv.delete(item1)
self.assertRaises(tkinter.TclError, self.tv.index, c2)
def test_insert_item(self):
# parent 'none' doesn't exist
self.assertRaises(tkinter.TclError, self.tv.insert, 'none', 'end')
# open values
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
open='')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
open='please')
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))
# invalid index
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'middle')
# trying to duplicate item id is invalid
itemid = self.tv.insert('', 'end', 'first-item')
self.assertEqual(itemid, 'first-item')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
'first-item')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
MockTclObj('first-item'))
# unicode values
value = '\xe1ba'
item = self.tv.insert('', 'end', values=(value, ))
self.assertEqual(self.tv.item(item, 'values'), (value, ))
self.assertEqual(self.tv.item(item, values=None), (value, ))
self.tv.item(item, values=list(self.tv.item(item, values=None)))
self.assertEqual(self.tv.item(item, values=None), (value, ))
self.assertTrue(isinstance(self.tv.item(item), dict))
# erase item values
self.tv.item(item, values='')
self.assertFalse(self.tv.item(item, values=None))
# item tags
item = self.tv.insert('', 'end', tags=[1, 2, value])
self.assertEqual(self.tv.item(item, tags=None), ('1', '2', value))
self.tv.item(item, tags=[])
self.assertFalse(self.tv.item(item, tags=None))
self.tv.item(item, tags=(1, 2))
self.assertEqual(self.tv.item(item, tags=None), ('1', '2'))
# values with spaces
item = self.tv.insert('', 'end', values=('a b c',
'%s %s' % (value, value)))
self.assertEqual(self.tv.item(item, values=None),
('a b c', '%s %s' % (value, value)))
# text
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text="Label here"), text=None),
"Label here")
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text=value), text=None),
value)
def test_set(self):
self.tv['columns'] = ['A', 'B']
item = self.tv.insert('', 'end', values=['a', 'b'])
self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})
self.tv.set(item, 'B', 'a')
self.assertEqual(self.tv.item(item, values=None), ('a', 'a'))
self.tv['columns'] = ['B']
self.assertEqual(self.tv.set(item), {'B': 'a'})
self.tv.set(item, 'B', 'b')
self.assertEqual(self.tv.set(item, column='B'), 'b')
self.assertEqual(self.tv.item(item, values=None), ('b', 'a'))
self.tv.set(item, 'B', 123)
self.assertEqual(self.tv.set(item, 'B'), 123)
self.assertEqual(self.tv.item(item, values=None), (123, 'a'))
self.assertEqual(self.tv.set(item), {'B': 123})
# inexistent column
self.assertRaises(tkinter.TclError, self.tv.set, item, 'A')
self.assertRaises(tkinter.TclError, self.tv.set, item, 'A', 'b')
# inexistent item
self.assertRaises(tkinter.TclError, self.tv.set, 'notme')
def test_tag_bind(self):
events = []
item1 = self.tv.insert('', 'end', tags=['call'])
item2 = self.tv.insert('', 'end', tags=['call'])
self.tv.tag_bind('call', '<ButtonPress-1>',
lambda evt: events.append(1))
self.tv.tag_bind('call', '<ButtonRelease-1>',
lambda evt: events.append(2))
self.tv.pack()
self.tv.wait_visibility()
self.tv.update()
pos_y = set()
found = set()
for i in range(0, 100, 10):
if len(found) == 2: # item1 and item2 already found
break
item_id = self.tv.identify_row(i)
if item_id and item_id not in found:
pos_y.add(i)
found.add(item_id)
self.assertEqual(len(pos_y), 2) # item1 and item2 y pos
for y in pos_y:
support.simulate_mouse_click(self.tv, 0, y)
# by now there should be 4 things in the events list, since each
# item had a bind for two events that were simulated above
self.assertEqual(len(events), 4)
for evt in zip(events[::2], events[1::2]):
self.assertEqual(evt, (1, 2))
def test_tag_configure(self):
# Just testing parameter passing for now
self.assertRaises(TypeError, self.tv.tag_configure)
self.assertRaises(tkinter.TclError, self.tv.tag_configure,
'test', sky='blue')
self.tv.tag_configure('test', foreground='blue')
self.assertEqual(str(self.tv.tag_configure('test', 'foreground')),
'blue')
self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
'blue')
self.assertTrue(isinstance(self.tv.tag_configure('test'), dict))
tests_gui = (
WidgetTest, ButtonTest, CheckbuttonTest, RadiobuttonTest,
ComboboxTest, EntryTest, PanedwindowTest, ScaleTest, NotebookTest,
TreeviewTest
)
if __name__ == "__main__":
run_unittest(*tests_gui)
|
|
#! /usr/bin/env python
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE. Contact: [email protected]
#
# pylint: disable=invalid-name,missing-docstring,no-member
"""
Find all reads connected to the given contigs on a per-partition basis.
% sweep-reads.py -r <range> <contigs fastp> \
    <reads1> <reads2> ... <readsN>
"""
from __future__ import print_function, unicode_literals
from io import open
from khmer import utils
EPILOG = """
Output will be a collection of files corresponding to the partitions;
each partition gets a file (prefixed with the output prefix option),
which means this could output many tens or hundreds of thousands of files.
Users should plan accordingly.
This script is very lenient on IO errors, due to the large number of file
operations needed. Thus, errors opening a file for buffer flush or writing
a read to a file will not crash the program; instead, if there were errors,
the user will be warned at the end of execution. Errors opening read files
are also handled: if one cannot be opened, we move on to the next read file.
"""
import screed
import sys
from collections import defaultdict
import os
import time
import khmer
from khmer.khmer_args import (build_nodegraph_args, report_on_config, info)
from khmer.kfile import (check_input_files, check_valid_file_exists,
check_space)
from khmer.utils import write_record
DEFAULT_NUM_BUFFERS = 50000
DEFAULT_MAX_READS = 1000000
DEFAULT_BUFFER_SIZE = 10
DEFAULT_OUT_PREF = 'reads'
DEFAULT_RANGE = -1
MAX_HSIZE = 4e7
MIN_KSIZE = 21
def fmt_fasta(name, seq, labels=[]):
return '>{name}\t{labels}\n{seq}\n'.format(
name=name, labels='\t'.join([str(l) for l in labels]), seq=seq)
def fmt_fastq(name, seq, quality, labels=[]):
return '@{name}\t{labels}\n{seq}\n+\n{acc}\n'.format(
name=name, labels='\t'.join([str(l) for l in labels]), seq=seq,
acc=quality)
class ReadBuffer(object):
def __init__(self):
self.buf = []
def push(self, seq_str):
self.buf.append(seq_str)
def flush(self):
return ''.join(self.buf)
    def is_full(self, full):
        return len(self.buf) >= full
def __len__(self):
return len(self.buf)
class ReadBufferManager(object):
def __init__(self, max_buffers, max_reads, max_size, output_pref, outdir,
extension):
self.buffers = {}
self.buffer_counts = {}
self.max_buffers = max_buffers
self.max_reads = max_reads
self.extension = extension
self.output_pref = output_pref
self.outdir = outdir
self.buffer_flush = max_size
self.cur_reads = 0
self.cur_files = 0
self.num_write_errors = 0
self.num_file_errors = 0
print('''Init new ReadBuffer [
Max Buffers: {num_bufs}
Max Reads: {max_reads}
Buffer flush: {buf_flush}
]'''.format(num_bufs=self.max_buffers, max_reads=self.max_reads,
buf_flush=self.buffer_flush), file=sys.stderr)
def flush_buffer(self, buf_id):
fn = '{prefix}_{buffer_id}.{ext}'.format(prefix=self.output_pref,
buffer_id=buf_id,
ext=self.extension)
fpath = os.path.join(self.outdir, fn)
buf = self.buffers[buf_id]
try:
outfp = open(fpath, 'a')
except (IOError, OSError) as _:
print('!! ERROR: {_} !!'.format(_=_), file=sys.stderr)
print('*** Failed to open {fn} for \
buffer flush'.format(fn=fpath), file=sys.stderr)
self.num_file_errors += 1
else:
outfp.write(buf.flush())
outfp.close()
finally:
self.cur_reads -= len(buf)
del self.buffers[buf_id]
def queue(self, seq_str, buf_id):
if buf_id in self.buffers:
self.buffers[buf_id].push(seq_str)
if self.buffers[buf_id].is_full(self.buffer_flush):
self.flush_buffer(buf_id)
else:
new_buf = ReadBuffer()
new_buf.push(seq_str)
self.buffers[buf_id] = new_buf
self.cur_reads += 1
if self.cur_reads > self.max_reads:
print('** Reached max num reads...', file=sys.stderr)
self.flush_all()
if len(self.buffers) > self.max_buffers:
# self.clean_buffers(2)
print('** Reached max num buffers...', file=sys.stderr)
self.flush_all()
def flush_all(self):
print('*** Flushing all to files...', file=sys.stderr)
buf_ids = list(self.buffers.keys())
for buf_id in buf_ids:
self.flush_buffer(buf_id)
assert self.cur_reads == 0
def get_parser():
parser = build_nodegraph_args('Takes a partitioned reference file \
and a list of reads, and sorts reads \
by which partition they connect to')
parser.epilog = EPILOG
parser.add_argument(
'-r', '--traversal_range', type=int, dest='traversal_range',
default=DEFAULT_RANGE, help='depth of breadth-first search to perform\
from each read')
parser.add_argument('-b', '--buffer_size', dest='max_reads', type=int,
default=DEFAULT_MAX_READS,
help='Max total reads to buffer before flushing')
parser.add_argument('-l', '--buffer_length', dest='buffer_size', type=int,
default=DEFAULT_BUFFER_SIZE,
help='Max length of an individual label buffer \
before flushing')
parser.add_argument('--prefix', dest='output_prefix',
default=DEFAULT_OUT_PREF,
help='Prefix for sorted read files')
parser.add_argument('--outdir', dest='outdir',
help='output directory; default is location of \
fastp file')
parser.add_argument('-m', '--max_buffers', dest='max_buffers', type=int,
default=DEFAULT_NUM_BUFFERS,
help='Max individual label buffers before flushing')
labeling = parser.add_mutually_exclusive_group(required=True)
labeling.add_argument('--label-by-pid', dest='label_by_pid',
action='store_true', help='separate reads by\
reference partition id')
labeling.add_argument('--label-by-seq', dest='label_by_seq',
action='store_true', help='separate reads by\
reference sequence')
labeling.add_argument('--label-by-group', dest='group_size', type=int,
help='separate reads by arbitrary sized groups\
of reference sequences')
parser.add_argument(dest='input_fastp', help='Reference fasta or fastp')
parser.add_argument('input_files', nargs='+',
help='Reads to be swept and sorted')
parser.add_argument('-f', '--force', default=False, action='store_true',
help='Overwrite output file if it exists')
return parser
def main():
info('sweep-reads-buffered.py', ['sweep'])
parser = get_parser()
args = parser.parse_args()
if args.max_tablesize < MAX_HSIZE:
args.max_tablesize = MAX_HSIZE
if args.ksize < MIN_KSIZE:
args.ksize = MIN_KSIZE
report_on_config(args, graphtype='nodegraph')
K = args.ksize
HT_SIZE = args.max_tablesize
N_HT = args.n_tables
traversal_range = args.traversal_range
input_fastp = args.input_fastp
if not args.outdir:
outdir = os.path.dirname(input_fastp)
else:
outdir = args.outdir
max_buffers = args.max_buffers
output_pref = args.output_prefix
buf_size = args.buffer_size
max_reads = args.max_reads
check_input_files(args.input_fastp, args.force)
check_valid_file_exists(args.input_files)
all_input_files = [input_fastp]
all_input_files.extend(args.input_files)
# Check disk space availability
check_space(all_input_files, args.force)
# figure out input file type (FA/FQ) -- based on first file
ix = iter(screed.open(args.input_files[0]))
record = next(ix)
del ix
extension = 'fa'
if hasattr(record, 'quality'): # fastq!
extension = 'fq'
output_buffer = ReadBufferManager(
max_buffers, max_reads, buf_size, output_pref, outdir, extension)
# consume the partitioned fasta with which to label the graph
ht = khmer.GraphLabels(K, HT_SIZE, N_HT)
try:
print('consuming input sequences...', file=sys.stderr)
if args.label_by_pid:
print('...labeling by partition id (pid)', file=sys.stderr)
ht.consume_partitioned_fasta_and_tag_with_labels(input_fastp)
elif args.label_by_seq:
print('...labeling by sequence', file=sys.stderr)
for n, record in enumerate(screed.open(input_fastp)):
if n % 50000 == 0:
print('...consumed {n} sequences...'.format(n=n), file=sys.stderr)
ht.consume_sequence_and_tag_with_labels(record.sequence, n)
else:
print('...labeling to create groups of size {s}'.format(
s=args.group_size), file=sys.stderr)
label = -1
g = 0
try:
outfp = open('{pref}_base_{g}.{ext}'.format(pref=output_pref,
g=g,
ext=extension
), 'wb')
for n, record in enumerate(screed.open(input_fastp)):
if n % args.group_size == 0:
label += 1
if label > g:
g = label
outfp = open('{pref}_base_{g}.{ext}'.format(
pref=output_pref, g=g,
ext=extension), 'wb')
if n % 50000 == 0:
print('...consumed {n} sequences...'.format(n=n), file=sys.stderr)
ht.consume_sequence_and_tag_with_labels(record.sequence,
label)
write_record(record, outfp)
except (IOError, OSError) as e:
print('!! ERROR !!', e, file=sys.stderr)
print('...error splitting input. exiting...', file=sys.stderr)
except (IOError, OSError) as e:
print('!! ERROR: !!', e, file=sys.stderr)
print('...error consuming \
{i}. exiting...'.format(i=input_fastp), file=sys.stderr)
print('done consuming input sequence. \
added {t} tags and {l} \
labels...'.format(t=ht.graph.n_tags(),
l=ht.n_labels()))
label_dict = defaultdict(int)
label_number_dist = []
n_orphaned = 0
n_labeled = 0
n_mlabeled = 0
total_t = time.clock()
start_t = time.clock()
for read_file in args.input_files:
print('** sweeping {read_file} for labels...'.format(
read_file=read_file), file=sys.stderr)
file_t = 0.0
try:
read_fp = screed.open(read_file)
except (IOError, OSError) as error:
print('!! ERROR: !!', error, file=sys.stderr)
print('*** Could not open {fn}, skipping...'.format(
fn=read_file), file=sys.stderr)
else:
            for n, record in enumerate(read_fp):
                if n % 50000 == 0:
end_t = time.clock()
batch_t = end_t - start_t
file_t += batch_t
print('\tswept {n} reads [{nc} labeled, \
{no} orphaned] \
** {sec}s ({sect}s total)' \
                      .format(n=n, nc=n_labeled,
no=n_orphaned,
sec=batch_t, sect=file_t), file=sys.stderr)
start_t = time.clock()
seq = record.sequence
name = record.name
try:
labels = ht.sweep_label_neighborhood(seq, traversal_range)
except ValueError as e:
pass
else:
if hasattr(record, 'quality'):
seq_str = fmt_fastq(name, seq, record.quality, labels)
else:
seq_str = fmt_fasta(name, seq, labels)
label_number_dist.append(len(labels))
if labels:
n_labeled += 1
if len(labels) > 1:
output_buffer.queue(seq_str, 'multi')
n_mlabeled += 1
label_dict['multi'] += 1
else:
output_buffer.queue(seq_str, labels[0])
label_dict[labels[0]] += 1
else:
n_orphaned += 1
output_buffer.queue(seq_str, 'orphaned')
label_dict['orphaned'] += 1
print('** End of file {fn}...'.format(fn=read_file), file=sys.stderr)
output_buffer.flush_all()
read_fp.close()
# gotta output anything left in the buffers at the end!
print('** End of run...', file=sys.stderr)
output_buffer.flush_all()
total_t = time.clock() - total_t
if output_buffer.num_write_errors > 0 or output_buffer.num_file_errors > 0:
print('! WARNING: Sweep finished with errors !', file=sys.stderr)
print('** {writee} reads not written'.format(
writee=output_buffer.num_write_errors), file=sys.stderr)
print('** {filee} errors opening files'.format(
filee=output_buffer.num_file_errors), file=sys.stderr)
print('swept {n_reads} for labels...'.format(
n_reads=n_labeled + n_orphaned), file=sys.stderr)
print('...with {nc} labeled and {no} orphaned'.format(
nc=n_labeled, no=n_orphaned), file=sys.stderr)
print('...and {nmc} multilabeled'.format(nmc=n_mlabeled), file=sys.stderr)
print('** outputting label number distribution...', file=sys.stderr)
fn = os.path.join(outdir, '{pref}.dist.txt'.format(pref=output_pref))
with open(fn, 'w', encoding='utf-8') as outfp:
for nc in label_number_dist:
outfp.write('{nc}\n'.format(nc=nc))
fn = os.path.join(outdir, '{pref}.counts.csv'.format(pref=output_pref))
print('** outputting label read counts...', file=sys.stderr)
with open(fn, 'w', encoding='utf-8') as outfp:
for k in label_dict:
outfp.write('{l},{c}\n'.format(l=k, c=label_dict[k]))
if __name__ == '__main__':
main()
|
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import logging
logger = logging.getLogger(__name__)
import time
from matplotlib import pyplot as plt
from collections import deque, defaultdict
"""
roi.py is a benchmark to evaluate the speed difference between:
Option 1:
1. Create binary mask where the roi is True and everything else is False
2. In-place multiply binary mask with data set.
3. Sum masked array
Option 2:
1. Generate list of roi coordinates
a. 1-D [x1, x2, x3 ...]
b. 2-D [(x1, y1), (x2, y2), ... ]
c. 3-D [(x1, y1, z1), (x2, y2, z2), ... ]
2. Iterate over data sets and extract the relevant values at each coordinate
3. Sum coordinate values
"""
def get_bin_mask(dsize, roi_list):
bin_mask = np.zeros(dsize, dtype=bool)
for (x, y) in roi_list:
bin_mask[x, y] = True
return bin_mask
def option_1(data_list, roi_list, bin_mask, stat_func, make_bin_mask=True):
"""
Option 1:
1. Create binary mask where the roi is True and everything else is False
2. In-place multiply binary mask with data set.
3. Sum masked array
Parameters
----------
    data_list : list
        list of ndarrays
    roi_list : list
        coordinate list. len(roi_list[0]) == data_list[0].ndim
    bin_mask : ndarray or None
        precomputed boolean mask, used when make_bin_mask is False
    stat_func : callable
        sum, avg, stddev, etc...
    make_bin_mask : bool, optional
        if True (default), rebuild the binary mask from roi_list
"""
    if make_bin_mask:
bin_mask = get_bin_mask(data_list[0].shape, roi_list)
roi = []
for data in data_list:
masked = np.multiply(data, bin_mask)
roi.append(stat_func(masked))
return roi
def option_1a(data_list, roi_list, bin_mask, stat_func, make_bin_mask=True):
"""
Option 1:
1. Create binary mask where the roi is True and everything else is False
2. In-place multiply binary mask with data set.
3. Sum masked array
Parameters
----------
    data_list : list
        list of ndarrays
    roi_list : list
        coordinate list. len(roi_list[0]) == data_list[0].ndim
    bin_mask : ndarray or None
        precomputed boolean mask, used when make_bin_mask is False
    stat_func : callable
        sum, avg, stddev, etc...
    make_bin_mask : bool, optional
        if True (default), rebuild the binary mask from roi_list
"""
    if make_bin_mask:
bin_mask = get_bin_mask(data_list[0].shape, roi_list)
roi = deque()
for data in data_list:
roi.append(stat_func(data[bin_mask]))
return np.array(roi)
def option_2(data_list, roi_list, stat_func):
"""
Option 2:
1. Generate list of roi coordinates
a. 1-D [x1, x2, x3 ...]
b. 2-D [(x1, y1), (x2, y2), ... ]
c. 3-D [(x1, y1, z1), (x2, y2, z2), ... ]
2. Iterate over data sets and extract the relevant values at each coordinate
3. Sum coordinate values
Parameters
----------
    data_list : list
        list of ndarrays
    roi_list : list
        coordinate list. len(roi_list[0]) == data_list[0].ndim
    stat_func : callable
        sum, avg, stddev, etc... (unused here; this implementation always sums)
"""
roi = []
    for data in data_list:
        cur_val = 0
        for (x, y) in roi_list:
            cur_val += data[x][y]
roi.append(cur_val)
return roi
def option_3(data_list, roi_list, stat_func):
data = np.asarray(data_list)
bin_mask = get_bin_mask(data.shape[1:], roi_list)
return stat_func(data * bin_mask, axis=tuple(range(1, data.ndim)))
def option_4(data_list, roi_list, stat_func):
data = np.asarray(data_list)
bin_mask = get_bin_mask(data.shape[1:], roi_list)
return stat_func(data[:, bin_mask], axis=1)
def datagen_2d(nx, ny, nz):
return [np.ones((nx, ny)) for j in range(nz)]
def get_2d_circle_coords(cx, cy, radius, nx, ny):
min_x = cx - radius
max_x = cx + radius
    min_y = cy - radius
    max_y = cy + radius
if min_x < 0:
min_x = 0
if max_x > nx:
max_x = nx
if min_y < 0:
min_y = 0
if max_y > ny:
max_y = ny
coords_list = []
for y in np.arange(min_y, max_y, 1):
y_rel = y - cy
for x in np.arange(min_x, max_x, 1):
x_rel = x - cx
            dist = np.sqrt(y_rel * y_rel + x_rel * x_rel)
            if dist < radius:
coords_list.append((x, y))
return coords_list
if __name__ == "__main__":
nx = 2048
ny = 2048
nz = 10
cx = nx / 2
cy = ny / 2
radius = 25
stat_func = np.sum
data_list = datagen_2d(nx, ny, nz)
roi_list = get_2d_circle_coords(cx, cy, radius, nx, ny)
print("Approx area of circle: {0}".format(len(roi_list)))
print("Computed area of circle: {0}".format(np.pi * radius * radius))
radii = np.arange(75, 5, -5)
cycles = 5
test_functions = [
{'color': 'r',
'func': option_1,
'kwargs': {'make_bin_mask': True},
'label': 'op1_make',
'ls': '--'},
{'color': 'r',
'func': option_1,
'kwargs': {'make_bin_mask': False},
'label': 'op1_pre',
'ls': '-'},
{'color': 'b',
'func': option_1a,
'kwargs': {'make_bin_mask': True},
'label': 'op1a_make',
'ls': '--'},
{'color': 'b',
'func': option_1a,
'kwargs': {'make_bin_mask': False},
'label': 'op1a_pre',
'ls': '-'},
{'color': 'k',
'func': option_3,
'kwargs': {},
'label': 'op3',
'ls': '-'},
{'color': 'g',
'func': option_4,
'kwargs': {},
'label': 'op4',
'ls': '-'}]
vals = defaultdict(list)
errs = defaultdict(list)
roi_pixels = []
for radius in radii:
roi_list = get_2d_circle_coords(cx, cy, radius, nx, ny)
roi_pixels.append(len(roi_list))
bin_mask = get_bin_mask(data_list[0].shape, roi_list)
for data, label_post_fix in zip((data_list, np.asarray(data_list)),
('_list', '_array')):
for test_dict in test_functions:
# un-pack the useful stuff
label = test_dict['label'] + label_post_fix
t_kw = test_dict['kwargs']
tf = test_dict['func']
time_deque = deque()
# special case option 1
if 'make_bin_mask' in t_kw:
t_kw['bin_mask'] = bin_mask
# loop over number of cycles
for _ in range(cycles):
# get the time
t1 = time.time()
# run the function
res = tf(data_list=data,
roi_list=roi_list, stat_func=stat_func,
**t_kw)
# get the time after
t2 = time.time()
# record the delta
time_deque.append(t2 - t1)
# compute the statistics
vals[label].append(np.mean(time_deque))
errs[label].append(np.std(time_deque))
# do plotting
fig, ax = plt.subplots(1, 1)
for test_dict in test_functions:
c = test_dict['color']
ls = test_dict['ls']
for lw, post_fix in zip((1, 4), ('_list', '_array')):
label = test_dict['label'] + post_fix
ax.errorbar(roi_pixels, vals[label],
yerr=errs[label],
label=label, color=c, linestyle=ls, lw=lw)
ax.legend(loc='upper right')
ax.set_xlabel("Number of pixels in ROI")
ax.set_ylabel("Average time for {0} cycles (s)".format(cycles))
plt.show()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
endpoint for heat AWS-compatible CloudWatch API
"""
from heat.api.aws import exception
from heat.api.aws import utils as api_utils
from heat.common import wsgi
from heat.common import policy
from heat.common import exception as heat_exception
from heat.rpc import client as rpc_client
from heat.rpc import api as engine_api
import heat.openstack.common.rpc.common as rpc_common
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
class WatchController(object):
"""
WSGI controller for CloudWatch resource in heat API
Implements the API actions
"""
def __init__(self, options):
self.options = options
self.engine_rpcapi = rpc_client.EngineClient()
self.policy = policy.Enforcer(scope='cloudwatch')
def _enforce(self, req, action):
"""Authorize an action against the policy.json."""
try:
self.policy.enforce(req.context, action, {})
except heat_exception.Forbidden:
            raise exception.HeatAccessDeniedError(
                "Action %s not allowed for user" % action)
except Exception as ex:
# We expect policy.enforce to either pass or raise Forbidden
# however, if anything else happens, we want to raise
# HeatInternalFailureError, failure to do this results in
# the user getting a big stacktrace spew as an API response
            raise exception.HeatInternalFailureError(
                "Error authorizing action %s" % action)
@staticmethod
def _reformat_dimensions(dims):
'''
Reformat dimensions list into AWS API format
Parameter dims is a list of dicts
'''
newdims = []
        for d in dims:
            for key in d:
newdims.append({'Name': key, 'Value': d[key]})
return newdims
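    # Added comment (illustrative example, not from the original module):
    # _reformat_dimensions([{'AlarmName': 'my-alarm'}, {'StackId': 'some-id'}])
    # returns [{'Name': 'AlarmName', 'Value': 'my-alarm'},
    #          {'Name': 'StackId', 'Value': 'some-id'}], i.e. every key/value
    # pair becomes an AWS-style {'Name': ..., 'Value': ...} dict.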
def delete_alarms(self, req):
"""
Implements DeleteAlarms API action
"""
self._enforce(req, 'DeleteAlarms')
return exception.HeatAPINotImplementedError()
def describe_alarm_history(self, req):
"""
Implements DescribeAlarmHistory API action
"""
self._enforce(req, 'DescribeAlarmHistory')
return exception.HeatAPINotImplementedError()
def describe_alarms(self, req):
"""
Implements DescribeAlarms API action
"""
self._enforce(req, 'DescribeAlarms')
def format_metric_alarm(a):
"""
Reformat engine output into the AWS "MetricAlarm" format
"""
keymap = {
engine_api.WATCH_ACTIONS_ENABLED: 'ActionsEnabled',
engine_api.WATCH_ALARM_ACTIONS: 'AlarmActions',
engine_api.WATCH_TOPIC: 'AlarmArn',
engine_api.WATCH_UPDATED_TIME:
'AlarmConfigurationUpdatedTimestamp',
engine_api.WATCH_DESCRIPTION: 'AlarmDescription',
engine_api.WATCH_NAME: 'AlarmName',
engine_api.WATCH_COMPARISON: 'ComparisonOperator',
engine_api.WATCH_DIMENSIONS: 'Dimensions',
engine_api.WATCH_PERIODS: 'EvaluationPeriods',
engine_api.WATCH_INSUFFICIENT_ACTIONS:
'InsufficientDataActions',
engine_api.WATCH_METRIC_NAME: 'MetricName',
engine_api.WATCH_NAMESPACE: 'Namespace',
engine_api.WATCH_OK_ACTIONS: 'OKActions',
engine_api.WATCH_PERIOD: 'Period',
engine_api.WATCH_STATE_REASON: 'StateReason',
engine_api.WATCH_STATE_REASON_DATA: 'StateReasonData',
engine_api.WATCH_STATE_UPDATED_TIME: 'StateUpdatedTimestamp',
engine_api.WATCH_STATE_VALUE: 'StateValue',
engine_api.WATCH_STATISTIC: 'Statistic',
engine_api.WATCH_THRESHOLD: 'Threshold',
engine_api.WATCH_UNIT: 'Unit'}
# AWS doesn't return StackId in the main MetricAlarm
# structure, so we add StackId as a dimension to all responses
a[engine_api.WATCH_DIMENSIONS].append({'StackId':
a[engine_api.WATCH_STACK_ID]
})
# Reformat dimensions list into AWS API format
a[engine_api.WATCH_DIMENSIONS] = self._reformat_dimensions(
a[engine_api.WATCH_DIMENSIONS])
return api_utils.reformat_dict_keys(keymap, a)
con = req.context
parms = dict(req.params)
try:
name = parms['AlarmName']
except KeyError:
name = None
try:
watch_list = self.engine_rpcapi.show_watch(con, watch_name=name)
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
res = {'MetricAlarms': [format_metric_alarm(a)
for a in watch_list]}
result = api_utils.format_response("DescribeAlarms", res)
return result
def describe_alarms_for_metric(self, req):
"""
Implements DescribeAlarmsForMetric API action
"""
self._enforce(req, 'DescribeAlarmsForMetric')
return exception.HeatAPINotImplementedError()
def disable_alarm_actions(self, req):
"""
Implements DisableAlarmActions API action
"""
self._enforce(req, 'DisableAlarmActions')
return exception.HeatAPINotImplementedError()
def enable_alarm_actions(self, req):
"""
Implements EnableAlarmActions API action
"""
self._enforce(req, 'EnableAlarmActions')
return exception.HeatAPINotImplementedError()
def get_metric_statistics(self, req):
"""
Implements GetMetricStatistics API action
"""
self._enforce(req, 'GetMetricStatistics')
return exception.HeatAPINotImplementedError()
def list_metrics(self, req):
"""
Implements ListMetrics API action
Lists metric datapoints associated with a particular alarm,
or all alarms if none specified
"""
self._enforce(req, 'ListMetrics')
def format_metric_data(d, fil={}):
"""
Reformat engine output into the AWS "Metric" format
Takes an optional filter dict, which is traversed
so a metric dict is only returned if all keys match
the filter dict
"""
dimensions = [
{'AlarmName': d[engine_api.WATCH_DATA_ALARM]},
{'Timestamp': d[engine_api.WATCH_DATA_TIME]}
]
for key in d[engine_api.WATCH_DATA]:
dimensions.append({key: d[engine_api.WATCH_DATA][key]})
newdims = self._reformat_dimensions(dimensions)
result = {
'MetricName': d[engine_api.WATCH_DATA_METRIC],
'Dimensions': newdims,
'Namespace': d[engine_api.WATCH_DATA_NAMESPACE],
}
for f in fil:
try:
value = result[f]
if value != fil[f]:
# Filter criteria not met, return None
return
except KeyError:
logger.warning("Invalid filter key %s, ignoring" % f)
return result
con = req.context
parms = dict(req.params)
# FIXME : Don't yet handle filtering by Dimensions
filter_result = dict((k, v) for (k, v) in parms.iteritems() if k in
("MetricName", "Namespace"))
logger.debug("filter parameters : %s" % filter_result)
try:
# Engine does not currently support query by namespace/metric
# so we pass None/None and do any filtering locally
null_kwargs = {'metric_namespace': None,
'metric_name': None}
watch_data = self.engine_rpcapi.show_watch_metric(con,
**null_kwargs)
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
res = {'Metrics': []}
for d in watch_data:
metric = format_metric_data(d, filter_result)
if metric:
res['Metrics'].append(metric)
result = api_utils.format_response("ListMetrics", res)
return result
def put_metric_alarm(self, req):
"""
Implements PutMetricAlarm API action
"""
self._enforce(req, 'PutMetricAlarm')
return exception.HeatAPINotImplementedError()
def put_metric_data(self, req):
"""
Implements PutMetricData API action
"""
self._enforce(req, 'PutMetricData')
con = req.context
parms = dict(req.params)
namespace = api_utils.get_param_value(parms, 'Namespace')
# Extract data from the request so we can pass it to the engine
# We have to do this in two passes, because the AWS
# query format nests the dimensions within the MetricData
# query-parameter-list (see AWS PutMetricData docs)
# extract_param_list gives a list-of-dict, which we then
# need to process (each dict) for dimensions
metric_data = api_utils.extract_param_list(parms, prefix='MetricData')
if not len(metric_data):
logger.error("Request does not contain required MetricData")
return exception.HeatMissingParameterError("MetricData list")
watch_name = None
dimensions = []
for p in metric_data:
dimension = api_utils.extract_param_pairs(p,
prefix='Dimensions',
keyname='Name',
valuename='Value')
if 'AlarmName' in dimension:
watch_name = dimension['AlarmName']
else:
dimensions.append(dimension)
# Extract the required data from the metric_data
# and format dict to pass to engine
data = {'Namespace': namespace,
api_utils.get_param_value(metric_data[0], 'MetricName'): {
'Unit': api_utils.get_param_value(metric_data[0], 'Unit'),
'Value': api_utils.get_param_value(metric_data[0],
'Value'),
'Dimensions': dimensions}}
try:
self.engine_rpcapi.create_watch_data(con, watch_name, data)
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
result = {'ResponseMetadata': None}
return api_utils.format_response("PutMetricData", result)
def set_alarm_state(self, req):
"""
Implements SetAlarmState API action
"""
self._enforce(req, 'SetAlarmState')
# Map from AWS state names to those used in the engine
state_map = {'OK': engine_api.WATCH_STATE_OK,
'ALARM': engine_api.WATCH_STATE_ALARM,
'INSUFFICIENT_DATA': engine_api.WATCH_STATE_NODATA}
con = req.context
parms = dict(req.params)
# Get mandatory parameters
name = api_utils.get_param_value(parms, 'AlarmName')
state = api_utils.get_param_value(parms, 'StateValue')
if state not in state_map:
logger.error("Invalid state %s, expecting one of %s" %
(state, state_map.keys()))
return exception.HeatInvalidParameterValueError("Invalid state %s"
% state)
# Check for optional parameters
# FIXME : We don't actually do anything with these in the engine yet..
state_reason = None
state_reason_data = None
if 'StateReason' in parms:
state_reason = parms['StateReason']
if 'StateReasonData' in parms:
state_reason_data = parms['StateReasonData']
logger.debug("setting %s to %s" % (name, state_map[state]))
try:
self.engine_rpcapi.set_watch_state(con, watch_name=name,
state=state_map[state])
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
return api_utils.format_response("SetAlarmState", "")
def create_resource(options):
"""
Watch resource factory method.
"""
deserializer = wsgi.JSONRequestDeserializer()
return wsgi.Resource(WatchController(options), deserializer)
|
|
import ast
import builtins
from collections.abc import Mapping
import os
import sys
import threading
import warnings
import yaml
no_default = "__no_default__"
paths = [
os.getenv("DASK_ROOT_CONFIG", "/etc/dask"),
os.path.join(sys.prefix, "etc", "dask"),
os.path.join(os.path.expanduser("~"), ".config", "dask"),
os.path.join(os.path.expanduser("~"), ".dask"),
]
if "DASK_CONFIG" in os.environ:
PATH = os.environ["DASK_CONFIG"]
paths.append(PATH)
else:
PATH = os.path.join(os.path.expanduser("~"), ".config", "dask")
global_config = config = {}
config_lock = threading.Lock()
defaults = []
def canonical_name(k, config):
"""Return the canonical name for a key.
Handles user choice of '-' or '_' conventions by standardizing on whichever
version was set first. If a key already exists in either hyphen or
underscore form, the existing version is the canonical name. If neither
version exists the original key is used as is.
"""
try:
if k in config:
return k
except TypeError:
# config is not a mapping, return the same name as provided
return k
altk = k.replace("_", "-") if "_" in k else k.replace("-", "_")
if altk in config:
return altk
return k
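# Added comment (illustrative examples, not from the original dask source):
#   canonical_name("foo-bar", {"foo_bar": 1}) -> "foo_bar"  (existing spelling wins)
#   canonical_name("foo-bar", {})             -> "foo-bar"  (no match, key kept as-is)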
def update(old, new, priority="new"):
""" Update a nested dictionary with values from another
This is like dict.update except that it smoothly merges nested values
This operates in-place and modifies old
Parameters
----------
priority: string {'old', 'new'}
If new (default) then the new dictionary has preference.
Otherwise the old dictionary does.
Examples
--------
>>> a = {'x': 1, 'y': {'a': 2}}
>>> b = {'x': 2, 'y': {'b': 3}}
>>> update(a, b) # doctest: +SKIP
{'x': 2, 'y': {'a': 2, 'b': 3}}
>>> a = {'x': 1, 'y': {'a': 2}}
>>> b = {'x': 2, 'y': {'b': 3}}
>>> update(a, b, priority='old') # doctest: +SKIP
{'x': 1, 'y': {'a': 2, 'b': 3}}
See Also
--------
dask.config.merge
"""
for k, v in new.items():
k = canonical_name(k, old)
if isinstance(v, Mapping):
if k not in old or old[k] is None:
old[k] = {}
update(old[k], v, priority=priority)
else:
if priority == "new" or k not in old:
old[k] = v
return old
def merge(*dicts):
""" Update a sequence of nested dictionaries
This prefers the values in the latter dictionaries to those in the former
Examples
--------
>>> a = {'x': 1, 'y': {'a': 2}}
>>> b = {'y': {'b': 3}}
>>> merge(a, b) # doctest: +SKIP
{'x': 1, 'y': {'a': 2, 'b': 3}}
See Also
--------
dask.config.update
"""
result = {}
for d in dicts:
update(result, d)
return result
def collect_yaml(paths=paths):
""" Collect configuration from yaml files
This searches through a list of paths, expands to find all yaml or json
files, and then parses each file.
"""
# Find all paths
file_paths = []
for path in paths:
if os.path.exists(path):
if os.path.isdir(path):
try:
file_paths.extend(
sorted(
[
os.path.join(path, p)
for p in os.listdir(path)
if os.path.splitext(p)[1].lower()
in (".json", ".yaml", ".yml")
]
)
)
except OSError:
# Ignore permission errors
pass
else:
file_paths.append(path)
configs = []
# Parse yaml files
for path in file_paths:
try:
with open(path) as f:
data = yaml.safe_load(f.read()) or {}
configs.append(data)
except (OSError, IOError):
# Ignore permission errors
pass
return configs
def collect_env(env=None):
""" Collect config from environment variables
This grabs environment variables of the form "DASK_FOO__BAR_BAZ=123" and
turns these into config variables of the form ``{"foo": {"bar-baz": 123}}``
It transforms the key and value in the following way:
- Lower-cases the key text
- Treats ``__`` (double-underscore) as nested access
- Calls ``ast.literal_eval`` on the value
"""
if env is None:
env = os.environ
d = {}
for name, value in env.items():
if name.startswith("DASK_"):
varname = name[5:].lower().replace("__", ".")
try:
d[varname] = ast.literal_eval(value)
except (SyntaxError, ValueError):
d[varname] = value
result = {}
set(d, config=result)
return result
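# Added comment (illustrative, not from the original dask source): with
# env={"DASK_SCHEDULER__WORK_STEALING": "True"}, collect_env(env) lower-cases the
# key, treats "__" as nesting, literal-evals the value, and returns
# {"scheduler": {"work_stealing": True}}.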
def ensure_file(source, destination=None, comment=True):
"""
Copy file to default location if it does not already exist
This tries to move a default configuration file to a default location if
if does not already exist. It also comments out that file by default.
This is to be used by downstream modules (like dask.distributed) that may
have default configuration files that they wish to include in the default
configuration path.
Parameters
----------
source : string, filename
Source configuration file, typically within a source directory.
destination : string, directory
Destination directory. Configurable by ``DASK_CONFIG`` environment
variable, falling back to ~/.config/dask.
comment : bool, True by default
Whether or not to comment out the config file when copying.
"""
if destination is None:
destination = PATH
# destination is a file and already exists, never overwrite
if os.path.isfile(destination):
return
# If destination is not an existing file, interpret as a directory,
# use the source basename as the filename
directory = destination
destination = os.path.join(directory, os.path.basename(source))
try:
if not os.path.exists(destination):
os.makedirs(directory, exist_ok=True)
# Atomically create destination. Parallel testing discovered
# a race condition where a process can be busy creating the
# destination while another process reads an empty config file.
tmp = "%s.tmp.%d" % (destination, os.getpid())
with open(source) as f:
lines = list(f)
if comment:
lines = [
"# " + line if line.strip() and not line.startswith("#") else line
for line in lines
]
with open(tmp, "w") as f:
f.write("".join(lines))
try:
os.rename(tmp, destination)
except OSError:
os.remove(tmp)
except (IOError, OSError):
pass
class set(object):
""" Temporarily set configuration values within a context manager
Parameters
----------
arg : mapping or None, optional
A mapping of configuration key-value pairs to set.
**kwargs :
Additional key-value pairs to set. If ``arg`` is provided, values set
in ``arg`` will be applied before those in ``kwargs``.
Double-underscores (``__``) in keyword arguments will be replaced with
``.``, allowing nested values to be easily set.
Examples
--------
>>> import dask
Set ``'foo.bar'`` in a context, by providing a mapping.
>>> with dask.config.set({'foo.bar': 123}):
... pass
Set ``'foo.bar'`` in a context, by providing a keyword argument.
>>> with dask.config.set(foo__bar=123):
... pass
Set ``'foo.bar'`` globally.
>>> dask.config.set(foo__bar=123) # doctest: +SKIP
See Also
--------
dask.config.get
"""
def __init__(self, arg=None, config=config, lock=config_lock, **kwargs):
with lock:
self.config = config
self._record = []
if arg is not None:
for key, value in arg.items():
key = check_deprecations(key)
self._assign(key.split("."), value, config)
if kwargs:
for key, value in kwargs.items():
key = key.replace("__", ".")
key = check_deprecations(key)
self._assign(key.split("."), value, config)
def __enter__(self):
return self.config
def __exit__(self, type, value, traceback):
for op, path, value in reversed(self._record):
d = self.config
if op == "replace":
for key in path[:-1]:
d = d.setdefault(key, {})
d[path[-1]] = value
else: # insert
for key in path[:-1]:
try:
d = d[key]
except KeyError:
break
else:
d.pop(path[-1], None)
def _assign(self, keys, value, d, path=(), record=True):
"""Assign value into a nested configuration dictionary
Parameters
----------
keys : Sequence[str]
The nested path of keys to assign the value.
value : object
d : dict
The part of the nested dictionary into which we want to assign the
value
path : Tuple[str], optional
The path history up to this point.
record : bool, optional
Whether this operation needs to be recorded to allow for rollback.
"""
key = canonical_name(keys[0], d)
path = path + (key,)
if len(keys) == 1:
if record:
if key in d:
self._record.append(("replace", path, d[key]))
else:
self._record.append(("insert", path, None))
d[key] = value
else:
if key not in d:
if record:
self._record.append(("insert", path, None))
d[key] = {}
# No need to record subsequent operations after an insert
record = False
self._assign(keys[1:], value, d[key], path, record=record)
def collect(paths=paths, env=None):
"""
Collect configuration from paths and environment variables
Parameters
----------
paths : List[str]
A list of paths to search for yaml config files
env : dict
The system environment variables
Returns
-------
config: dict
See Also
--------
dask.config.refresh: collect configuration and update into primary config
"""
if env is None:
env = os.environ
configs = collect_yaml(paths=paths)
configs.append(collect_env(env=env))
return merge(*configs)
def refresh(config=config, defaults=defaults, **kwargs):
"""
Update configuration by re-reading yaml files and env variables
This mutates the global dask.config.config, or the config parameter if
passed in.
This goes through the following stages:
1. Clearing out all old configuration
2. Updating from the stored defaults from downstream libraries
(see update_defaults)
3. Updating from yaml files and environment variables
Note that some functionality only checks configuration once at startup and
may not change behavior, even if configuration changes. It is recommended
to restart your python process if convenient to ensure that new
configuration changes take place.
See Also
--------
dask.config.collect: for parameters
dask.config.update_defaults
"""
config.clear()
for d in defaults:
update(config, d, priority="old")
update(config, collect(**kwargs))
def get(key, default=no_default, config=config):
"""
Get elements from global config
Use '.' for nested access
Examples
--------
>>> from dask import config
>>> config.get('foo') # doctest: +SKIP
{'x': 1, 'y': 2}
>>> config.get('foo.x') # doctest: +SKIP
1
>>> config.get('foo.x.y', default=123) # doctest: +SKIP
123
See Also
--------
dask.config.set
"""
keys = key.split(".")
result = config
for k in keys:
k = canonical_name(k, result)
try:
result = result[k]
except (TypeError, IndexError, KeyError):
if default is not no_default:
return default
else:
raise
return result
def rename(aliases, config=config):
""" Rename old keys to new keys
This helps migrate older configuration versions over time
"""
old = []
new = {}
for o, n in aliases.items():
value = get(o, None, config=config)
if value is not None:
old.append(o)
new[n] = value
for k in old:
del config[canonical_name(k, config)] # TODO: support nested keys
set(new, config=config)
def update_defaults(new, config=config, defaults=defaults):
""" Add a new set of defaults to the configuration
It does two things:
1. Add the defaults to a global collection to be used by refresh later
2. Updates the global config with the new configuration
prioritizing older values over newer ones
"""
defaults.append(new)
update(config, new, priority="old")
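# Added comment (illustrative, not from the original dask source): if config is
# currently {"x": 1}, then update_defaults({"x": 2, "y": 3}) appends the new dict
# to ``defaults`` and merges it into config with priority="old", leaving
# config == {"x": 1, "y": 3} (existing values win).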
def expand_environment_variables(config):
""" Expand environment variables in a nested config dictionary
This function will recursively search through any nested dictionaries
and/or lists.
Parameters
----------
config : dict, iterable, or str
Input object to search for environment variables
Returns
-------
config : same type as input
Examples
--------
>>> expand_environment_variables({'x': [1, 2, '$USER']}) # doctest: +SKIP
{'x': [1, 2, 'my-username']}
"""
if isinstance(config, Mapping):
return {k: expand_environment_variables(v) for k, v in config.items()}
elif isinstance(config, str):
return os.path.expandvars(config)
elif isinstance(config, (list, tuple, builtins.set)):
return type(config)([expand_environment_variables(v) for v in config])
else:
return config
deprecations = {
"fuse_ave_width": "optimization.fuse.ave-width",
"fuse_max_height": "optimization.fuse.max-height",
"fuse_max_width": "optimization.fuse.max-width",
"fuse_subgraphs": "optimization.fuse.subgraphs",
"fuse_rename_keys": "optimization.fuse.rename-keys",
"fuse_max_depth_new_edges": "optimization.fuse.max-depth-new-edges",
}
def check_deprecations(key: str, deprecations: dict = deprecations):
""" Check if the provided value has been renamed or removed
Parameters
----------
key : str
The configuration key to check
deprecations : Dict[str, str]
The mapping of aliases
Examples
--------
>>> deprecations = {"old_key": "new_key", "invalid": None}
>>> check_deprecations("old_key", deprecations=deprecations) # doctest: +SKIP
UserWarning: Configuration key "old_key" has been deprecated. Please use "new_key" instead.
>>> check_deprecations("invalid", deprecations=deprecations)
Traceback (most recent call last):
...
ValueError: Configuration value "invalid" has been removed
>>> check_deprecations("another_key", deprecations=deprecations)
'another_key'
Returns
-------
new: str
The proper key, whether the original (if no deprecation) or the aliased
value
"""
if key in deprecations:
new = deprecations[key]
if new:
warnings.warn(
'Configuration key "{}" has been deprecated. '
'Please use "{}" instead'.format(key, new)
)
return new
else:
raise ValueError('Configuration value "{}" has been removed'.format(key))
else:
return key
def _initialize():
fn = os.path.join(os.path.dirname(__file__), "dask.yaml")
ensure_file(source=fn)
with open(fn) as f:
_defaults = yaml.safe_load(f)
update_defaults(_defaults)
refresh()
_initialize()
|
|
import os
import py_compile
import shutil
import sys
import tempfile
import unittest
import zipfile
from test import test_support
from java.lang import Thread
import pkgutil
class ClasspathImporterTestCase(unittest.TestCase):
def setUp(self):
self.orig_context = Thread.currentThread().contextClassLoader
def tearDown(self):
Thread.currentThread().contextClassLoader = self.orig_context
# I don't like the checked in jar file bug1239.jar. The *one* thing I
# liked about the tests in bugtests/ is that you could create a java file,
# compile it, jar it and destroy the jar when done. Maybe when we move to
# JDK 6 and can use JSR-199 to do runtime compiling, we can go back to
# that. Anyway, see http://bugs.jython.org/issue1239. In short, jars added
    # with sys.path.append were not getting scanned if they start with a top
# level package we already have, like the "org" in org.python.*
def test_bug1239(self):
with test_support.DirsOnSysPath("Lib/test/bug1239.jar"):
import org.test403javapackage.test403
# different from test_bug1239 in that only a Java package is imported, not
# a Java class. I'd also like to get rid of this checked in test jar.
def test_bug1126(self):
with test_support.DirsOnSysPath("Lib/test/bug1126/bug1126.jar"):
import org.subpackage
class PyclasspathImporterTestCase(unittest.TestCase):
RESOURCE_DATA = "Always look\non the bright side\r\nof life."
def setUp(self):
self.orig_context = Thread.currentThread().contextClassLoader
self.temp_dir = tempfile.mkdtemp()
self.modules = sys.modules.keys()
def tearDown(self):
Thread.currentThread().contextClassLoader = self.orig_context
for module in sys.modules.keys():
if module not in self.modules:
del sys.modules[module]
try:
shutil.rmtree(self.temp_dir)
except OSError:
# On Windows at least we cannot delete the open JAR
pass
def prepareJar(self, orig_jar):
# Create a new copy of the checked-in test jar
orig_jar = test_support.findfile(orig_jar)
jar = os.path.join(self.temp_dir, os.path.basename(orig_jar))
shutil.copy(orig_jar, jar)
return jar
def compileToJar(self, jar, compile_path=''):
# Add a compiled version of prefer_compiled.py to the jar
source = 'prefer_compiled.py'
code = os.path.join(self.temp_dir, source)
with open(code, 'w') as fp:
fp.write('compiled = True')
# Compile that file
py_compile.compile(code)
# Now add the compiled file to the jar
compiled = source.replace('.py', '$py.class')
with zipfile.ZipFile(jar, 'a') as zip:
zip.write(os.path.join(self.temp_dir, compiled),
os.path.join(compile_path, 'jar_pkg', compiled))
return compiled
def addResourceToJar(self, jar, package='jar_pkg'):
name = 'testdata.dat'
with zipfile.ZipFile(jar, 'a') as zip:
zip.writestr(package + '/' + name, self.RESOURCE_DATA)
return name
def checkImports(self, prefix, compiled):
import flat_in_jar
self.assertEquals(flat_in_jar.value, 7)
import jar_pkg
self.assertEquals(prefix + '/jar_pkg/__init__.py', jar_pkg.__file__)
from jar_pkg import prefer_compiled
self.assertEquals(prefix + '/jar_pkg/' + compiled, prefer_compiled.__file__)
self.assert_(prefer_compiled.compiled)
self.assertRaises(NameError, __import__, 'flat_bad')
self.assertRaises(NameError, __import__, 'jar_pkg.bad')
def test_default_pyclasspath(self):
jar = self.prepareJar('classimport.jar')
compiled = self.compileToJar(jar)
Thread.currentThread().contextClassLoader = test_support.make_jar_classloader(jar)
self.checkImports('__pyclasspath__', compiled)
def test_path_in_pyclasspath(self):
jar = self.prepareJar('classimport_Lib.jar')
compiled = self.compileToJar(jar, 'Lib')
Thread.currentThread().contextClassLoader = test_support.make_jar_classloader(jar)
with test_support.DirsOnSysPath():
sys.path = ['__pyclasspath__/Lib']
self.checkImports('__pyclasspath__/Lib', compiled)
def test_loader_is_package(self):
jar = self.prepareJar('classimport.jar')
Thread.currentThread().contextClassLoader = test_support.make_jar_classloader(jar)
mod_name = 'flat_in_jar'
loader = pkgutil.get_loader(mod_name)
self.assertFalse(loader.is_package(mod_name))
self.assertTrue(loader.is_package('jar_pkg'))
self.assertFalse(loader.is_package('jar_pkg.prefer_compiled'))
@unittest.skipIf(test_support.is_jython_posix, "FIXME: failing on Linux issue #2422")
def test_loader_get_code(self):
# Execute Python code out of the JAR
jar = self.prepareJar('classimport.jar')
Thread.currentThread().contextClassLoader = test_support.make_jar_classloader(jar)
loader = pkgutil.get_loader('jar_pkg')
        space = {'value': None, 'compiled': None}
# flat_in_jar contains the assignment value = 7
code = loader.get_code('flat_in_jar')
exec code in space
self.assertEquals(space['value'], 7)
# jar_pkg.prefer_compiled contains the assignment compiled = False
code = loader.get_code('jar_pkg.prefer_compiled')
exec code in space
self.assertEquals(space['compiled'], False)
# Compile a new one containing the assignment compiled = True
self.compileToJar(jar)
code = loader.get_code('jar_pkg.prefer_compiled')
exec code in space
self.assertEquals(space['compiled'], True)
def test_pkgutil_get_data(self):
# Test loader.get_data used via pkgutil
jar = self.prepareJar('classimport.jar')
name = self.addResourceToJar(jar)
Thread.currentThread().contextClassLoader = test_support.make_jar_classloader(jar)
data = pkgutil.get_data('jar_pkg', name)
self.assertIsInstance(data, bytes)
self.assertEqual(data, self.RESOURCE_DATA)
def test_loader_get_data(self):
# Test loader.get_data used via pkgutil.get_loader
jar = self.prepareJar('classimport.jar')
name = self.addResourceToJar(jar)
Thread.currentThread().contextClassLoader = test_support.make_jar_classloader(jar)
loader = pkgutil.get_loader('jar_pkg')
# path is a resource path (not file system path using os.path.sep)
path = 'jar_pkg/' + name
data = loader.get_data(path)
self.assertIsInstance(data, bytes)
self.assertEqual(data, self.RESOURCE_DATA)
def test_importer_get_data(self):
# Test loader.get_data used via pkgutil.get_importer
jar = self.prepareJar('classimport.jar')
name = self.addResourceToJar(jar)
Thread.currentThread().contextClassLoader = test_support.make_jar_classloader(jar)
importer = pkgutil.get_importer('__pyclasspath__/')
# path is a resource path (may be file system path using os.path.sep)
path = os.path.join('jar_pkg', name)
data = importer.get_data(path)
self.assertIsInstance(data, bytes)
self.assertEqual(data, self.RESOURCE_DATA)
# Check works a second time (stream use internal to implementation)
data = importer.get_data(path)
self.assertEqual(data, self.RESOURCE_DATA)
def test_importer_get_source(self):
# Test loader.get_source used via pkgutil.get_importer
jar = self.prepareJar('classimport.jar')
Thread.currentThread().contextClassLoader = test_support.make_jar_classloader(jar)
importer = pkgutil.get_importer('__pyclasspath__/')
# In package
mod = 'jar_pkg.prefer_compiled'
source = importer.get_source(mod)
self.assertIsInstance(source, bytes)
self.assertEqual(source, 'compiled = False\n')
def test_main():
test_support.run_unittest(
ClasspathImporterTestCase,
PyclasspathImporterTestCase
)
if __name__ == '__main__':
test_main()
|
|
#!/usr/bin/env python
import unittest
from test import test_support
import errno
import socket
import select
import thread, threading
import time
import traceback
import Queue
import sys
import os
import array
from weakref import proxy
import signal
HOST = test_support.HOST
MSG = 'Michael Gilfix was here\n'
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = test_support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = test_support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = Queue.Queue(1)
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
self.__setUp()
if not self.server_ready.is_set():
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if not self.queue.empty():
msg = self.queue.get()
self.fail(msg)
def clientRun(self, test_func):
self.server_ready.wait()
self.client_ready.set()
self.clientSetUp()
if not callable(test_func):
raise TypeError, "test_func must be a callable function"
try:
test_func()
except Exception, strerror:
self.queue.put(strerror)
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError, "clientSetUp must be implemented."
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SocketConnectedTest(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
def raise_error(*args, **kwargs):
raise socket.error
def raise_herror(*args, **kwargs):
raise socket.herror
def raise_gaierror(*args, **kwargs):
raise socket.gaierror
self.failUnlessRaises(socket.error, raise_error,
"Error raising socket exception.")
self.failUnlessRaises(socket.error, raise_herror,
"Error raising socket exception.")
self.failUnlessRaises(socket.error, raise_gaierror,
"Error raising socket exception.")
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assert_(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except socket.error:
# Probably a similar problem as above; skip this test
return
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
if hasattr(sys, "getrefcount"):
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except SystemError:
if sys.getrefcount(__name__) <> orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except socket.error:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1L<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1L<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1L, 2L, 3L ]
bad_values = [ -1, -2, -3, -1L, -2L, -3L ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if sys.platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
'freebsd7', 'freebsd8', 'darwin'):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except socket.error:
pass
else:
raise socket.error
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
        # Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except socket.error:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
def testIPv4toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
self.assertEquals('\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEquals('\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEquals('\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEquals('\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEquals('\xff\xff\xff\xff', f('255.255.255.255'))
self.assertEquals('\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEquals('\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEquals('\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEquals('\xff\xff\xff\xff', g('255.255.255.255'))
def testIPv6toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_pton(AF_INET6, a)
self.assertEquals('\x00' * 16, f('::'))
self.assertEquals('\x00' * 16, f('0::0'))
self.assertEquals('\x00\x01' + '\x00' * 14, f('1::'))
self.assertEquals(
'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
def testStringToIPv4(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
self.assertEquals('1.0.1.0', f('\x01\x00\x01\x00'))
self.assertEquals('170.85.170.85', f('\xaa\x55\xaa\x55'))
self.assertEquals('255.255.255.255', f('\xff\xff\xff\xff'))
self.assertEquals('1.2.3.4', f('\x01\x02\x03\x04'))
self.assertEquals('1.0.1.0', g('\x01\x00\x01\x00'))
self.assertEquals('170.85.170.85', g('\xaa\x55\xaa\x55'))
self.assertEquals('255.255.255.255', g('\xff\xff\xff\xff'))
def testStringToIPv6(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_ntop(AF_INET6, a)
self.assertEquals('::', f('\x00' * 16))
self.assertEquals('::1', f('\x00' * 15 + '\x01'))
self.assertEquals(
'aef:b01:506:1001:ffff:9997:55:170',
f('\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname(). Use a temporary socket to elicit an unused
# ephemeral port that we can use later in the test.
tempsock = socket.socket()
tempsock.bind(("0.0.0.0", 0))
(host, port) = tempsock.getsockname()
tempsock.close()
del tempsock
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
my_ip_addr = socket.gethostbyname(socket.gethostname())
self.assert_(name[0] in ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # A new socket should start with SO_REUSEADDR disabled (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.failIf(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.failIf(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(socket.error, sock.send, "spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_sock_ioctl(self):
if os.name != "nt":
return
self.assert_(hasattr(socket.socket, 'ioctl'))
self.assert_(hasattr(socket, 'SIO_RCVALL'))
self.assert_(hasattr(socket, 'RCVALL_ON'))
self.assert_(hasattr(socket, 'RCVALL_OFF'))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = ''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, 'f' * 2048)
def _testSendAll(self):
big_chunk = 'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
if not hasattr(socket, "fromfd"):
return # On Windows, this doesn't exist
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), '')
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(0)
start = time.time()
try:
self.serv.accept()
except socket.error:
pass
end = time.time()
self.assert_((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
bufsize = -1 # Use default buffer size
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
SocketConnectedTest.setUp(self)
self.serv_file = self.cli_conn.makefile('rb', self.bufsize)
def tearDown(self):
self.serv_file.close()
self.assert_(self.serv_file.closed)
self.serv_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.cli_file = self.serv_conn.makefile('wb')
def clientTearDown(self):
self.cli_file.close()
self.assert_(self.cli_file.closed)
self.cli_file = None
SocketConnectedTest.clientTearDown(self)
def testSmallRead(self):
# Performing small file read test
first_seg = self.serv_file.read(len(MSG)-3)
second_seg = self.serv_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, MSG)
def _testSmallRead(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testFullRead(self):
# read until EOF
msg = self.serv_file.read()
self.assertEqual(msg, MSG)
def _testFullRead(self):
self.cli_file.write(MSG)
self.cli_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = ''
while 1:
char = self.serv_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, MSG)
def _testUnbufferedRead(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadline(self):
# Performing file readline test
line = self.serv_file.readline()
self.assertEqual(line, MSG)
def _testReadline(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadlineAfterRead(self):
a_baloo_is = self.serv_file.read(len("A baloo is"))
self.assertEqual("A baloo is", a_baloo_is)
_a_bear = self.serv_file.read(len(" a bear"))
self.assertEqual(" a bear", _a_bear)
line = self.serv_file.readline()
self.assertEqual("\n", line)
line = self.serv_file.readline()
self.assertEqual("A BALOO IS A BEAR.\n", line)
line = self.serv_file.readline()
self.assertEqual(MSG, line)
def _testReadlineAfterRead(self):
self.cli_file.write("A baloo is a bear\n")
self.cli_file.write("A BALOO IS A BEAR.\n")
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadlineAfterReadNoNewline(self):
end_of_ = self.serv_file.read(len("End Of "))
self.assertEqual("End Of ", end_of_)
line = self.serv_file.readline()
self.assertEqual("Line", line)
def _testReadlineAfterReadNoNewline(self):
self.cli_file.write("End Of Line")
def testClosedAttr(self):
self.assert_(not self.serv_file.closed)
def _testClosedAttr(self):
self.assert_(not self.cli_file.closed)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that httplib relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.serv_file.readline() # first line
self.assertEqual(line, "A. " + MSG) # first line
self.serv_file = self.cli_conn.makefile('rb', 0)
line = self.serv_file.readline() # second line
self.assertEqual(line, "B. " + MSG) # second line
def _testUnbufferedReadline(self):
self.cli_file.write("A. " + MSG)
self.cli_file.write("B. " + MSG)
self.cli_file.flush()
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
def testWithoutServer(self):
port = test_support.find_unused_port()
self.failUnlessRaises(
socket.error,
lambda: socket.create_connection((HOST, port))
)
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.family, 2)
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assert_(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
finally:
socket.setdefaulttimeout(None)
self.assertEquals(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assert_(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
time.sleep(3)
conn.send("done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, "done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.failUnlessRaises(socket.timeout, lambda: sock.recv(5))
class Urllib2FileobjectTest(unittest.TestCase):
# urllib2.HTTPHandler has "borrowed" socket._fileobject, and requires that
# it close the socket if the close c'tor argument is true
def testClose(self):
class MockSocket:
closed = False
def flush(self): pass
def close(self): self.closed = True
# must not close unless we request it: the original use of _fileobject
# by module socket requires that the underlying socket not be closed until
# the _socketobject that created the _fileobject is closed
s = MockSocket()
f = socket._fileobject(s)
f.close()
self.assert_(not s.closed)
s = MockSocket()
f = socket._fileobject(s, close=True)
f.close()
self.assert_(s.closed)
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.failUnlessRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
if not hasattr(signal, "alarm"):
return # can only test on *nix
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.failUnlessRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assert_(issubclass(socket.error, Exception))
self.assert_(issubclass(socket.herror, socket.error))
self.assert_(issubclass(socket.gaierror, socket.error))
self.assert_(issubclass(socket.timeout, socket.error))
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = "\x00python-test-hello\x00\xff"
s1 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s1.bind(address)
s1.listen(1)
s2 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s2.connect(s1.getsockname())
s1.accept()
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = "\x00" + "h" * (self.UNIX_PATH_MAX - 1)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.assertRaises(socket.error, s.bind, address)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvInto(self):
buf = array.array('c', ' '*1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf.tostring()[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvInto(self):
buf = buffer(MSG)
self.serv_conn.send(buf)
def testRecvFromInto(self):
buf = array.array('c', ' '*1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf.tostring()[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromInto(self):
buf = buffer(MSG)
self.serv_conn.send(buf)
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
if test_support.verbose:
print "TIPC module is not loaded, please 'sudo modprobe tipc'"
return False
class TIPCTest (unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
class TIPCThreadableTest (unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2]
if sys.platform != 'mac':
tests.extend([ BasicUDPTest, UDPTimeoutTest ])
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
Urllib2FileobjectTest,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
])
if hasattr(socket, "socketpair"):
tests.append(BasicSocketPairTest)
if sys.platform == 'linux2':
tests.append(TestLinuxAbstractNamespace)
if isTipcAvailable():
tests.append(TIPCTest)
tests.append(TIPCThreadableTest)
thread_info = test_support.threading_setup()
test_support.run_unittest(*tests)
test_support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
|
# -*- coding: utf-8 -*-
"""
A program to run multiple Spiegelman's Monster simulations for
parametric analysis.
"""
import os, time, ast, importlib, pylab
import numpy as np
import spiegelman
import parameters as defaults
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter as colourConverter
def multipleRuns(number, inputs = None):
sim_folder = 'Sim_' + time.strftime('%d%m%y_%H%M%S')
os.mkdir(sim_folder)
sims_list = list()
for N in range(number):
sims_list.append(spiegelman.go(inputs, 0,
sim_folder + '/' + str(N) + '.SIMHIST'))
return sims_list
# Temporary helper class: a lightweight wrapper around a parameter dictionary.
class pSet(object):
def __init__(self,p):
if isinstance(p, pSet):
self.parameters = dict(p.parameters)
else:
self.parameters = dict(p)
def set(self,key,value):
self.parameters[key] = type(value)(value)
def get(self, key):
return self.parameters[key]
def list(self):
return self.parameters
# interactive parameter runs
def parameterRuns():
importlib.reload(defaults)
sims_list = list()
print('Default Parameters:')
[print(s,':',defaults.parameters[s]) for s in defaults.parameters]
# extract information
totalRuns = int(input('How many runs are required? '))
print('Parameters Available:')
print([p for p in defaults.parameters])
param = input('Which Parameter would you like to change? ')
    if param not in defaults.parameters:
        print('Invalid parameter')
        raise KeyError(param)
elif isinstance(defaults.parameters[param], dict):
sub_params = [sub for sub in defaults.parameters[param]]
print('Sub-parameters to tune: ', sub_params)
prange = dict()
parameters = dict()
for n in sub_params:
            prange[n] = input('Please enter the range for the sub-parameter ' + n +
                              ' (separate with ::) ').split('::')
if len(prange[n]) != 2:
print('Non standard ranges not yet implemented.')
raise(Exception)
parameters = [{n : (float(prange[n][0]) + k*(float(prange[n][1]) - \
float(prange[n][0]))/(totalRuns-1)) for \
n in sub_params} for k in range(totalRuns) ]
elif isinstance(defaults.parameters[param], (float,int)):
prange = input('Please enter the range of the parameter (separate with ::) ')
inputs = prange.count('::') + 1
prange = prange.split('::')
if inputs == totalRuns:
parameters = list()
for k in prange:
parameters.append(ast.literal_eval(k))
elif inputs == 2:
pMin = float(prange[0])
pMax = float(prange[1])
parameters = [pMin + k*(pMax-pMin)/(totalRuns-1) for k in range(totalRuns)]
else:
print('Format has not been implemented. Try MIN::MAX or give one arg/run')
raise(Exception)
elif isinstance(defaults.parameters[param], (list,tuple)):
print('Parameter is a tuple or list, not implemented yet')
raise(Exception)
else:
print('Unknown parameter Type')
raise(Exception)
# make parameter inputs
psSet = list()
for n in range(totalRuns):
df = defaults.parameters
df[param] = parameters[n]
psSet.append(pSet(dict(df)))
print('Simulations Made, now running')
# run sims
try:
sim_folder = 'Sim_' + time.strftime('%d%m%y_%H%M%S')
os.mkdir(sim_folder)
for n in range(totalRuns):
name = sim_folder + '/' + str(n) + '.SIMHIST'
sims_list.append(spiegelman.go(psSet[n], -1, name))
except Exception as ex:
print(ex)
finally:
print('Returning Parameter List and Simulation List')
return [psSet, sims_list]
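# A minimal sketch of the range expansion performed in parameterRuns() above,
# under the assumption of a numeric parameter and a two-value 'MIN::MAX' range:
# totalRuns values are spaced linearly from MIN to MAX, endpoints included.
# For example, _expand_range_example(0.0, 1.0, 5) gives [0.0, 0.25, 0.5, 0.75, 1.0].
# The helper name is illustrative only and is never called by the code above.
def _expand_range_example(pMin, pMax, totalRuns):
    return [pMin + k * (pMax - pMin) / (totalRuns - 1) for k in range(totalRuns)]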
"""
#def paramComparisons(paramOutput, comp_pnt = None, position = -1):
# pList = paramOutput[0]
# sims_list = paramOutput[1]
# if comp_pnt == None:
# print('Categories', list(sims_list[0].history.keys()))
# comp_pnt = input('What category to compare? ')
# if comp_pnt not in sims_list[0].history.keys():
# print('Key Error, Quitting')
# return
#
# ind_pnt = [k for k in pList[1].parameters
# if pList[1].parameters[k] != pList[0].parameters[k]]
# if len(ind_pnt) != 1:
# print('Parameters that Changed:', ind_pnt)
# print('All Parameters:', [k for k in pList[1].parameters])
# ind_pnt = input('Which parameter to index with? ')
# else:
# ind_pnt = ind_pnt[0]
#
# print('Comparing:', comp_pnt)
# indices = list()
# values = list()
# for n in sims_list:
# indices.append(pList[sims_list.index(n)].parameters[ind_pnt])
# comp_vals = n.history[comp_pnt][position]
# if isinstance(comp_vals, dict):
# values.append([comp_vals[k] for k in comp_vals])
# else:
# values.append(comp_vals)
# #print(indices[-1],values[-1])
#
# spiegelman.plt.plot(indices,values,'.')
# spiegelman.plt.xlabel(ind_pnt)
# spiegelman.plt.ylabel(comp_pnt)
# spiegelman.plt.title(comp_pnt + ' by ' + ind_pnt + ' in Iteration' + str(position))
#
#
# return [indices,values]
"""
def plotComparison(paramOutput, heat = False, comp_pnt = None):
pList = paramOutput[0]
sList = paramOutput[1]
multigraph = False
#determine output to compare
    if comp_pnt is None:
print('Categories', list(sList[0].history.keys()))
comp_pnt = input('What category to compare? ')
if comp_pnt not in sList[0].history.keys():
print('Key Error, Quitting')
return
#determine parameter to compare by, and its values
ind_pnt = [k for k in pList[1].parameters
if pList[1].parameters[k] != pList[0].parameters[k]]
if (len(ind_pnt) != 1):
print('Parameters that Changed:', ind_pnt)
print('All Parameters:', [k for k in pList[1].parameters])
ind_pnt = input('Which parameter to index with? ')
indices = [p.parameters[ind_pnt] for p in pList]
    elif isinstance(pList[0].parameters[ind_pnt[0]], dict):
print('Parameters that Changed:', ind_pnt[0])
print('Sub-Parameters:', [k for k in pList[0].parameters[ind_pnt[0]]
if (pList[0].parameters[ind_pnt[0]][k] !=
pList[1].parameters[ind_pnt[0]][k])])
sub_ind = input('Which sub-parameter to index with? ')
indices = [p.parameters[ind_pnt[0]][sub_ind] for p in pList]
ind_pnt = ind_pnt[0] + '.' + sub_ind
else:
ind_pnt = ind_pnt[0]
print('Parameter: ', ind_pnt)
indices = [p.parameters[ind_pnt] for p in pList]
#determine values
epochs = [range(s.parameters['Cycles']) for s in sList]
if isinstance(sList[0].history[comp_pnt][0],dict):
values = [[[k[j] for j in k] for k in s.history[comp_pnt]] for s in sList]
multigraph = True
else:
values = [s.history[comp_pnt] for s in sList]
if heat == True:
heatMap(comp_pnt, ind_pnt, indices, epochs, values)
return
#determine plot type
if comp_pnt == 'Lengths':
#use hist
print('Histogram not implemented yet')
return
elif multigraph:
print('Multigraph not yet implemented')
return
else:
vert = [list(zip(epochs[values.index(v)],v)) for v in values]
fig = spiegelman.plt.figure()
ax = spiegelman.Axes3D(fig)
colours = [colourConverter.to_rgba(i) for i in 'bgrcmyk']
lines = LineCollection(vert, colors=colours)
lines.set_alpha(0.7)
lines.set_linewidth(1.5)
ax.add_collection3d(lines, zs=indices, zdir='y')
ax.set_xlabel('Epoch')
ax.set_ylabel(ind_pnt)
ax.set_zlabel(comp_pnt)
ax.set_xlim3d(min([i for j in epochs for i in j]), max([i for j in epochs for i in j]))
ax.set_ylim3d(min(indices),max(indices))
ax.set_zlim3d(min([i for j in values for i in j]), max([i for j in values for i in j]))
spiegelman.plt.show()
return [indices,vert]
def getOutputs(folder):
start = time.time()
outputs = list([list(),list()])
for fileName in os.listdir(folder):
if fileName[-8:] == '.SIMHIST':
outputs[1].append(spiegelman.SpSim(folder+'/'+fileName))
outputs[0].append(pSet(outputs[1][-1].parameters))
print('Outputs Obtained in', round(time.time() - start, 3), 's')
return outputs
def customRun(specs = None):
if isinstance(specs,(pSet, str, spiegelman.SpSim)):
s = spiegelman.go(specs)
else:
print('Custom Run, manually changing parameters')
ps = dict()
for n in defaults.parameters:
print('Changing', n,'\nDefault:', defaults.parameters[n])
got = input('Please enter the value for this parameter: ')
ps[n] = ast.literal_eval(got) if got != str() else defaults.parameters[n]
s = spiegelman.go(pSet(ps))
return s
def heatMap(comp_pnt, ind_pnt, indices, epochs, values):
spiegelman.plt.close()
indices = np.tile(np.array(indices),(len(epochs[0]),1)).transpose()
epochs = np.array(epochs)
values = np.array(values)
spiegelman.plt.contourf(epochs,indices,values)
spiegelman.plt.colorbar()
return
def heatDists(outs,folder):
try:
os.mkdir(folder+'/Dists')
except Exception as ex:
print(ex)
for i in outs[1]:
i.plotting3('h', heat=True)
fm = spiegelman.plt.get_current_fig_manager()
fm.window.showMaximized()
pylab.savefig(folder + '/Dists/'+ str(outs[1].index(i)) + '.png')
def cull_runs(ns = None, lims = None, plot = False):
    if ns is None:
        ns = int(input('Enter number of runs: '))
    if lims is None:
llim = ast.literal_eval(input('Enter lower bound: '))
ulim = ast.literal_eval(input('Enter upper bound: '))
else:
llim = min(lims)
ulim = max(lims)
ps = np.transpose([np.linspace(llim[i],ulim[i], num=ns) for i in range(len(llim))])
ss = list()
outFolder = time.strftime('%d%m%y_%H%M')
os.mkdir(outFolder)
for p in ps:
def cullfn(x):
return spiegelman.cull_function.cull_function(x,None,p)
print('p = ', p)
out = outFolder + '/' + time.strftime('%d%m%y_%H%M%S.SIMHIST')
ss.append(spiegelman.SpSim())
ss[-1].parameters['CullParameter'] = tuple(p)
ss[-1].run(cull = cullfn)
ss[-1].export_to(out)
if plot:
cplotting(ss, outFolder)
return ss
def cplotting(ss, outFolder=None):
    if outFolder is None:
        outFolder = 'Temp'
        try:
            os.mkdir(outFolder)
        except FileExistsError:
            pass
os.mkdir(outFolder+'/Pics')
cull_plot(ss)
spiegelman.plt.savefig(outFolder+'/Pics/cplot.png')
for s in ss:
N = str(ss.index(s))
for g in ('a','p','u%','n'):
s.plotting(g)
figM = spiegelman.plt.get_current_fig_manager()
figM.window.showMaximized()
spiegelman.plt.savefig(outFolder+'/Pics/'+N+g+'.png')
s.plotting3('h')
figM = spiegelman.plt.get_current_fig_manager()
figM.window.showMaximized()
spiegelman.plt.savefig(outFolder+'/Pics/'+N+'h.png')
def cull_plot(ss, cm = spiegelman.CM.RdBu):
averages = np.transpose([x.history['Average'] for x in ss])
try:
params = [np.sqrt(np.sum(q**2)) for x in ss for q in x.parameters['CullParameter']]
plabel = 'Normed Parameter Value'
except:
params = range(np.size(averages,1))
plabel = 'Parameter Number'
epochs = range(np.size(averages,0))
#return(averages,params,epochs)
spiegelman.plt.close()
extents = [params[0],params[-1],epochs[0],epochs[-1]]
f,ax = spiegelman.plt.subplots(1,1,figsize=(6,6))
im = ax.imshow(np.log10(averages), extent=extents, interpolation='None', cmap = cm, aspect='auto')
f.colorbar(im)
ax.set_title('Logged Average Lengths')
ax.set_xlabel(plabel)
ax.set_ylabel('Epoch')
|
|
# Author: Mathieu Blondel <[email protected]>
# Arnaud Joly <[email protected]>
# Maheshakya Wijewardena <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .utils import check_random_state
from .utils.validation import check_array
from .utils.validation import check_consistent_length
from .utils.random import random_choice_csc
from .utils.stats import _weighted_percentile
from .utils.multiclass import class_distribution
class DummyClassifier(BaseEstimator, ClassifierMixin):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "prior": always predicts the class that maximizes the class prior
(like "most_frequent") and ``predict_proba`` returns the class prior.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
          class.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use.
constant : int or str or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
classes_ : array or list of array of shape = [n_classes]
Class labels for each output.
n_classes_ : array or list of array of shape = [n_classes]
        Number of labels for each output.
class_prior_ : array or list of array of shape = [n_classes]
Probability of each class for each output.
n_outputs_ : int,
Number of outputs.
outputs_2d_ : bool,
True if the output at fit is 2d, else false.
`sparse_output_` : bool,
True if the array returned from predict is to be in sparse CSC format.
Is automatically set to True if the input y is passed in sparse format.
"""
def __init__(self, strategy="stratified", random_state=None,
constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y, sample_weight=None):
"""Fit the random classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform",
"constant", "prior"):
raise ValueError("Unknown strategy type.")
if self.strategy == "uniform" and sp.issparse(y):
y = y.toarray()
warnings.warn('A local copy of the target data has been converted '
'to a numpy array. Predicting on sparse target data '
'with the uniform strategy would not save memory '
'and would be slower.',
UserWarning)
self.sparse_output_ = sp.issparse(y)
if not self.sparse_output_:
y = np.atleast_1d(y)
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if self.strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
(self.classes_,
self.n_classes_,
self.class_prior_) = class_distribution(y, sample_weight)
if (self.strategy == "constant" and
any(constant[k] not in self.classes_[k]
for k in range(self.n_outputs_))):
# Checking in case of constant strategy if the constant
# provided by the user is in y.
raise ValueError("The constant target value must be "
"present in training data")
if self.n_outputs_ == 1 and not self.output_2d_:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self.strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self.strategy in ("most_frequent", "prior"):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self.strategy == "stratified":
class_prob = class_prior_
elif self.strategy == "uniform":
raise ValueError("Sparse target prediction is not "
"supported with the uniform strategy")
elif self.strategy == "constant":
classes_ = [np.array([c]) for c in constant]
y = random_choice_csc(n_samples, classes_, class_prob,
self.random_state)
else:
if self.strategy in ("most_frequent", "prior"):
y = np.tile([classes_[k][class_prior_[k].argmax()] for
k in range(self.n_outputs_)], [n_samples, 1])
elif self.strategy == "stratified":
y = np.vstack(classes_[k][proba[k].argmax(axis=1)] for
k in range(self.n_outputs_)).T
elif self.strategy == "uniform":
ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)]
for k in range(self.n_outputs_)]
y = np.vstack(ret).T
elif self.strategy == "constant":
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
        P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1 and not self.output_2d_:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self.strategy == "most_frequent":
ind = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self.strategy == "prior":
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self.strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
elif self.strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self.strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1 and not self.output_2d_:
P = P[0]
return P
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
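# Illustrative sketch (an assumption, not part of the estimator API above): a
# minimal usage example for DummyClassifier with the "most_frequent" strategy
# on a tiny made-up dataset. The helper is defined here for documentation
# purposes only and is never called on import.
def _dummy_classifier_example():
    X = np.zeros((6, 1))              # features are ignored by DummyClassifier
    y = np.array([0, 0, 0, 1, 1, 2])  # class 0 is the most frequent label
    clf = DummyClassifier(strategy="most_frequent").fit(X, y)
    predictions = clf.predict(X)      # array of six 0s
    proba = clf.predict_proba(X)      # each row is one-hot on class 0: [1., 0., 0.]
    return predictions, proba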
class DummyRegressor(BaseEstimator, RegressorMixin):
"""
DummyRegressor is a regressor that makes predictions using
simple rules.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "mean": always predicts the mean of the training set
* "median": always predicts the median of the training set
* "quantile": always predicts a specified quantile of the training set,
provided with the quantile parameter.
* "constant": always predicts a constant value that is provided by
the user.
constant : int or float or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
quantile : float in [0.0, 1.0]
The quantile to predict using the "quantile" strategy. A quantile of
0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the
maximum.
Attributes
----------
constant_ : float or array of shape [n_outputs]
Mean or median or quantile of the training targets or constant value
given by the user.
n_outputs_ : int,
Number of outputs.
outputs_2d_ : bool,
True if the output at fit is 2d, else false.
"""
def __init__(self, strategy="mean", constant=None, quantile=None):
self.strategy = strategy
self.constant = constant
self.quantile = quantile
def fit(self, X, y, sample_weight=None):
"""Fit the random regressor.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("mean", "median", "quantile", "constant"):
raise ValueError("Unknown strategy type: %s, expected "
"'mean', 'median', 'quantile' or 'constant'"
% self.strategy)
y = check_array(y, ensure_2d=False)
if len(y) == 0:
raise ValueError("y must not be empty.")
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y, sample_weight)
if self.strategy == "mean":
self.constant_ = np.average(y, axis=0, weights=sample_weight)
elif self.strategy == "median":
if sample_weight is None:
self.constant_ = np.median(y, axis=0)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=50.)
for k in range(self.n_outputs_)]
elif self.strategy == "quantile":
if self.quantile is None or not np.isscalar(self.quantile):
raise ValueError("Quantile must be a scalar in the range "
"[0.0, 1.0], but got %s." % self.quantile)
percentile = self.quantile * 100.0
if sample_weight is None:
self.constant_ = np.percentile(y, axis=0, q=percentile)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=percentile)
for k in range(self.n_outputs_)]
elif self.strategy == "constant":
if self.constant is None:
raise TypeError("Constant target value has to be specified "
"when the constant strategy is used.")
self.constant = check_array(self.constant,
accept_sparse=['csr', 'csc', 'coo'],
ensure_2d=False, ensure_min_samples=0)
if self.output_2d_ and self.constant.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have "
"shape (%d, 1)." % y.shape[1])
self.constant_ = self.constant
self.constant_ = np.reshape(self.constant_, (1, -1))
return self
def predict(self, X):
"""
        Perform regression on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "constant_"):
raise ValueError("DummyRegressor not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples = X.shape[0]
y = np.ones((n_samples, 1)) * self.constant_
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
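# Illustrative sketch (an assumption, not part of the estimator API above): a
# minimal usage example contrasting the "mean" and "quantile" strategies of
# DummyRegressor on a tiny made-up target vector. Defined for documentation
# purposes only and never called on import.
def _dummy_regressor_example():
    X = np.zeros((4, 1))                 # features are ignored by DummyRegressor
    y = np.array([1.0, 2.0, 3.0, 10.0])
    mean_model = DummyRegressor(strategy="mean").fit(X, y)                   # predicts 4.0
    q90_model = DummyRegressor(strategy="quantile", quantile=0.9).fit(X, y)  # predicts ~7.9
    return mean_model.predict(X), q90_model.predict(X)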
|
|
"""
File: internals.py
Author: Ulf Krumnack
Email: [email protected]
Github: https://github.com/krumnack
"""
# standard imports
import importlib
import sys
import os
import re
# Qt imports
from PyQt5 import QtCore
from PyQt5.QtWidgets import QWidget, QGroupBox, QLabel, QPushButton
from PyQt5.QtWidgets import QGridLayout, QHBoxLayout, QVBoxLayout
from PyQt5.QtWidgets import QPlainTextEdit, QComboBox
from PyQt5.QtGui import QFontDatabase
# toolbox imports
from dltb.util.resource import Resource, ModuleResource
from dltb.util.hardware import cpus, gpus, cuda
from util import add_timer_callback
from toolbox import Toolbox
# GUI imports
from .panel import Panel
from ..utils import QObserver, QAttribute, protect
class ModuleInfo(QGroupBox, QObserver, qobservables={
# FIXME[hack]: check what we are really interested in ...
Resource: Resource.Change.all()}):
"""A Widget providing information about a module.
The Widget observes the :py:class:`ModuleResource` associated
with the module it provides information about. If the
state of this resource changes (e.g. if the module is imported),
the information will be updated.
"""
_resource: Resource = None
def __init__(self, resource: Resource = None, **kwargs) -> None:
super().__init__(**kwargs)
self._initGui()
self.setResource(resource)
def __del__(self):
self.setResource(None)
super().__del__()
def setResource(self, resource: Resource) -> None:
interests = Resource.Change('status_changed')
self._exchangeView('_resource', resource, interests=interests)
def resource_changed(self, resource: Resource,
change: Resource.Change) -> None:
if change.status_changed:
self.update()
def _initGui(self):
"""Create a :py:class:`QGroupBox` showing module information.
"""
layout = QVBoxLayout()
buttonLayout = QHBoxLayout()
self._importButton = QPushButton("Load")
self._importButton.clicked.connect(self._onImportButtonClicked)
buttonLayout.addWidget(self._importButton)
self._installButton = QPushButton("Install")
self._installButton.clicked.connect(self._onInstallButtonClicked)
buttonLayout.addWidget(self._installButton)
buttonLayout.addStretch()
self._nameLabel = QLabel()
self._versionLabel = QLabel()
self._libraryLabel = QLabel()
self._descriptionLabel = QLabel()
layout.addWidget(self._nameLabel)
layout.addLayout(buttonLayout)
layout.addWidget(self._versionLabel)
layout.addWidget(self._libraryLabel)
layout.addWidget(self._descriptionLabel)
layout.addStretch()
self.setLayout(layout)
@QtCore.pyqtSlot()
def _onImportButtonClicked(self):
self._resource.prepare()
@QtCore.pyqtSlot()
def _onInstallButtonClicked(self):
self._resource.install()
def update(self):
haveResource = self._resource is not None and bool(self._resource)
self.setTitle(self._resource.label if haveResource else "None")
self._installButton.setVisible(haveResource and
not self._resource.available)
self._importButton.setVisible(haveResource and
self._resource.available and
not self._resource.prepared)
if not haveResource:
self._nameLabel.setText("No module")
self._versionLabel.setText("")
self._libraryLabel.setText("")
self._descriptionLabel.setText("")
else:
self._nameLabel.setText(self._resource.module)
self._descriptionLabel.setText(self._resource.description)
if self._resource.prepared:
module = sys.modules[self._resource.module]
self._versionLabel.setText("Version: " +
self._resource.version)
self._libraryLabel.setText("Library: " +
module.__file__)
else:
self._versionLabel.setText("")
self._libraryLabel.setText("")
class QProcessInfo(QWidget, QObserver, qobservables={
Toolbox: {'processes_changed'}}):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self._initUI()
def _initUI(self) -> None:
self._button = QPushButton("Test")
self._button.clicked.connect(self._onButtonClicked)
layout = QVBoxLayout()
layout.addWidget(self._button)
self.setLayout(layout)
@QtCore.pyqtSlot()
@protect
def _onButtonClicked(self):
self._toolbox.notify_process("Test")
def toolbox_changed(self, toolbox: Toolbox, info: Toolbox.Change) -> None:
pass # FIXME[todo]: implementation
class InternalsPanel(Panel, QAttribute, qattributes={Toolbox: False}):
"""A Panel displaying system internals.
May be of interest during development.
Attributes
----------
_modules: dict
A mapping from module names to module information.
        This can be the actual module (if already loaded),
        or a string describing the state of the module
        ("not loaded" or "not found"). This information
is initialized and updated by the method
:py:meth:_updateModules.
_moduleName: str = None
Graphical elements
------------------
_grid: QGridLayout
_moduleGrid: QGridLayout = None
"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self._modules = {}
self._moduleName = None
self.initUI()
def initUI(self):
self._layout = QVBoxLayout()
self._grid = QGridLayout()
self._grid.addWidget(self.modulesInfo(), 0, 0)
self._grid.addLayout(self.systemInfo(), 0, 1)
self._layout.addLayout(self._grid)
self._info = QLabel("Info")
self._layout.addWidget(self._info)
self._processInfo = QProcessInfo()
self.addAttributePropagation(Toolbox, self._processInfo)
self._layout.addWidget(self._processInfo)
self.setLayout(self._layout)
#@QtCore.pyqtSlot()
@protect
def _onInfo(self, checked: bool = False):
sender = self.sender()
print(sender, type(sender), sender.text())
resource = Resource[sender.ID]
self.showInfo(ModuleInfo(resource=resource))
#@QtCore.pyqtSlot()
@protect
def _onUpdateModules(self, checked: bool = False) -> None:
self._updateModules()
def showInfo(self, info: QWidget):
"""Show a new info widget.
Arguments
---------
info: QWidget
The widget to be displayed in the info region of the
panel. This will replace the previously displayed
info widget.
"""
if self._layout.replaceWidget(self._info, info) is not None:
self._info.deleteLater()
self._info = info
def modulesInfo(self) -> QGroupBox:
"""Create a QGridLayout with two columns displaying module
information. The first column contains the module name, the
second column version (if loaded) or availability.
Modules are listed in the order given by :py:meth:modules.
Returns
------
box: QGroupBox
A QWidget displaying the module information.
"""
box = QGroupBox('Modules')
box.setMinimumWidth(300)
self._moduleGrid = QGridLayout()
self._moduleGrid.addWidget(QLabel("<b>Package</b>", self), 0,0)
self._moduleGrid.addWidget(QLabel("<b>Version</b>", self), 0,1)
for i,m in enumerate(ModuleResource):
button = QPushButton(m.label, self)
button.ID = m._id # FIXME[hack]
button.setFlat(True)
button.clicked.connect(self._onInfo)
self._moduleGrid.addWidget(button, 1+i, 0)
self._moduleGrid.addWidget(QLabel('', self), 1+i, 1)
self._updateModules()
boxLayout = QVBoxLayout()
boxLayout.addLayout(self._moduleGrid)
updateButton = QPushButton("Update")
updateButton.clicked.connect(self._onUpdateModules)
boxLayout.addWidget(updateButton)
boxLayout.addStretch()
box.setLayout(boxLayout)
return box
def _updateModules(self):
"""Update the module list.
"""
for i, m in enumerate(ModuleResource):
if m.prepared:
info = m.version
elif m.available:
info = "not loaded"
else:
info = "not found"
self._moduleGrid.itemAtPosition(1+i, 1).widget().setText(info)
def systemInfo(self):
pythonBox = QGroupBox('Python')
boxLayout = QVBoxLayout()
boxLayout.addWidget(QLabel(f"Python version: {sys.version}"))
boxLayout.addWidget(QLabel(f"Platform: {sys.platform}"))
boxLayout.addWidget(QLabel(f"Prefix: {sys.prefix}"))
boxLayout.addWidget(QLabel(f"Executable: {sys.executable}"))
boxLayout.addStretch()
pythonBox.setLayout(boxLayout)
hardwareBox = QGroupBox('Hardware')
boxLayout = QVBoxLayout()
for i, cpu in enumerate(cpus):
prefix = f"{i+1}. " if len(cpus) > 1 else ""
boxLayout.addWidget(QLabel(f"{prefix}CPU: {cpu.name}"))
for i, gpu in enumerate(gpus):
prefix = f"{i+1}. " if len(gpus) > 1 else ""
boxLayout.addWidget(QLabel(f"{prefix}GPU: {gpu.name}"))
# Memory (1)
mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
# SC_PAGE_SIZE is often 4096.
# SC_PAGESIZE and SC_PAGE_SIZE are equal.
mem = mem_bytes>>20
boxLayout.addWidget(QLabel("Total physical memory: {:,} MiB".
format(mem)))
boxLayout.addStretch()
hardwareBox.setLayout(boxLayout)
#
# Platform
#
systemBox = QGroupBox('System')
boxLayout = QVBoxLayout()
import platform
boxLayout.addWidget(QLabel(f"node: {platform.node()}"))
# boxLayout.addWidget(QLabel(f"uname: {platform.uname()}"))
boxLayout.addWidget(QLabel(f"system: {platform.system()}"))
boxLayout.addWidget(QLabel(f"release: {platform.release()}"))
# boxLayout.addWidget(QLabel(f"version: {platform.version()}"))
boxLayout.addWidget(QLabel(f"machine/processor: "
f"{platform.machine()}/"
f"{platform.processor()}"))
boxLayout.addStretch()
systemBox.setLayout(boxLayout)
resourcesBox = QGroupBox('Resources')
boxLayout = QVBoxLayout()
# Memory (2)
# a useful solution that works for various operating systems,
# including Linux, Windows 7, etc.:
try:
import psutil
mem = psutil.virtual_memory()
# mem.total: total physical memory available
boxLayout.addWidget(QLabel("Total physical memory: "
f"{mem.total}"))
process = psutil.Process(os.getpid())
boxLayout.addWidget(QLabel("Memory usage: "
f"{process.memory_info().rss}"))
except ModuleNotFoundError:
pass
# For Unixes (Linux, Mac OS X, Solaris) you could also use the
# getrusage() function from the standard library module
# resource. The resulting object has the attribute ru_maxrss,
# which gives peak memory usage for the calling process.
# resource is a standard library module.
import resource
# The Python docs aren't clear on what the units are exactly,
# but the Mac OS X man page for getrusage(2) describes the
# units as bytes. The Linux man page isn't clear, but it seems
# to be equivalent to the information from /proc/self/status,
# which is in kilobytes.
rusage = resource.getrusage(resource.RUSAGE_SELF)
boxLayout.addWidget(QLabel("Peak Memory usage: {:,} kiB".
format(rusage.ru_maxrss)))
if cuda is not None:
button = QPushButton("CUDA")
#button.setFlat(True)
@protect
def slot(clicked: bool):
self.showInfo(self.cudaInfo())
button.clicked.connect(slot)
boxLayout.addWidget(button)
resourcesBox.setLayout(boxLayout)
#
# layout the boxes
#
layout = QVBoxLayout()
row = QHBoxLayout()
row.addWidget(hardwareBox)
row.addWidget(pythonBox)
row.addWidget(systemBox)
layout.addLayout(row)
layout.addWidget(resourcesBox)
layout.addStretch()
return layout
def cv2Info(self, cv2):
layout = QVBoxLayout()
info = cv2.getBuildInformation()
text = QPlainTextEdit()
fixedFont = QFontDatabase.systemFont(QFontDatabase.FixedFont)
text.document().setDefaultFont(fixedFont)
text.setReadOnly(True)
text.setPlainText(info)
layout.addWidget(QLabel(f"Version: {cv2.__version__}"))
layout.addWidget(QLabel(f"Library: {cv2.__file__}"))
layout.addWidget(text)
layout.addStretch()
return layout
def tensorflowInfo(self, tf):
layout = QVBoxLayout()
label = QLabel("<b>Tensorflow<b>\n"
f"Version = {tf.__version__}")
layout.addWidget(label)
layout.addWidget(QLabel(f"Tensorflow devices:"))
# There is an undocumented method called
# device_lib.list_local_devices() that enables you to list
# the devices available in the local process (As an
# undocumented method, this is subject to backwards
# incompatible changes.)
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
for dev in local_device_protos:
layout.addWidget(QLabel(f"Device: {dev.name} ({dev.device_type})"))
# Note that (at least up to TensorFlow 1.4), calling
# device_lib.list_local_devices() will run some initialization
# code that, by default, will allocate all of the GPU memory
# on all of the devices (GitHub issue). To avoid this, first
# create a session with an explicitly small
# per_process_gpu_fraction, or allow_growth=True, to prevent
# all of the memory being allocated. See this question for
# more details
# https://stackoverflow.com/questions/38009682/how-to-tell-if-tensorflow-is-using-gpu-acceleration-from-inside-python-shell
layout.addStretch()
return layout
def kerasInfo(self, keras):
layout = QVBoxLayout()
layout.addWidget(QLabel(f"Backend: {keras.backend.backend()}"))
layout.addStretch()
return layout
def cudaInfo(self):
cudaBox = QGroupBox('CUDA')
boxLayout = QVBoxLayout()
if os.path.exists('/proc/driver/nvidia/version'):
with open('/proc/driver/nvidia/version') as f:
driver_info = f.read()
match = re.search('Kernel Module +([^ ]*)', driver_info)
if match:
boxLayout.addWidget(QLabel(f"Kernel module: {match.group(1)}"))
match = re.search('gcc version +([^ ]*)', driver_info)
if match:
boxLayout.addWidget(QLabel(f"GCC version: {match.group(1)}"))
boxLayout.addWidget(QLabel(f"NVIDIA Kernel driver: "
f"{cuda.driver_version}"))
boxLayout.addWidget(QLabel(f"CUDA Toolkit version: "
f"{cuda.toolkit_version}"))
text = QPlainTextEdit()
fixedFont = QFontDatabase.systemFont(QFontDatabase.FixedFont)
text.document().setDefaultFont(fixedFont)
text.setReadOnly(True)
text.setPlainText(str(cuda.nvidia_smi))
text.appendPlainText(str(cuda.nvidia_smi_l))
boxLayout.addWidget(text)
# Now use the python module pycuda
try:
import pycuda.autoinit
import pycuda.driver as cuda
(free,total) = cuda.mem_get_info()
boxLayout.addWidget(QLabel("<b>Global GPU Memory</b>"))
boxLayout.addWidget(QLabel(f"Total: {total}"))
boxLayout.addWidget(QLabel(f"Free: {free}"))
boxLayout.addWidget(QLabel("Global memory occupancy: "
f"{free*100/total:2.4}% free"))
for devicenum in range(cuda.Device.count()):
device=cuda.Device(devicenum)
attrs=device.get_attributes()
# Beyond this point is just pretty printing
print("\n===Attributes for device %d"%devicenum)
for (key,value) in attrs.items():
print("%s:%s"%(str(key),str(value)))
except ImportError as e:
print(e, file=sys.stderr)
# ImportError: libcurand.so.8.0
# The problem occurs with the current anaconda version
# (2017.1, "conda install -c lukepfister pycuda").
# The dynamic library "_driver.cpython-36m-x86_64-linux-gnu.so"
# is linked against "libcurand.so.8.0". However, the cudatoolkit
# installed by anaconda is version 9.0.
boxLayout.addWidget(QLabel("Python CUDA module (pycuda) not availabe"))
try:
nvmlInfo = QNvmlInfo()
boxLayout.addWidget(nvmlInfo)
add_timer_callback(nvmlInfo.update)
except ImportError as e:
print(e, file=sys.stderr)
boxLayout.addWidget(QLabel("Python NVML module (py3nvml) not availabe"))
cudaBox.setLayout(boxLayout)
return cudaBox
class QNvmlInfo(QWidget):
"""A QWidget for displaying information obtained from the
NVIDIA Management Library (Python bindings: py3nvml)
Attributes
----------
nvml: module
A reference to the NVIDIA Management Library.
_deviceCount: int
The number of NVIDIA devices.
_handle:
An NVML handle for the current device. None if no device
is selected.
Graphical elements
------------------
_devices: QComboBox
A combo box to select the current device.
_name: QLabel
_driver_version: QLabel
_temperature: QLabel
_temperature_slowdown: QLabel
_temperature_shutdown: QLabel
"""
def __init__(self, parent=None):
"""Initialize this :py:class:`QNvmlInfo`. This includes importing the
NVIDIA Management Library Python bindings (py3nvml) and
initializing that module. If any of these operations fails, a
dummy content will be created displaying a corresponding
message.
"""
super().__init__(parent)
try:
self.nvml = importlib.import_module('py3nvml.py3nvml')
self.nvml.nvmlInit()
self._deviceCount = self.nvml.nvmlDeviceGetCount()
self._handle = None
self._initUI()
except ModuleNotFoundError:
self.nvml = None
layout = QVBoxLayout()
layout.addWidget(QLabel("NVIDIA Management Library not available"))
self.setLayout(layout)
def __del__(self):
"""Freeing resources. This includes shutting down the NVML module.
"""
if self.nvml is not None:
self.nvml.nvmlShutdown()
self.nvml = None
def _initUI(self):
"""Create an interface containing a QComboBox to select the current
device and several QLabels to display (device) information,
including driver version, as well as slowdown, shutdown, and
current device temperature.
"""
layout = QVBoxLayout()
grid = QGridLayout()
self._driver_version = QLabel(self.nvml.nvmlSystemGetDriverVersion())
grid.addWidget(QLabel("Driver Version"), 0,0)
grid.addWidget(self._driver_version, 0,1)
layout.addLayout(grid)
self._devices = QComboBox()
for i in range(self._deviceCount):
handle = self.nvml.nvmlDeviceGetHandleByIndex(i)
self._devices.addItem(self.nvml.nvmlDeviceGetName(handle))
@protect
def slot(device: int) -> None:
self.update()
self._devices.activated.connect(slot)
layout.addWidget(self._devices)
grid = QGridLayout()
self._name = QLabel()
grid.addWidget(QLabel("Name"), 0,0)
grid.addWidget(self._name, 0,1)
self._temperature = QLabel()
grid.addWidget(QLabel("Temperatur"), 1,0)
box = QHBoxLayout()
box.addWidget(self._temperature)
box.addWidget(QLabel(u'\N{DEGREE SIGN}C'))
box.addStretch()
grid.addLayout(box, 1,1)
self._temperature_slowdown = QLabel()
grid.addWidget(QLabel("Slowdown Temperatur"), 2,0)
box = QHBoxLayout()
box.addWidget(self._temperature_slowdown)
box.addWidget(QLabel(u'\N{DEGREE SIGN}C'))
box.addStretch()
grid.addLayout(box, 2,1)
self._temperature_shutdown = QLabel()
grid.addWidget(QLabel("Shutdown Temperatur"), 3,0)
box = QHBoxLayout()
box.addWidget(self._temperature_shutdown)
box.addWidget(QLabel(u'\N{DEGREE SIGN}C'))
box.addStretch()
grid.addLayout(box, 3,1)
layout.addLayout(grid)
layout.addStretch()
self.setLayout(layout)
def update(self):
"""Update the widgets indicating slowdown, shutdown, and current
temperature.
"""
currentIndex = self._devices.currentIndex()
if self._handle is None and currentIndex >= 0:  # currentIndex() is -1 when empty
self._handle = self.nvml.nvmlDeviceGetHandleByIndex(currentIndex)
self._name.setText(self.nvml.nvmlDeviceGetName(self._handle))
slowdown = self.nvml.nvmlDeviceGetTemperatureThreshold(self._handle,
self.nvml.NVML_TEMPERATURE_THRESHOLD_SLOWDOWN)
shutdown = self.nvml.nvmlDeviceGetTemperatureThreshold(self._handle,
self.nvml.NVML_TEMPERATURE_THRESHOLD_SHUTDOWN)
self._temperature_slowdown.setText(str(slowdown))
self._temperature_shutdown.setText(str(shutdown))
if self._handle is not None:
temperature = self.nvml.nvmlDeviceGetTemperature(self._handle,
self.nvml.NVML_TEMPERATURE_GPU)
self._temperature.setText(str(temperature))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# utils.py
#
# Copyright 2016 Bruno S <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
"""utils module from ProperImage,
for coadding astronomical images.
Written by Bruno SANCHEZ
PhD of Astronomy - UNC
[email protected]
Instituto de Astronomia Teorica y Experimental (IATE) UNC
Cordoba - Argentina
Of 301
"""
import os
import astroalign as aa
import numpy as np
import scipy.ndimage as ndimage
from astropy.io import fits
from astropy.modeling import fitting, models
from astropy.stats import sigma_clipped_stats
from numpy.lib.recfunctions import append_fields
from scipy import sparse
from scipy.spatial import cKDTree
aa.PIXEL_TOL = 0.3
aa.NUM_NEAREST_NEIGHBORS = 5
aa.MIN_MATCHES_FRACTION = 0.6
def store_img(img, path=None):
if isinstance(img[0, 0], complex):  # np.complex is a removed alias of complex
img = img.real
if isinstance(img, np.ma.core.MaskedArray):
mask = img.mask.astype("int")
data = img.data
hdu_data = fits.PrimaryHDU(data)
hdu_data.scale(type="float32")
hdu_mask = fits.ImageHDU(mask.astype("uint8"))
hdu_mask.header["IMG_TYPE"] = "BAD_PIXEL_MASK"
hdu = fits.HDUList([hdu_data, hdu_mask])
else:
hdu = fits.PrimaryHDU(img)
if path is not None:
hdu.writeto(path, overwrite=True)
else:
return hdu
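# --- Illustrative usage sketch (added for clarity, not in the original) ---
# store_img() accepts plain or masked arrays: a masked array produces a
# two-HDU file (float32 data plus a bad-pixel mask), anything else a single
# PrimaryHDU.  The helper below is only an example and is never called; the
# output path is hypothetical.
def _store_img_example(path="/tmp/example_masked.fits"):
    data = np.arange(16, dtype=float).reshape(4, 4)
    data[1, 2] = np.nan
    masked = np.ma.masked_invalid(data)
    store_img(masked, path=path)        # writes data HDU + "BAD_PIXEL_MASK" HDU
    return store_img(np.ones((4, 4)))   # no path given -> the HDU is returned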
def crossmatch(X1, X2, max_distance=np.inf):
"""Cross-match the values between X1 and X2
By default, this uses a KD Tree for speed.
Parameters
----------
X1 : array_like
first dataset, shape(N1, D)
X2 : array_like
second dataset, shape(N2, D)
max_distance : float (optional)
maximum radius of search. If no point is within the given radius,
then inf will be returned.
Returns
-------
dist, ind: ndarrays
The distance and index of the closest point in X2 to each point in X1
Both arrays are length N1.
Locations with no match are indicated by
dist[i] = inf, ind[i] = N2
"""
X1 = np.asarray(X1, dtype=float)
X2 = np.asarray(X2, dtype=float)
N1, D = X1.shape
N2, D2 = X2.shape
if D != D2:
raise ValueError("Arrays must have the same second dimension")
kdt = cKDTree(X2)
dist, ind = kdt.query(X1, k=1, distance_upper_bound=max_distance)
return dist, ind
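# --- Illustrative usage sketch (added for clarity, not in the original) ---
# crossmatch() returns, for every row of X1, the distance to and the index of
# its nearest neighbour in X2; rows with no neighbour within max_distance get
# dist == inf and ind == len(X2).  The helper below is never called.
def _crossmatch_example():
    X1 = np.array([[0.0, 0.0], [10.0, 10.0]])
    X2 = np.array([[0.2, 0.0], [50.0, 50.0]])
    dist, ind = crossmatch(X1, X2, max_distance=1.0)
    # dist -> [0.2, inf], ind -> [0, 2]; 2 == len(X2) marks "no match"
    return dist, ind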
def _matching(master, cat, masteridskey=None, radius=1.5, masked=False):
"""Function to match stars between frames."""
if masteridskey is None:
masterids = np.arange(len(master))
master["masterindex"] = masterids
idkey = "masterindex"
else:
idkey = masteridskey
masterXY = np.empty((len(master), 2), dtype=np.float64)
masterXY[:, 0] = master["x"]
masterXY[:, 1] = master["y"]
imXY = np.empty((len(cat), 2), dtype=np.float64)
imXY[:, 0] = cat["x"]
imXY[:, 1] = cat["y"]
dist, ind = crossmatch(masterXY, imXY, max_distance=radius)
dist_, ind_ = crossmatch(imXY, masterXY, max_distance=radius)
IDs = np.zeros_like(ind_) - 13133
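# The loop below keeps only mutual (symmetric) matches: catalogue source i is
# accepted when its nearest master source ind_[i] has, in turn, source i as
# its own nearest neighbour, with both distances inside `radius`.  Everything
# else keeps the sentinel value assigned above.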
for i in range(len(ind_)):
if dist_[i] != np.inf:
ind_o = ind_[i]
if dist[ind_o] != np.inf:
ind_s = ind[ind_o]
if ind_s == i:
IDs[i] = master[idkey][ind_o]
if masked:
mask = IDs > 0
return (IDs, mask)
return IDs
def transparency(images, master=None):
"""Transparency calculator, using Ofek method."""
if master is None:
p = len(images)
master = images[0]
imglist = images[1:]
else:
# master is a separate file
p = len(images) + 1
imglist = images
mastercat = master.best_sources
try:
mastercat = append_fields(
mastercat,
"sourceid",
np.arange(len(mastercat)),
usemask=False,
dtypes=int,
)
except ValueError:
pass
detect = np.repeat(True, len(mastercat))
# Matching the sources
for img in imglist:
newcat = img.best_sources
ids, mask = _matching(
mastercat,
newcat,
masteridskey="sourceid",
radius=2.0,
masked=True,
)
try:
newcat = append_fields(newcat, "sourceid", ids, usemask=False)
except ValueError:
newcat["sourceid"] = ids
for i in range(len(mastercat)):
if mastercat[i]["sourceid"] not in ids:
detect[i] = False
newcat.sort(order="sourceid")
img.update_sources(newcat)
try:
mastercat = append_fields(
mastercat, "detected", detect, usemask=False, dtypes=bool
)
except ValueError:
mastercat["detected"] = detect
# Now populating the vector of magnitudes
q = sum(mastercat["detected"])
if q != 0:
m = np.zeros(p * q)
# here 20 is a common value for a zp, and is only for weighting
m[:q] = (
-2.5 * np.log10(mastercat[mastercat["detected"]]["flux"]) + 20.0
)
j = 0
for row in mastercat[mastercat["detected"]]:
for img in imglist:
cat = img.best_sources
imgrow = cat[cat["sourceid"] == row["sourceid"]]
m[q + j] = -2.5 * np.log10(imgrow["flux"]) + 20.0
j += 1
master.update_sources(mastercat)
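# The block below sets up the least-squares system m ~= H @ [zps, meanmags]:
# every instrumental magnitude of star i in image j is modelled as
# zp_j + M_i (per-image zero point plus per-star mean magnitude).  H is a
# sparse (p*q) x (p+q) design matrix built from a column-indicator block for
# the zero points and an identity block for the star magnitudes.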
ident = sparse.identity(q)
col = np.repeat(1.0, q)
sparses = []
for j in range(p):
ones_col = np.zeros((q, p))
ones_col[:, j] = col
sparses.append([sparse.csc_matrix(ones_col), ident])
H = sparse.bmat(sparses)
P = sparse.linalg.lsqr(H, m)
zps = P[0][:p]
meanmags = P[0][p:]
return np.asarray(zps), np.asarray(meanmags)
else:
return np.ones(p), np.nan
def _align_for_diff(refpath, newpath, newmask=None):
"""Function to align two images using their paths,
and returning new paths for differencing.
We will always rotate and align the new image to the reference,
so it is easier to compare differences along a time series.
"""
ref = np.ma.masked_invalid(fits.getdata(refpath))
new = fits.getdata(newpath)
hdr = fits.getheader(newpath)
if newmask is not None:
new = np.ma.masked_array(new, mask=fits.getdata(newmask))
else:
new = np.ma.masked_invalid(new)
dest_file = "aligned_" + os.path.basename(newpath)
dest_file = os.path.join(os.path.dirname(newpath), dest_file)
try:
new2 = aa.register(ref, new.filled(np.median(new)))
except ValueError:
ref = ref.astype(float)
new = new.astype(float)
new2 = aa.register(ref, new)
hdr.set("comment", "aligned img " + newpath + " to " + refpath)
if isinstance(new2, np.ma.masked_array):
hdu = fits.HDUList(
[
fits.PrimaryHDU(new2.data, header=hdr),
fits.ImageHDU(new2.mask.astype("uint8")),
]
)
hdu.writeto(dest_file, overwrite=True)
else:
fits.writeto(dest_file, new2, hdr, overwrite=True)
return dest_file
def _align_for_coadd(imglist):
"""
Function to align a group of images for coadding; it uses
the astroalign ``register`` function to align every image to the first one.
"""
ref = imglist[0]
new_list = [ref]
for animg in imglist[1:]:
registrd, registrd_mask = aa.register(
animg.data, ref.data, propagate_mask=True
)
# [: ref.data.shape[0], : ref.data.shape[1]], Deprecated
new_list.append(
type(animg)(registrd, mask=registrd_mask, borders=False)
)
return new_list
def find_S_local_maxima(S_image, threshold=2.5, neighborhood_size=5):
mean, median, std = sigma_clipped_stats(S_image, maxiters=3)
labeled, num_objects = ndimage.label((S_image - mean) / std > threshold)
xy = np.array(
ndimage.center_of_mass(S_image, labeled, range(1, num_objects + 1))
)
cat = []
for x, y in xy:
cat.append((y, x, (S_image[int(x), int(y)] - mean) / std))
return cat
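# --- Added note (not in the original) ---
# find_S_local_maxima() thresholds (S - mean) / std, labels the connected
# regions above `threshold` and returns one tuple per region containing the
# centre-of-mass coordinates (column, row order) and the significance sampled
# at the truncated integer position.  The `neighborhood_size` argument is
# currently unused.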
def chunk_it(seq, num):
"""Creates chunks of a sequence suitable for data parallelism using
multiprocessing.
Parameters
----------
seq: list, array or sequence like object. (indexable)
data to separate in chunks
num: int
number of chunks required
Returns
-------
out: list of chunks containing the data split into num parts,
sorted in reverse order when the chunks are comparable.
"""
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last) : int(last + avg)])
last += avg
try:
return sorted(out, reverse=True)
except TypeError:
return out
except ValueError:
return out
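# --- Illustrative usage sketch (added for clarity, not in the original) ---
# chunk_it() cuts a sequence into `num` roughly equal slices and returns them
# reverse-sorted when the chunks are comparable.  Never called by this module.
def _chunk_it_example():
    chunks = chunk_it(list(range(9)), 3)
    # -> [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    return chunks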
def fit_gaussian2d(b, fitter=None):
if fitter is None:
fitter = fitting.LevMarLSQFitter()
y2, x2 = np.mgrid[: b.shape[0], : b.shape[1]]
ampl = b.max() - b.min()
p = models.Gaussian2D(
x_mean=b.shape[1] / 2.0,
y_mean=b.shape[0] / 2.0,
x_stddev=1.0,
y_stddev=1.0,
theta=np.pi / 4.0,
amplitude=ampl,
)
p += models.Const2D(amplitude=b.min())
out = fitter(p, x2, y2, b, maxiter=1000)
return out
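# --- Illustrative usage sketch (added for clarity, not in the original) ---
# fit_gaussian2d() fits a 2-D Gaussian plus a constant background to a small
# stamp.  The example below builds a synthetic stamp and is never called.
def _fit_gaussian2d_example():
    yy, xx = np.mgrid[:15, :15]
    stamp = 5.0 + 100.0 * np.exp(-0.5 * (((xx - 7.0) / 2.0) ** 2 +
                                         ((yy - 7.0) / 2.0) ** 2))
    fitted = fit_gaussian2d(stamp)
    # fitted[0] is the Gaussian2D component, fitted[1] the Const2D background
    return fitted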
|
|
# Copyright (c) 2012 OpenStack Foundation.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_messaging import serializer as om_serializer
from oslo_service import service
from neutron.common import exceptions
from neutron import context
LOG = logging.getLogger(__name__)
TRANSPORT = None
NOTIFIER = None
ALLOWED_EXMODS = [
exceptions.__name__,
]
EXTRA_EXMODS = []
TRANSPORT_ALIASES = {
'neutron.openstack.common.rpc.impl_fake': 'fake',
'neutron.openstack.common.rpc.impl_qpid': 'qpid',
'neutron.openstack.common.rpc.impl_kombu': 'rabbit',
'neutron.openstack.common.rpc.impl_zmq': 'zmq',
'neutron.rpc.impl_fake': 'fake',
'neutron.rpc.impl_qpid': 'qpid',
'neutron.rpc.impl_kombu': 'rabbit',
'neutron.rpc.impl_zmq': 'zmq',
}
# NOTE(salv-orlando): I am afraid this is a global variable. While not ideal,
# it is however widely used throughout the code base. It should be set to
# True if the RPC server is not running in the current process space. This
# will prevent get_connection from creating connections to the AMQP server.
RPC_DISABLED = False
def init(conf):
global TRANSPORT, NOTIFIER
exmods = get_allowed_exmods()
TRANSPORT = oslo_messaging.get_transport(conf,
allowed_remote_exmods=exmods,
aliases=TRANSPORT_ALIASES)
serializer = RequestContextSerializer()
NOTIFIER = oslo_messaging.Notifier(TRANSPORT, serializer=serializer)
def cleanup():
global TRANSPORT, NOTIFIER
assert TRANSPORT is not None
assert NOTIFIER is not None
TRANSPORT.cleanup()
TRANSPORT = NOTIFIER = None
def add_extra_exmods(*args):
EXTRA_EXMODS.extend(args)
def clear_extra_exmods():
del EXTRA_EXMODS[:]
def get_allowed_exmods():
return ALLOWED_EXMODS + EXTRA_EXMODS
def get_client(target, version_cap=None, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
return oslo_messaging.RPCClient(TRANSPORT,
target,
version_cap=version_cap,
serializer=serializer)
def get_server(target, endpoints, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
return oslo_messaging.get_rpc_server(TRANSPORT, target, endpoints,
'eventlet', serializer)
def get_notifier(service=None, host=None, publisher_id=None):
assert NOTIFIER is not None
if not publisher_id:
publisher_id = "%s.%s" % (service, host or cfg.CONF.host)
return NOTIFIER.prepare(publisher_id=publisher_id)
class RequestContextSerializer(om_serializer.Serializer):
"""This serializer is used to convert RPC common context into
Neutron Context.
"""
def __init__(self, base=None):
super(RequestContextSerializer, self).__init__()
self._base = base
def serialize_entity(self, ctxt, entity):
if not self._base:
return entity
return self._base.serialize_entity(ctxt, entity)
def deserialize_entity(self, ctxt, entity):
if not self._base:
return entity
return self._base.deserialize_entity(ctxt, entity)
def serialize_context(self, ctxt):
return ctxt.to_dict()
def deserialize_context(self, ctxt):
rpc_ctxt_dict = ctxt.copy()
user_id = rpc_ctxt_dict.pop('user_id', None)
if not user_id:
user_id = rpc_ctxt_dict.pop('user', None)
tenant_id = rpc_ctxt_dict.pop('tenant_id', None)
if not tenant_id:
tenant_id = rpc_ctxt_dict.pop('project_id', None)
return context.Context(user_id, tenant_id, **rpc_ctxt_dict)
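# Added note (not in the original): deserialize_context() accepts both the
# current and the legacy key names, so {'user_id': u, 'tenant_id': t, ...}
# and {'user': u, 'project_id': t, ...} both yield context.Context(u, t, ...)
# with the remaining keys passed through unchanged.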
class Service(service.Service):
"""Service object for binaries running on hosts.
A service enables rpc by listening to queues based on topic and host.
"""
def __init__(self, host, topic, manager=None, serializer=None):
super(Service, self).__init__()
self.host = host
self.topic = topic
self.serializer = serializer
if manager is None:
self.manager = self
else:
self.manager = manager
def start(self):
super(Service, self).start()
self.conn = create_connection(new=True)
LOG.debug("Creating Consumer connection for Service %s",
self.topic)
endpoints = [self.manager]
self.conn.create_consumer(self.topic, endpoints)
# Hook to allow the manager to do other initializations after
# the rpc connection is created.
if callable(getattr(self.manager, 'initialize_service_hook', None)):
self.manager.initialize_service_hook(self)
# Consume from all consumers in threads
self.conn.consume_in_threads()
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them, as we're shutting down anyway
try:
self.conn.close()
except Exception:
pass
super(Service, self).stop()
class Connection(object):
def __init__(self):
super(Connection, self).__init__()
self.servers = []
def create_consumer(self, topic, endpoints, fanout=False):
target = oslo_messaging.Target(
topic=topic, server=cfg.CONF.host, fanout=fanout)
server = get_server(target, endpoints)
self.servers.append(server)
def consume_in_threads(self):
for server in self.servers:
server.start()
return self.servers
def close(self):
for server in self.servers:
server.stop()
for server in self.servers:
server.wait()
class VoidConnection(object):
def create_consumer(self, topic, endpoints, fanout=False):
pass
def consume_in_threads(self):
pass
def close(self):
pass
# functions
def create_connection(new=True):
# NOTE(salv-orlando): This is a clever interpretation of the factory design
# pattern aimed at preventing plugins from initializing RPC servers upon
# initialization when they are running in the REST over HTTP API server.
# The educated reader will perfectly be able to see that this is a fairly
# dirty hack to avoid having to change the initialization process of every
# plugin.
if RPC_DISABLED:
return VoidConnection()
return Connection()
|
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import hmac
import httplib2
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
import six
import six.moves.urllib.parse as urlparse
import webob
from neutron.agent.linux import utils as agent_utils
from neutron.agent.metadata import config
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as n_const
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron.i18n import _LE, _LW
from neutron.openstack.common.cache import cache
LOG = logging.getLogger(__name__)
MODE_MAP = {
config.USER_MODE: 0o644,
config.GROUP_MODE: 0o664,
config.ALL_MODE: 0o666,
}
class MetadataPluginAPI(object):
"""Agent-side RPC for metadata agent-to-plugin interaction.
This class implements the client side of an rpc interface used by the
metadata service to make calls back into the Neutron plugin. The server
side is defined in
neutron.api.rpc.handlers.metadata_rpc.MetadataRpcCallback. For more
information about changing rpc interfaces, see
doc/source/devref/rpc_api.rst.
API version history:
1.0 - Initial version.
"""
def __init__(self, topic):
target = oslo_messaging.Target(
topic=topic,
namespace=n_const.RPC_NAMESPACE_METADATA,
version='1.0')
self.client = n_rpc.get_client(target)
def get_ports(self, context, filters):
cctxt = self.client.prepare()
return cctxt.call(context, 'get_ports', filters=filters)
class MetadataProxyHandler(object):
def __init__(self, conf):
self.conf = conf
if self.conf.cache_url:
self._cache = cache.get_cache(self.conf.cache_url)
else:
self._cache = False
self.plugin_rpc = MetadataPluginAPI(topics.PLUGIN)
self.context = context.get_admin_context_without_session()
@webob.dec.wsgify(RequestClass=webob.Request)
def __call__(self, req):
try:
LOG.debug("Request: %s", req)
instance_id, tenant_id = self._get_instance_and_tenant_id(req)
if instance_id:
return self._proxy_request(instance_id, tenant_id, req)
else:
return webob.exc.HTTPNotFound()
except Exception:
LOG.exception(_LE("Unexpected error."))
msg = _('An unknown error has occurred. '
'Please try your request again.')
explanation = six.text_type(msg)
return webob.exc.HTTPInternalServerError(explanation=explanation)
def _get_ports_from_server(self, router_id=None, ip_address=None,
networks=None):
"""Get ports from server."""
filters = self._get_port_filters(router_id, ip_address, networks)
return self.plugin_rpc.get_ports(self.context, filters)
def _get_port_filters(self, router_id=None, ip_address=None,
networks=None):
filters = {}
if router_id:
filters['device_id'] = [router_id]
filters['device_owner'] = n_const.ROUTER_INTERFACE_OWNERS
if ip_address:
filters['fixed_ips'] = {'ip_address': [ip_address]}
if networks:
filters['network_id'] = networks
return filters
@utils.cache_method_results
def _get_router_networks(self, router_id):
"""Find all networks connected to given router."""
internal_ports = self._get_ports_from_server(router_id=router_id)
return tuple(p['network_id'] for p in internal_ports)
@utils.cache_method_results
def _get_ports_for_remote_address(self, remote_address, networks):
"""Get list of ports that has given ip address and are part of
given networks.
:param networks: list of networks in which the ip address will be
searched for
"""
return self._get_ports_from_server(networks=networks,
ip_address=remote_address)
def _get_ports(self, remote_address, network_id=None, router_id=None):
"""Search for all ports that contain passed ip address and belongs to
given network.
If no network is passed ports are searched on all networks connected to
given router. Either one of network_id or router_id must be passed.
"""
if network_id:
networks = (network_id,)
elif router_id:
networks = self._get_router_networks(router_id)
else:
raise TypeError(_("Either one of parameter network_id or router_id"
" must be passed to _get_ports method."))
return self._get_ports_for_remote_address(remote_address, networks)
def _get_instance_and_tenant_id(self, req):
remote_address = req.headers.get('X-Forwarded-For')
network_id = req.headers.get('X-Neutron-Network-ID')
router_id = req.headers.get('X-Neutron-Router-ID')
ports = self._get_ports(remote_address, network_id, router_id)
if len(ports) == 1:
return ports[0]['device_id'], ports[0]['tenant_id']
return None, None
def _proxy_request(self, instance_id, tenant_id, req):
headers = {
'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
'X-Instance-ID': instance_id,
'X-Tenant-ID': tenant_id,
'X-Instance-ID-Signature': self._sign_instance_id(instance_id)
}
nova_ip_port = '%s:%s' % (self.conf.nova_metadata_ip,
self.conf.nova_metadata_port)
url = urlparse.urlunsplit((
self.conf.nova_metadata_protocol,
nova_ip_port,
req.path_info,
req.query_string,
''))
h = httplib2.Http(
ca_certs=self.conf.auth_ca_cert,
disable_ssl_certificate_validation=self.conf.nova_metadata_insecure
)
if self.conf.nova_client_cert and self.conf.nova_client_priv_key:
h.add_certificate(self.conf.nova_client_priv_key,
self.conf.nova_client_cert,
nova_ip_port)
resp, content = h.request(url, method=req.method, headers=headers,
body=req.body)
if resp.status == 200:
LOG.debug(str(resp))
req.response.content_type = resp['content-type']
req.response.body = content
return req.response
elif resp.status == 403:
LOG.warn(_LW(
'The remote metadata server responded with Forbidden. This '
'response usually occurs when shared secrets do not match.'
))
return webob.exc.HTTPForbidden()
elif resp.status == 400:
return webob.exc.HTTPBadRequest()
elif resp.status == 404:
return webob.exc.HTTPNotFound()
elif resp.status == 409:
return webob.exc.HTTPConflict()
elif resp.status == 500:
msg = _(
'Remote metadata server experienced an internal server error.'
)
LOG.warn(msg)
explanation = six.text_type(msg)
return webob.exc.HTTPInternalServerError(explanation=explanation)
else:
raise Exception(_('Unexpected response code: %s') % resp.status)
def _sign_instance_id(self, instance_id):
secret = self.conf.metadata_proxy_shared_secret
if isinstance(secret, six.text_type):
secret = secret.encode('utf-8')
if isinstance(instance_id, six.text_type):
instance_id = instance_id.encode('utf-8')
return hmac.new(secret, instance_id, hashlib.sha256).hexdigest()
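# --- Illustrative sketch (added for clarity, not part of the agent) ---
# The X-Instance-ID-Signature header built above is presumably verified on
# the receiving side by recomputing the HMAC with the shared secret and
# comparing it in constant time, roughly like this hypothetical helper:
def _verify_instance_id_signature(shared_secret, instance_id, signature):
    if isinstance(shared_secret, six.text_type):
        shared_secret = shared_secret.encode('utf-8')
    if isinstance(instance_id, six.text_type):
        instance_id = instance_id.encode('utf-8')
    expected = hmac.new(shared_secret, instance_id, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, signature)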
class UnixDomainMetadataProxy(object):
def __init__(self, conf):
self.conf = conf
agent_utils.ensure_directory_exists_without_file(
cfg.CONF.metadata_proxy_socket)
self._init_state_reporting()
def _init_state_reporting(self):
self.context = context.get_admin_context_without_session()
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
self.agent_state = {
'binary': 'neutron-metadata-agent',
'host': cfg.CONF.host,
'topic': 'N/A',
'configurations': {
'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket,
'nova_metadata_ip': cfg.CONF.nova_metadata_ip,
'nova_metadata_port': cfg.CONF.nova_metadata_port,
'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats,
},
'start_flag': True,
'agent_type': n_const.AGENT_TYPE_METADATA}
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.state_rpc.report_state(
self.context,
self.agent_state,
use_call=self.agent_state.get('start_flag'))
except AttributeError:
# This means the server does not support report_state
LOG.warn(_LW('Neutron server does not support state report.'
' State report for this agent will be disabled.'))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_LE("Failed reporting state!"))
return
self.agent_state.pop('start_flag', None)
def _get_socket_mode(self):
mode = self.conf.metadata_proxy_socket_mode
if mode == config.DEDUCE_MODE:
user = self.conf.metadata_proxy_user
if (not user or user == '0' or user == 'root'
or agent_utils.is_effective_user(user)):
# user is agent effective user or root => USER_MODE
mode = config.USER_MODE
else:
group = self.conf.metadata_proxy_group
if not group or agent_utils.is_effective_group(group):
# group is agent effective group => GROUP_MODE
mode = config.GROUP_MODE
else:
# otherwise => ALL_MODE
mode = config.ALL_MODE
return MODE_MAP[mode]
def run(self):
server = agent_utils.UnixDomainWSGIServer('neutron-metadata-agent')
server.start(MetadataProxyHandler(self.conf),
self.conf.metadata_proxy_socket,
workers=self.conf.metadata_workers,
backlog=self.conf.metadata_backlog,
mode=self._get_socket_mode())
server.wait()
|
|
import os
import logging
from functools import wraps
from urlparse import urlparse
import tempfile
from typecheck import accepts, Self
from zinc.models import ZincIndex, ZincManifest, ZincCatalogConfig, ZincFlavorSpec
from zinc.defaults import defaults
from zinc.formats import Formats
import zinc.helpers as helpers
import zinc.utils as utils
log = logging.getLogger(__name__)
################################################################################
# TODO: rename to CatalogPathHelper?
class ZincCatalogPathHelper(object):
def __init__(self, format='1'):
if format != defaults['zinc_format']:
raise Exception("Incompatible format %s" % (format))
self._format = format
@property
def format(self):
return self._format
@property
def manifests_dir(self):
return "manifests"
@property
def archives_dir(self):
return "archives"
@property
def objects_dir(self):
return "objects"
@property
def config_dir(self):
return "config"
@property
def config_flavorspec_dir(self):
return os.path.join(self.config_dir, "flavorspecs")
def path_for_index(self):
return defaults['catalog_index_name']
def manifest_name(self, bundle_name, version):
return "%s-%d.json" % (bundle_name, version)
def path_for_manifest_for_bundle_version(self, bundle_name, version):
manifest_filename = self.manifest_name(bundle_name, version)
manifest_path = os.path.join(self.manifests_dir, manifest_filename)
return manifest_path
def path_for_manifest(self, manifest):
return self.path_for_manifest_for_bundle_version(
manifest.bundle_name, manifest.version)
def path_for_file_with_sha(self, sha, ext=None, format=None):
if ext is not None and format is not None:
raise Exception(
"Should specify either `ext` or `format`, not both.")
if format is not None:
ext = helpers.file_extension_for_format(format)
subdir = os.path.join(self.objects_dir, sha[0:2], sha[2:4])
file = sha
if ext is not None:
file = file + '.' + ext
return os.path.join(subdir, file)
def archive_name(self, bundle_name, version, flavor=None):
if flavor is None:
return "%s-%d.tar" % (bundle_name, version)
else:
return "%s-%d~%s.tar" % (bundle_name, version, flavor)
def path_for_archive_for_bundle_version(
self, bundle_name, version, flavor=None):
archive_filename = self.archive_name(bundle_name, version, flavor=flavor)
archive_path = os.path.join(self.archives_dir, archive_filename)
return archive_path
def path_for_flavorspec_name(self, flavorspec_name):
filename = '%s.json' % flavorspec_name
return os.path.join(self.config_flavorspec_dir, filename)
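# --- Illustrative layout sketch (added for clarity, not in the original) ---
# With the default path helper a catalog tree looks roughly like this (bundle
# name, version, flavor and sha are made up):
#
#   <catalog_index_name>                  <- path_for_index()
#   manifests/meep-3.json                 <- path_for_manifest_for_bundle_version('meep', 3)
#   archives/meep-3~small.tar             <- path_for_archive_for_bundle_version('meep', 3, flavor='small')
#   objects/ab/cd/abcd1234...[.gz]        <- path_for_file_with_sha(), sharded
#                                            by the first four hex digits
#   config/flavorspecs/small.json         <- path_for_flavorspec_name('small')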
################################################################################
class ZincAbstractCatalog(object):
def get_index(self):
"""
Returns an *immutable* copy of the catalog index.
"""
raise NotImplementedError()
def get_manifest(self, bundle_name, version):
"""
Returns an *immutable* copy of the manifest for the specified
`bundle_name` and version`.
"""
raise NotImplementedError()
def update_bundle(self, new_manifest):
raise NotImplementedError()
# special
def import_path(self, src_path):
raise NotImplementedError()
def delete_bundle_version(self, bundle_name, version):
raise NotImplementedError()
def update_distribution(self, distribution_name, bundle_name, bundle_version, save_previous=True):
raise NotImplementedError()
def delete_distribution(self, distribution_name, bundle_name):
raise NotImplementedError()
def verify(self):
raise NotImplementedError()
def clean(self, **kwargs):
raise NotImplementedError()
### Non-abstract methods
def manifest_for_bundle(self, bundle_name, version=None):
"""
Get a manifest for a bundle. If version is not specified, it gets the
manifest with the highest version number.
"""
index = self.get_index()
all_versions = index.versions_for_bundle(bundle_name)
if version is None and len(all_versions) > 0:
version = all_versions[-1]
elif version not in all_versions:
return None # throw exception?
return self.get_manifest(bundle_name, version)
def manifest_for_bundle_descriptor(self, bundle_descriptor):
"""
Convenience method to get a manifest by bundle_descriptor.
"""
return self.manifest_for_bundle(
helpers.bundle_id_from_bundle_descriptor(bundle_descriptor),
helpers.bundle_version_from_bundle_descriptor(bundle_descriptor))
def bundle_descriptors(self):
bundle_descriptors = []
index = self.get_index()
for bundle_name in index.bundle_names():
for version in index.versions_for_bundle(bundle_name):
bundle_descriptors.append("%s-%d" % (bundle_name, version))
manifest = self.manifest_for_bundle(bundle_name, version)
if manifest is None:
log.warn('Could not load manifest for %s-%d' % (bundle_name, version))
continue
for flavor in manifest.flavors:
bundle_descriptors.append("%s-%d~%s" %
(bundle_name, version, flavor))
return bundle_descriptors
################################################################################
class ZincCatalogLock(object):
def __init__(self, catalog, lock):
self._catalog = catalog
self._lock = lock
def __enter__(self):
self._lock.acquire()
self._catalog._reload()
def __exit__(self, exc_type, exc_value, traceback):
self._catalog.save()
self._lock.release()
def is_locked(self):
return self._lock.is_locked()
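# --- Illustrative usage sketch (added for clarity, not in the original) ---
# ZincCatalogLock is a context manager: entering acquires the coordinator
# lock and reloads the index, leaving saves the index and releases the lock.
# Mutating operations are therefore wrapped like this (catalog/new_manifest
# are hypothetical objects):
#
#   with catalog.lock():
#       catalog.update_bundle(new_manifest)
#
# The _ensure_index_lock decorator further down applies this pattern
# automatically whenever the lock is not already held.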
class ZincCatalog(ZincAbstractCatalog):
def __init__(self, storage=None, coordinator=None, path_helper=None,
lock_timeout=None, **kwargs):
assert storage
super(ZincCatalog, self).__init__(**kwargs)
self._coordinator = coordinator
self._storage = storage
self._ph = path_helper or ZincCatalogPathHelper()
self._manifests = {}
self.lock_timeout = lock_timeout or defaults['catalog_lock_timeout']
self._reload()
if self._coordinator is not None:
self._lock = ZincCatalogLock(self,
self._coordinator.get_index_lock(
domain=self.id,
timeout=lock_timeout))
def lock(self):
assert self._lock
return self._lock
### Properties ###
@property
def url(self):
return self._storage.url
@property
def path(self):
return urlparse(self.url).path
@property
def id(self):
return self.index.id
@property
def path_helper(self):
return self._ph
def format(self):
return self.index.format
### General Internal Methods ###
def _reload(self):
self.index = self._read_index()
if self.index.format != defaults['zinc_format']:
raise Exception("Incompatible format %s" % (self.index.format))
self._read_config_file()
def _read_config_file(self):
#log.warn('reimplement config loading')
self.config = ZincCatalogConfig()
#config_path = pjoin(self.path, defaults['catalog_config_name'])
#self.config = load_config(config_path)
def _ensure_index_lock(func):
@wraps(func)
def with_ensure_index_lock(self, *args, **kwargs):
assert self._coordinator
if not self.lock().is_locked():
with self.lock():
output = func(self, *args, **kwargs)
else:
output = func(self, *args, **kwargs)
return output
return with_ensure_index_lock
### I/O Helpers ###
def _read(self, rel_path):
f = self._storage.get(rel_path)
return f.read() if f is not None else None
def _write(self, subpath, bytes, raw=True, gzip=True, max_age=None):
if raw:
self._storage.puts(subpath, bytes, max_age=max_age)
if gzip:
self._storage.puts(subpath + '.gz', utils.gzip_bytes(bytes), max_age=max_age)
def _read_index(self):
subpath = self._ph.path_for_index()
bytes = self._read(subpath)
return ZincIndex.from_bytes(bytes)
def _write_index(self, index, raw=True, gzip=True):
subpath = self._ph.path_for_index()
bytes = index.to_bytes()
max_age = defaults['catalog_index_max_age_seconds']
self._write(subpath, bytes, raw=raw, gzip=gzip, max_age=max_age)
if defaults['catalog_write_legacy_index']:
self._write('index.json', bytes, raw=raw, gzip=gzip, max_age=max_age)
def _read_manifest(self, bundle_name, version):
subpath = self._ph.path_for_manifest_for_bundle_version(bundle_name,
version)
bytes = self._read(subpath)
if bytes is not None:
return ZincManifest.from_bytes(bytes)
else:
return None
def _write_manifest(self, manifest, raw=True, gzip=True):
subpath = self._ph.path_for_manifest(manifest)
bytes = manifest.to_bytes()
self._write(subpath, bytes, raw=raw, gzip=gzip)
def _get_file_info(self, sha, preferred_formats=None):
if preferred_formats is None:
preferred_formats = defaults['catalog_preferred_formats']
for format in preferred_formats:
subpath = self._ph.path_for_file_with_sha(sha, format=format)
meta = self._storage.get_meta(subpath)
if meta is not None:
return {
'sha': sha,
'size': meta['size'],
'format': format
}
return None
def _read_file(self, sha, ext=None):
subpath = self._ph.path_for_file_with_sha(sha, ext=ext)
return self._storage.get(subpath)
def _write_file(self, sha, src_path, format=None):
format = format or Formats.RAW # default to RAW
if format not in defaults['catalog_valid_formats']:
raise Exception("Invalid format '%s'." % (format))
ext = format if format != Formats.RAW else None
subpath = self._ph.path_for_file_with_sha(sha, ext)
with open(src_path, 'r') as src_file:
self._storage.put(subpath, src_file)
return subpath
def _get_archive_info(self, bundle_name, version, flavor=None):
subpath = self._ph.path_for_archive_for_bundle_version(bundle_name,
version,
flavor=flavor)
meta = self._storage.get_meta(subpath)
return meta
def _write_archive(self, bundle_name, version, src_path, flavor=None):
subpath = self._ph.path_for_archive_for_bundle_version(bundle_name,
version,
flavor=flavor)
with open(src_path, 'r') as src_file:
self._storage.put(subpath, src_file)
return subpath
def _read_archive(self, bundle_name, version, flavor=None):
subpath = self._ph.path_for_archive_for_bundle_version(bundle_name,
version,
flavor=flavor)
return self._storage.get(subpath)
@_ensure_index_lock
def _reserve_version_for_bundle(self, bundle_name):
self.index.increment_next_version_for_bundle(bundle_name)
return self.index.next_version_for_bundle(bundle_name)
### "Public" Methods
def save(self):
self._write_index(self.index)
def get_index(self):
return self.index.clone(mutable=False)
@accepts(Self(), basestring, int)
def get_manifest(self, bundle_name, version):
return self._read_manifest(bundle_name, version)
@_ensure_index_lock
@accepts(Self(), ZincManifest)
def update_bundle(self, new_manifest):
assert new_manifest
existing_versions = self.index.versions_for_bundle(new_manifest.bundle_name)
if new_manifest.version in existing_versions:
raise ValueError("Bundle version already exists.")
next_version = self.index.next_version_for_bundle(new_manifest.bundle_name)
if new_manifest.version > next_version:
raise ValueError("Unexpected manifest version.")
### verify all files in the filelist exist in the repo
missing_shas = list()
info_by_path = dict()
for path in new_manifest.files.keys():
sha = new_manifest.sha_for_file(path)
file_info = self._get_file_info(sha)
if file_info is None:
missing_shas.append(sha)
else:
info_by_path[path] = file_info
if len(missing_shas) > 0:
# TODO: better error
raise Exception("Missing shas: %s" % (missing_shas))
### TODO: verify archives?
### write manifest
self._write_manifest(new_manifest)
### update catalog index
self.index.add_version_for_bundle(new_manifest.bundle_name,
new_manifest.version)
@accepts(Self(), basestring)
def import_path(self, src_path):
sha = utils.sha1_for_path(src_path)
file_info = self._get_file_info(sha)
if file_info is not None:
return file_info
# gzip the file first, and see if it passes the compression threshold
# TODO: this is stupidly inefficient
with tempfile.NamedTemporaryFile() as tmp_file:
src_path_gz = tmp_file.name
with open(src_path) as src_file:
tmp_file.write(utils.gzip_bytes(src_file.read()))
tmp_file.flush()
src_size = os.path.getsize(src_path)
src_gz_size = os.path.getsize(src_path_gz)
if src_size > 0 and float(src_gz_size) / src_size <= self.config.gzip_threshhold:
final_src_path = src_path_gz
final_src_size = src_gz_size
format = Formats.GZ
else:
final_src_path = src_path
final_src_size = src_size
format = Formats.RAW
imported_path = self._write_file(sha, final_src_path, format=format)
file_info = {
'sha': sha,
'size': final_src_size,
'format': format
}
log.info("Imported %s --> %s" % (src_path, file_info))
log.debug("Imported path: %s" % imported_path)
return file_info
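# Added note (not in the original): worked example of the gzip decision in
# import_path() -- a 1000-byte source that gzips to 400 bytes has a ratio of
# 0.4; with a gzip threshold of, say, 0.85 the compressed copy wins and the
# object is stored as Formats.GZ, otherwise the raw bytes are kept.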
@_ensure_index_lock
@accepts(Self(), basestring, int)
def delete_bundle_version(self, bundle_name, version):
self.index.delete_bundle_version(bundle_name, version)
@_ensure_index_lock
@accepts(Self(), basestring, basestring, int, bool)
def update_distribution(self, distribution_name, bundle_name, bundle_version, save_previous=True):
if save_previous:
cur_version = self.index.version_for_bundle(bundle_name, distribution_name)
if cur_version is not None and cur_version != bundle_version:
prev_distro = helpers.distro_previous_name(distribution_name)
self.index.update_distribution(prev_distro, bundle_name, cur_version)
self.index.update_distribution(distribution_name, bundle_name, bundle_version)
@_ensure_index_lock
@accepts(Self(), basestring, basestring, bool)
def delete_distribution(self, distribution_name, bundle_name, delete_previous=True):
self.index.delete_distribution(distribution_name, bundle_name)
if delete_previous:
prev_distro = helpers.distro_previous_name(distribution_name)
self.index.delete_distribution(prev_distro, bundle_name)
def get_flavorspec_names(self):
subpath = self.path_helper.config_flavorspec_dir
return [os.path.splitext(p)[0] for p in self._storage.list(prefix=subpath)]
def get_flavorspec(self, flavorspec_name):
subpath = self._ph.path_for_flavorspec_name(flavorspec_name)
bytes = self._read(subpath)
return ZincFlavorSpec.from_bytes(bytes)
def update_flavorspec_from_json_string(self, name, json_string):
subpath = self._ph.path_for_flavorspec_name(name)
self._write(subpath, json_string, raw=True, gzip=False)
def update_flavorspec_from_path(self, src_path, name=None):
with open(src_path, 'r') as src_file:
json_string = src_file.read()
if name is None:
name = os.path.splitext(os.path.basename(src_path))[0]
self.update_flavorspec_from_json_string(name, json_string)
def delete_flavorspec(self, name):
subpath = self._ph.path_for_flavorspec_name(name)
self._storage.delete(subpath)
@_ensure_index_lock
def clean(self, dry_run=False):
verb = 'Would remove' if dry_run else 'Removing'
bundle_descriptors = self.bundle_descriptors()
### 1. scan manifests for ones that aren't in index
dir = self._ph.manifests_dir
for f in self._storage.list(dir):
remove = False
if not (f.endswith(".json") or f.endswith(".json.gz")):
# remove stray files
remove = True
else:
bundle_descr = f.split(".")[0]
if bundle_descr not in bundle_descriptors:
remove = True
if remove:
subpath = os.path.join(dir, f)
log.info("%s %s" % (verb, subpath))
if not dry_run:
self._storage.delete(subpath)
### 2. scan archives for ones that aren't in index
dir = self._ph.archives_dir
for f in self._storage.list(dir):
remove = False
if not (f.endswith(".tar")):
# remove stray files
remove = True
else:
bundle_descr = f.split(".")[0]
if bundle_descr not in bundle_descriptors:
remove = True
if remove:
subpath = os.path.join(dir, f)
log.info("%s %s" % (verb, subpath))
if not dry_run:
self._storage.delete(subpath)
### 3. clean objects
all_objects = set()
for bundle_desc in bundle_descriptors:
manifest = self.manifest_for_bundle_descriptor(bundle_desc)
for f, meta in manifest.files.iteritems():
all_objects.add(meta['sha'])
dir = self._ph.objects_dir
for path in self._storage.list(dir):
basename = os.path.basename(path)
obj = os.path.splitext(basename)[0]
if obj not in all_objects:
subpath = os.path.join(dir, path)
log.info("%s %s" % (verb, subpath))
if not dry_run:
self._storage.delete(subpath)
|
|
# encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import random
from seesaw.config import realize, NumberConfigValue
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
import shutil
import socket
import subprocess
import sys
import time
import string
import seesaw
from seesaw.externalprocess import WgetDownload
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.util import find_executable
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.1.5"):
raise Exception("This pipeline needs seesaw version 0.1.5 or higher.")
###########################################################################
# Find a useful Wget+Lua executable.
#
# WGET_LUA will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WGET_LUA = find_executable(
"Wget+Lua",
["GNU Wget 1.14.lua.20130523-9a5c"],
[
"./wget-lua",
"./wget-lua-warrior",
"./wget-lua-local",
"../wget-lua",
"../../wget-lua",
"/home/warrior/wget-lua",
"/usr/bin/wget-lua"
]
)
if not WGET_LUA:
raise Exception("No usable Wget+Lua found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20140928.02"
USER_AGENT = 'ArchiveTeam'
TRACKER_ID = 'verizon'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
# NEW for 2014! Check if we are behind firewall/proxy
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'Are you behind a firewall/proxy? That is a big no-no!')
raise Exception(
'Are you behind a firewall/proxy? That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
escaped_item_name = item_name.replace(':', '_').replace('/', '_')
dirname = "/".join((item["data_dir"], escaped_item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix, escaped_item_name,
time.strftime("%Y%m%d-%H%M%S"))
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
# NEW for 2014! Check if wget was compiled with zlib support
if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
raise Exception('Please compile wget with zlib support!')
os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
"%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
LUA_SHA1 = get_hash(os.path.join(CWD, 'verizon.lua'))
def stats_id_function(item):
# NEW for 2014! Some accountability hashes and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'lua_hash': LUA_SHA1,
'python_version': sys.version,
}
return d
class WgetArgs(object):
def realize(self, item):
wget_args = [
WGET_LUA,
"-U", USER_AGENT,
"-nv",
"--lua-script", "verizon.lua",
"-o", ItemInterpolation("%(item_dir)s/wget.log"),
"--no-check-certificate",
"--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"),
"--truncate-output",
"-e", "robots=off",
"--no-cookies",
"--rotate-dns",
"--recursive", "--level=inf",
"--no-parent",
"--page-requisites",
"--timeout", "30",
"--tries", "inf",
"--span-hosts",
"--waitretry", "30",
"--domains", "mysite.verizon.net,members.bellatlantic.net",
"--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
"--warc-header", "operator: Archive Team",
"--warc-header", "verizon-dld-script-version: " + VERSION,
"--warc-header", ItemInterpolation("verizon-user: %(item_name)s"),
]
item_name = item['item_name']
assert ':' in item_name
item_type, item_value = item_name.split(':', 1)
item['item_type'] = item_type
item['item_value'] = item_value
assert item_type in ('verizon', 'bellatlantic', 'bellatlantic36pack', 'verizon36pack', 'verizon1296pack', 'bellatlantic1296pack')
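# Added note (not in the original): item names have the form
# '<item_type>:<item_value>'.  For example 'verizon36pack:smith' expands
# below into the 36 start URLs http://mysite.verizon.net/smith0/ ...
# /smithz/ (digits then letters), and the 1296pack variants append every
# two-character suffix (36 * 36 = 1296 URLs).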
if item_type == 'verizon':
wget_args.append('http://mysite.verizon.net/{0}/'.format(item_value))
elif item_type == 'bellatlantic':
wget_args.append('http://members.bellatlantic.net/{0}/'.format(item_value))
elif item_type == 'bellatlantic36pack':
wget_args.append('http://members.bellatlantic.net/{0}0/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}1/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}2/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}3/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}4/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}5/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}6/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}7/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}8/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}9/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}a/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}b/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}c/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}d/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}e/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}f/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}g/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}h/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}i/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}j/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}k/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}l/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}m/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}n/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}o/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}p/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}q/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}r/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}s/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}t/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}u/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}v/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}w/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}x/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}y/'.format(item_value))
wget_args.append('http://members.bellatlantic.net/{0}z/'.format(item_value))
elif item_type == 'verizon36pack':
# Same expansion for mysite.verizon.net: base value plus each of 0-9 and a-z.
for suffix in string.digits + string.lowercase:
    wget_args.append('http://mysite.verizon.net/{0}{1}/'.format(item_value, suffix))
elif item_type == 'verizon1296pack':
# Two-character expansion: every combination of 0-9 and a-z in both trailing
# positions, giving 36 * 36 = 1296 directories per item.
suffixes = string.digits + string.lowercase
for second in suffixes:
    for first in suffixes:
        wget_args.append('http://mysite.verizon.net/{0}{1}{2}/'.format(item_value, first, second))
elif item_type == 'bellatlantic1296pack':
# Same two-character expansion for members.bellatlantic.net.
suffixes = string.digits + string.lowercase
for second in suffixes:
    for first in suffixes:
        wget_args.append('http://members.bellatlantic.net/{0}{1}{2}/'.format(item_value, first, second))
else:
raise Exception('Unknown item type: {0}'.format(item_type))
if 'bind_address' in globals():
wget_args.extend(['--bind-address', globals()['bind_address']])
print('')
print('*** Wget will bind to address {0} ***'.format(
globals()['bind_address']))
print('')
return realize(wget_args, item)
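# The item name is split on the first ':' into an item type and an item
# value; the value is then expanded into one or more directory URLs to crawl.
# A hypothetical item "verizon36pack:ab", for example, would expand into the
# 36 URLs http://mysite.verizon.net/ab0/ through http://mysite.verizon.net/abz/.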
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title="Verizon",
project_html="""
<img class="project-logo" alt="Project logo" src="http://archiveteam.org/images/thumb/b/bc/Verizon_Logo.png/320px-Verizon_Logo.png" height="50px" title=""/>
<h2>mysite.verizon.net <span class="links"><a href="http://mysite.verizon.net/">Website</a> · <a href="http://tracker.archiveteam.org/verizon/">Leaderboard</a></span></h2>
<h2>members.bellatlantic.net <span class="links"><a href="http://members.bellatlantic.net/">Website</a> · <a href="http://tracker.archiveteam.org/verizon/">Leaderboard</a></span></h2>
<p>Archiving websites from mysite.verizon.net and members.bellatlantic.net.</p>
""",
utc_deadline=datetime.datetime(2014, 9, 30, 23, 59, 0)
)
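# The pipeline below runs its stages in order: check the IP address, fetch an
# item from the tracker, prepare the working directories, download with wget
# using the arguments built by WgetArgs, prepare the stats payload, move the
# finished WARC into place, upload it over rsync (throttled by the shared
# rsync_threads setting), and finally report the item back to the tracker as
# done.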
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="verizon"),
WgetDownload(
WgetArgs(),
max_tries=2,
accept_on_exit_code=[0, 4, 7, 8],
env={
"item_dir": ItemValue("item_dir"),
"item_value": ItemValue("item_value"),
"item_type": ItemValue("item_type"),
"downloader": downloader
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz")
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz")
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp",
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
|
|
# -*- coding: utf-8 -*-
"""Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <[email protected]>
__revision__ = "$Id: textwrap.py 9228 2008-12-13 04:50:49Z friedelwolff $"
import string, re
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils).
try:
True, False
except NameError:
(True, False) = (1, 0)
__all__ = ['TextWrapper', 'wrap', 'fill']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
"""
whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
unicode_whitespace_trans = {}
uspace = ord(u' ')
for x in map(ord, _whitespace):
unicode_whitespace_trans[x] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'%|' # gettext handles % like whitespace
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[%s]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
% string.lowercase)
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
drop_whitespace=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.drop_whitespace = drop_whitespace
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
if isinstance(text, str):
text = text.translate(self.whitespace_trans)
elif isinstance(text, unicode):
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
"""
chunks = self.wordsep_re.split(text)
chunks = filter(None, chunks)
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
pat = self.sentence_end_re
while i < len(chunks)-1:
if chunks[i+1] == " " and pat.search(chunks[i]):
chunks[i+1] = "  "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
space_left = max(width - cur_len, 1)
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
# from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
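# The two entry points differ only in their return type (a sketch; the exact
# break points depend on the wordsep_re defined above):
#
#   >>> wrap("The quick brown fox jumps over the lazy dog", width=20)
#   ['The quick brown fox', 'jumps over the lazy', 'dog']
#   >>> fill("The quick brown fox jumps over the lazy dog", width=20)
#   'The quick brown fox\njumps over the lazy\ndog'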
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
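# Worked example of the margin logic above (illustrative input): for
# "    hello\n      world\n" the longest common leading run is four spaces,
# so dedent() returns "hello\n  world\n"; the two extra spaces on the second
# line survive because they are not shared by every line.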
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print dedent("Hello there.\n This is indented.")
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionsOperations(object):
"""ExpressRouteCrossConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteCrossConnectionListResult"]
"""Retrieves all the ExpressRouteCrossConnections in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCrossConnections'} # type: ignore
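# Usage sketch (assumes a configured network management client that exposes
# this operation group as `express_route_cross_connections`):
#
#   for cross_connection in client.express_route_cross_connections.list():
#       print(cross_connection.name)
#
# The returned ItemPaged follows next_link automatically, so the loop visits
# every page of results.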
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteCrossConnectionListResult"]
"""Retrieves all the ExpressRouteCrossConnections in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections'} # type: ignore
def get(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCrossConnection"
"""Gets details about the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group (peering location of the circuit).
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection (service key of the
circuit).
:type cross_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
parameters, # type: "_models.ExpressRouteCrossConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCrossConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRouteCrossConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
parameters, # type: "_models.ExpressRouteCrossConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCrossConnection"]
"""Update the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param parameters: Parameters supplied to the update express route crossConnection operation.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCrossConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore
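# Usage sketch for the long-running operation above (same hypothetical client
# object as in the paging example):
#
#   poller = client.express_route_cross_connections.begin_create_or_update(
#       resource_group_name, cross_connection_name, parameters)
#   cross_connection = poller.result()  # blocks until the LRO completes
#
# result() raises HttpResponseError if the service reports a failure, matching
# the :raises: note in the docstring.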
def update_tags(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
cross_connection_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCrossConnection"
"""Updates an express route cross connection tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the cross connection.
:type cross_connection_name: str
:param cross_connection_parameters: Parameters supplied to update express route cross
connection tags.
:type cross_connection_parameters: ~azure.mgmt.network.v2020_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(cross_connection_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore
def _list_arp_table_initial(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ExpressRouteCircuitsArpTableListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsArpTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._list_arp_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_arp_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
def begin_list_arp_table(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitsArpTableListResult"]
"""Gets the currently advertised ARP table associated with the express route cross connection in a
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitsArpTableListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.ExpressRouteCircuitsArpTableListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsArpTableListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._list_arp_table_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
def _list_routes_table_summary_initial(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._list_routes_table_summary_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCrossConnectionsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_summary_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
def begin_list_routes_table_summary(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]
"""Gets the route table summary associated with the express route cross connection in a resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCrossConnectionsRoutesTableSummaryListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._list_routes_table_summary_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
def _list_routes_table_initial(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._list_routes_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
def begin_list_routes_table(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitsRoutesTableListResult"]
"""Gets the currently advertised routes table associated with the express route cross connection
in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitsRoutesTableListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.ExpressRouteCircuitsRoutesTableListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsRoutesTableListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._list_routes_table_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
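# --- Hedged usage sketch (not part of the generated operations above) ---
# Shows how a caller typically drives the begin_* long-running operations: the
# method returns an LROPoller and result() blocks until the ARM polling (the
# 'location' final-state-via option configured above) completes.  Assumes the
# usual azure-identity / azure-mgmt-network client surface
# (DefaultAzureCredential, NetworkManagementClient); all names passed in are
# placeholders supplied by the caller.
def example_list_routes_table(subscription_id, resource_group, cross_connection, peering, device_path):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    poller = client.express_route_cross_connections.begin_list_routes_table(
        resource_group, cross_connection, peering, device_path
    )
    return poller.result()  # deserialized ExpressRouteCircuitsRoutesTableListResult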
|
|
"""Implementation of :class:`Domain` class. """
from __future__ import print_function, division
from sympy.polys.domains.domainelement import DomainElement
from sympy.core import Basic, sympify
from sympy.core.compatibility import SYMPY_INTS, HAS_GMPY, integer_types, is_sequence
from sympy.polys.polyerrors import UnificationFailed, CoercionFailed, DomainError
from sympy.polys.orderings import lex
from sympy.polys.polyutils import _unify_gens
from sympy.utilities import default_sort_key, public
@public
class Domain(object):
"""Represents an abstract domain. """
dtype = None
zero = None
one = None
has_Ring = False
has_Field = False
has_assoc_Ring = False
has_assoc_Field = False
is_FiniteField = is_FF = False
is_IntegerRing = is_ZZ = False
is_RationalField = is_QQ = False
is_RealField = is_RR = False
is_ComplexField = is_CC = False
is_AlgebraicField = is_Algebraic = False
is_PolynomialRing = is_Poly = False
is_FractionField = is_Frac = False
is_SymbolicDomain = is_EX = False
is_Exact = True
is_Numerical = False
is_Simple = False
is_Composite = False
has_CharacteristicZero = False
rep = None
alias = None
def __init__(self):
raise NotImplementedError
def __str__(self):
return self.rep
def __repr__(self):
return str(self)
def __hash__(self):
return hash((self.__class__.__name__, self.dtype))
def new(self, *args):
return self.dtype(*args)
@property
def tp(self):
return self.dtype
def __call__(self, *args):
"""Construct an element of ``self`` domain from ``args``. """
return self.new(*args)
def normal(self, *args):
return self.dtype(*args)
def convert_from(self, element, base):
"""Convert ``element`` to ``self.dtype`` given the base domain. """
if base.alias is not None:
method = "from_" + base.alias
else:
method = "from_" + base.__class__.__name__
_convert = getattr(self, method)
if _convert is not None:
result = _convert(element, base)
if result is not None:
return result
raise CoercionFailed("can't convert %s of type %s from %s to %s" % (element, type(element), base, self))
def convert(self, element, base=None):
"""Convert ``element`` to ``self.dtype``. """
if base is not None:
return self.convert_from(element, base)
if self.of_type(element):
return element
from sympy.polys.domains import PythonIntegerRing, GMPYIntegerRing, GMPYRationalField, RealField, ComplexField
if isinstance(element, integer_types):
return self.convert_from(element, PythonIntegerRing())
if HAS_GMPY:
integers = GMPYIntegerRing()
if isinstance(element, integers.tp):
return self.convert_from(element, integers)
rationals = GMPYRationalField()
if isinstance(element, rationals.tp):
return self.convert_from(element, rationals)
if isinstance(element, float):
parent = RealField(tol=False)
return self.convert_from(parent(element), parent)
if isinstance(element, complex):
parent = ComplexField(tol=False)
return self.convert_from(parent(element), parent)
if isinstance(element, DomainElement):
return self.convert_from(element, element.parent())
# TODO: implement this in from_ methods
if self.is_Numerical and getattr(element, 'is_ground', False):
return self.convert(element.LC())
if isinstance(element, Basic):
try:
return self.from_sympy(element)
except (TypeError, ValueError):
pass
else: # TODO: remove this branch
if not is_sequence(element):
try:
element = sympify(element)
if isinstance(element, Basic):
return self.from_sympy(element)
except (TypeError, ValueError):
pass
raise CoercionFailed("can't convert %s of type %s to %s" % (element, type(element), self))
def of_type(self, element):
"""Check if ``a`` is of type ``dtype``. """
return isinstance(element, self.tp) # XXX: this isn't correct, e.g. PolyElement
def __contains__(self, a):
"""Check if ``a`` belongs to this domain. """
try:
self.convert(a)
except CoercionFailed:
return False
return True
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
raise NotImplementedError
def from_sympy(self, a):
"""Convert a SymPy object to ``dtype``. """
raise NotImplementedError
def from_FF_python(K1, a, K0):
"""Convert ``ModularInteger(int)`` to ``dtype``. """
return None
def from_ZZ_python(K1, a, K0):
"""Convert a Python ``int`` object to ``dtype``. """
return None
def from_QQ_python(K1, a, K0):
"""Convert a Python ``Fraction`` object to ``dtype``. """
return None
def from_FF_gmpy(K1, a, K0):
"""Convert ``ModularInteger(mpz)`` to ``dtype``. """
return None
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpz`` object to ``dtype``. """
return None
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpq`` object to ``dtype``. """
return None
def from_RealField(K1, a, K0):
"""Convert a real element object to ``dtype``. """
return None
def from_ComplexField(K1, a, K0):
"""Convert a complex element to ``dtype``. """
return None
def from_AlgebraicField(K1, a, K0):
"""Convert an algebraic number to ``dtype``. """
return None
def from_PolynomialRing(K1, a, K0):
"""Convert a polynomial to ``dtype``. """
if a.is_ground:
return K1.convert(a.LC, K0.dom)
def from_FractionField(K1, a, K0):
"""Convert a rational function to ``dtype``. """
return None
def from_ExpressionDomain(K1, a, K0):
"""Convert a ``EX`` object to ``dtype``. """
return K1.from_sympy(a.ex)
def from_GlobalPolynomialRing(K1, a, K0):
"""Convert a polynomial to ``dtype``. """
if a.degree() <= 0:
return K1.convert(a.LC(), K0.dom)
def from_GeneralizedPolynomialRing(K1, a, K0):
return K1.from_FractionField(a, K0)
def unify_with_symbols(K0, K1, symbols):
if (K0.is_Composite and (set(K0.symbols) & set(symbols))) or (K1.is_Composite and (set(K1.symbols) & set(symbols))):
raise UnificationFailed("can't unify %s with %s, given %s generators" % (K0, K1, tuple(symbols)))
return K0.unify(K1)
def unify(K0, K1, symbols=None):
"""
Construct a minimal domain that contains elements of ``K0`` and ``K1``.
Known domains (from smallest to largest):
- ``GF(p)``
- ``ZZ``
- ``QQ``
- ``RR(prec, tol)``
- ``CC(prec, tol)``
- ``ALG(a, b, c)``
- ``K[x, y, z]``
- ``K(x, y, z)``
- ``EX``
"""
if symbols is not None:
return K0.unify_with_symbols(K1, symbols)
if K0 == K1:
return K0
if K0.is_EX:
return K0
if K1.is_EX:
return K1
if K0.is_Composite or K1.is_Composite:
K0_ground = K0.dom if K0.is_Composite else K0
K1_ground = K1.dom if K1.is_Composite else K1
K0_symbols = K0.symbols if K0.is_Composite else ()
K1_symbols = K1.symbols if K1.is_Composite else ()
domain = K0_ground.unify(K1_ground)
symbols = _unify_gens(K0_symbols, K1_symbols)
order = K0.order if K0.is_Composite else K1.order
if ((K0.is_FractionField and K1.is_PolynomialRing or
K1.is_FractionField and K0.is_PolynomialRing) and
(not K0_ground.has_Field or not K1_ground.has_Field) and domain.has_Field):
domain = domain.get_ring()
if K0.is_Composite and (not K1.is_Composite or K0.is_FractionField or K1.is_PolynomialRing):
cls = K0.__class__
else:
cls = K1.__class__
return cls(domain, symbols, order)
def mkinexact(cls, K0, K1):
prec = max(K0.precision, K1.precision)
tol = max(K0.tolerance, K1.tolerance)
return cls(prec=prec, tol=tol)
if K0.is_ComplexField and K1.is_ComplexField:
return mkinexact(K0.__class__, K0, K1)
if K0.is_ComplexField and K1.is_RealField:
return mkinexact(K0.__class__, K0, K1)
if K0.is_RealField and K1.is_ComplexField:
return mkinexact(K1.__class__, K1, K0)
if K0.is_RealField and K1.is_RealField:
return mkinexact(K0.__class__, K0, K1)
if K0.is_ComplexField or K0.is_RealField:
return K0
if K1.is_ComplexField or K1.is_RealField:
return K1
if K0.is_AlgebraicField and K1.is_AlgebraicField:
return K0.__class__(K0.dom.unify(K1.dom), *_unify_gens(K0.orig_ext, K1.orig_ext))
elif K0.is_AlgebraicField:
return K0
elif K1.is_AlgebraicField:
return K1
if K0.is_RationalField:
return K0
if K1.is_RationalField:
return K1
if K0.is_IntegerRing:
return K0
if K1.is_IntegerRing:
return K1
if K0.is_FiniteField and K1.is_FiniteField:
return K0.__class__(max(K0.mod, K1.mod, key=default_sort_key))
from sympy.polys.domains import EX
return EX
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent. """
return isinstance(other, Domain) and self.dtype == other.dtype
def __ne__(self, other):
"""Returns ``False`` if two domains are equivalent. """
return not self.__eq__(other)
def map(self, seq):
"""Rersively apply ``self`` to all elements of ``seq``. """
result = []
for elt in seq:
if isinstance(elt, list):
result.append(self.map(elt))
else:
result.append(self(elt))
return result
def get_ring(self):
"""Returns a ring associated with ``self``. """
raise DomainError('there is no ring associated with %s' % self)
def get_field(self):
"""Returns a field associated with ``self``. """
raise DomainError('there is no field associated with %s' % self)
def get_exact(self):
"""Returns an exact domain associated with ``self``. """
return self
def __getitem__(self, symbols):
"""The mathematical way to make a polynomial ring. """
if hasattr(symbols, '__iter__'):
return self.poly_ring(*symbols)
else:
return self.poly_ring(symbols)
def poly_ring(self, *symbols, **kwargs):
"""Returns a polynomial ring, i.e. `K[X]`. """
from sympy.polys.domains.polynomialring import PolynomialRing
return PolynomialRing(self, symbols, kwargs.get("order", lex))
def frac_field(self, *symbols, **kwargs):
"""Returns a fraction field, i.e. `K(X)`. """
from sympy.polys.domains.fractionfield import FractionField
return FractionField(self, symbols, kwargs.get("order", lex))
def old_poly_ring(self, *symbols, **kwargs):
"""Returns a polynomial ring, i.e. `K[X]`. """
from sympy.polys.domains.old_polynomialring import PolynomialRing
return PolynomialRing(self, *symbols, **kwargs)
def old_frac_field(self, *symbols, **kwargs):
"""Returns a fraction field, i.e. `K(X)`. """
from sympy.polys.domains.old_fractionfield import FractionField
return FractionField(self, *symbols, **kwargs)
def algebraic_field(self, *extension):
"""Returns an algebraic field, i.e. `K(\\alpha, \dots)`. """
raise DomainError("can't create algebraic field over %s" % self)
def inject(self, *symbols):
"""Inject generators into this domain. """
raise NotImplementedError
def is_zero(self, a):
"""Returns True if ``a`` is zero. """
return not a
def is_one(self, a):
"""Returns True if ``a`` is one. """
return a == self.one
def is_positive(self, a):
"""Returns True if ``a`` is positive. """
return a > 0
def is_negative(self, a):
"""Returns True if ``a`` is negative. """
return a < 0
def is_nonpositive(self, a):
"""Returns True if ``a`` is non-positive. """
return a <= 0
def is_nonnegative(self, a):
"""Returns True if ``a`` is non-negative. """
return a >= 0
def abs(self, a):
"""Absolute value of ``a``, implies ``__abs__``. """
return abs(a)
def neg(self, a):
"""Returns ``a`` negated, implies ``__neg__``. """
return -a
def pos(self, a):
"""Returns ``a`` positive, implies ``__pos__``. """
return +a
def add(self, a, b):
"""Sum of ``a`` and ``b``, implies ``__add__``. """
return a + b
def sub(self, a, b):
"""Difference of ``a`` and ``b``, implies ``__sub__``. """
return a - b
def mul(self, a, b):
"""Product of ``a`` and ``b``, implies ``__mul__``. """
return a * b
def pow(self, a, b):
"""Raise ``a`` to power ``b``, implies ``__pow__``. """
return a ** b
def exquo(self, a, b):
"""Exact quotient of ``a`` and ``b``, implies something. """
raise NotImplementedError
def quo(self, a, b):
"""Quotient of ``a`` and ``b``, implies something. """
raise NotImplementedError
def rem(self, a, b):
"""Remainder of ``a`` and ``b``, implies ``__mod__``. """
raise NotImplementedError
def div(self, a, b):
"""Division of ``a`` and ``b``, implies something. """
raise NotImplementedError
def invert(self, a, b):
"""Returns inversion of ``a mod b``, implies something. """
raise NotImplementedError
def revert(self, a):
"""Returns ``a**(-1)`` if possible. """
raise NotImplementedError
def numer(self, a):
"""Returns numerator of ``a``. """
raise NotImplementedError
def denom(self, a):
"""Returns denominator of ``a``. """
raise NotImplementedError
def half_gcdex(self, a, b):
"""Half extended GCD of ``a`` and ``b``. """
s, t, h = self.gcdex(a, b)
return s, h
def gcdex(self, a, b):
"""Extended GCD of ``a`` and ``b``. """
raise NotImplementedError
def cofactors(self, a, b):
"""Returns GCD and cofactors of ``a`` and ``b``. """
gcd = self.gcd(a, b)
cfa = self.quo(a, gcd)
cfb = self.quo(b, gcd)
return gcd, cfa, cfb
def gcd(self, a, b):
"""Returns GCD of ``a`` and ``b``. """
raise NotImplementedError
def lcm(self, a, b):
"""Returns LCM of ``a`` and ``b``. """
raise NotImplementedError
def log(self, a, b):
"""Returns b-base logarithm of ``a``. """
raise NotImplementedError
def sqrt(self, a):
"""Returns square root of ``a``. """
raise NotImplementedError
def evalf(self, a, prec=None, **args):
"""Returns numerical approximation of ``a``. """
if prec is None:
return self.to_sympy(a).evalf(**args)
else:
return self.to_sympy(a).evalf(prec, **args)
n = evalf
def real(self, a):
return a
def imag(self, a):
return self.zero
def almosteq(self, a, b, tolerance=None):
"""Check if ``a`` and ``b`` are almost equal. """
return a == b
def characteristic(self):
"""Return the characteristic of this domain. """
raise NotImplementedError('characteristic()')
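# --- Hedged usage sketch (illustrative; not part of the class above) ---
# Demonstrates the coercion and unification behaviour documented in convert(),
# __contains__() and unify() using the concrete ZZ/QQ domains that ship with
# SymPy; treat it as a sketch rather than a test.
if __name__ == "__main__":
    from sympy import Rational
    from sympy.abc import x
    from sympy.polys.domains import ZZ, QQ

    # convert() dispatches to a "from_<alias>" method on the target domain.
    print(QQ.convert(ZZ(7), ZZ))           # 7 as a QQ element
    # __contains__ means "can this object be coerced into the domain?"
    print(7 in ZZ, Rational(1, 2) in ZZ)   # True False
    # unify() picks the smallest domain containing both operands.
    print(ZZ.unify(QQ))                    # QQ
    print(QQ.unify(ZZ.poly_ring(x)))       # QQ[x]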
|
|
import warnings
import numpy as np
import pandas as pd
from tlz import partition
from ._compat import PANDAS_GT_131
# preserve compatibility while moving dispatch objects
from .dispatch import ( # noqa: F401
concat,
concat_dispatch,
group_split_dispatch,
hash_object_dispatch,
is_categorical_dtype,
is_categorical_dtype_dispatch,
tolist,
tolist_dispatch,
union_categoricals,
)
from .utils import is_dataframe_like, is_index_like, is_series_like
# cuDF may try to import old dispatch functions
hash_df = hash_object_dispatch
group_split = group_split_dispatch
# ---------------------------------
# indexing
# ---------------------------------
def loc(df, iindexer, cindexer=None):
"""
.loc for known divisions
"""
if cindexer is None:
return df.loc[iindexer]
else:
return df.loc[iindexer, cindexer]
def iloc(df, cindexer=None):
return df.iloc[:, cindexer]
def try_loc(df, iindexer, cindexer=None):
"""
.loc for unknown divisions
"""
try:
return loc(df, iindexer, cindexer)
except KeyError:
return df.head(0).loc[:, cindexer]
def boundary_slice(df, start, stop, right_boundary=True, left_boundary=True, kind=None):
"""Index slice start/stop. Can switch include/exclude boundaries.
Examples
--------
>>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])
>>> boundary_slice(df, 2, None)
x
2 20
2 30
3 40
4 50
>>> boundary_slice(df, 1, 3)
x
1 10
2 20
2 30
3 40
>>> boundary_slice(df, 1, 3, right_boundary=False)
x
1 10
2 20
2 30
Empty input DataFrames are returned unchanged
>>> df_empty = pd.DataFrame()
>>> boundary_slice(df_empty, 1, 3)
Empty DataFrame
Columns: []
Index: []
"""
if len(df.index) == 0:
return df
if PANDAS_GT_131:
if kind is not None:
warnings.warn(
"The `kind` argument is no longer used/supported. "
"It will be dropped in a future release.",
category=FutureWarning,
)
kind_opts = {}
kind = "loc"
else:
kind = kind or "loc"
kind_opts = {"kind": kind}
if kind == "loc" and not df.index.is_monotonic_increasing:
# Pandas treats missing keys differently for label-slicing
# on monotonic vs. non-monotonic indexes
# If the index is monotonic, `df.loc[start:stop]` is fine.
# If it's not, `df.loc[start:stop]` raises when `start` is missing
if start is not None:
if left_boundary:
df = df[df.index >= start]
else:
df = df[df.index > start]
if stop is not None:
if right_boundary:
df = df[df.index <= stop]
else:
df = df[df.index < stop]
return df
result = getattr(df, kind)[start:stop]
if not right_boundary and stop is not None:
right_index = result.index.get_slice_bound(stop, "left", **kind_opts)
result = result.iloc[:right_index]
if not left_boundary and start is not None:
left_index = result.index.get_slice_bound(start, "right", **kind_opts)
result = result.iloc[left_index:]
return result
def index_count(x):
# Workaround since Index doesn't implement `.count`
return pd.notnull(x).sum()
def mean_aggregate(s, n):
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
return s / n
except ZeroDivisionError:
return np.float64(np.nan)
def wrap_var_reduction(array_var, index):
if isinstance(array_var, np.ndarray) or isinstance(array_var, list):
return pd.Series(array_var, index=index)
return array_var
def wrap_skew_reduction(array_skew, index):
if isinstance(array_skew, np.ndarray) or isinstance(array_skew, list):
return pd.Series(array_skew, index=index)
return array_skew
def wrap_kurtosis_reduction(array_kurtosis, index):
if isinstance(array_kurtosis, np.ndarray) or isinstance(array_kurtosis, list):
return pd.Series(array_kurtosis, index=index)
return array_kurtosis
def var_mixed_concat(numeric_var, timedelta_var, columns):
vars = pd.concat([numeric_var, timedelta_var])
return vars.reindex(index=columns)
def describe_aggregate(values):
assert len(values) > 0
# arrange categorical and numeric stats
names = []
values_indexes = sorted((x.index for x in values), key=len)
for idxnames in values_indexes:
for name in idxnames:
if name not in names:
names.append(name)
return pd.concat(values, axis=1, sort=False).reindex(names)
def describe_numeric_aggregate(
stats, name=None, is_timedelta_col=False, is_datetime_col=False
):
assert len(stats) == 6
count, mean, std, min, q, max = stats
if is_series_like(count):
typ = type(count.to_frame())
else:
typ = type(q)
if is_timedelta_col:
mean = pd.to_timedelta(mean)
std = pd.to_timedelta(std)
min = pd.to_timedelta(min)
max = pd.to_timedelta(max)
q = q.apply(lambda x: pd.to_timedelta(x))
if is_datetime_col:
# mean is not implemented for datetime
min = pd.to_datetime(min)
max = pd.to_datetime(max)
q = q.apply(lambda x: pd.to_datetime(x))
if is_datetime_col:
part1 = typ([count, min], index=["count", "min"])
else:
part1 = typ([count, mean, std, min], index=["count", "mean", "std", "min"])
q.index = [f"{l * 100:g}%" for l in tolist(q.index)]
if is_series_like(q) and typ != type(q):
q = q.to_frame()
part3 = typ([max], index=["max"])
result = concat([part1, q, part3], sort=False)
if is_series_like(result):
result.name = name
return result
def describe_nonnumeric_aggregate(stats, name):
args_len = len(stats)
is_datetime_column = args_len == 5
is_categorical_column = args_len == 3
assert is_datetime_column or is_categorical_column
if is_categorical_column:
nunique, count, top_freq = stats
else:
nunique, count, top_freq, min_ts, max_ts = stats
# input was empty dataframe/series
if len(top_freq) == 0:
data = [0, 0]
index = ["count", "unique"]
dtype = None
data.extend([None, None])
index.extend(["top", "freq"])
dtype = object
result = pd.Series(data, index=index, dtype=dtype, name=name)
return result
top = top_freq.index[0]
freq = top_freq.iloc[0]
index = ["unique", "count", "top", "freq"]
values = [nunique, count]
if is_datetime_column:
tz = top.tz
top = pd.Timestamp(top)
if top.tzinfo is not None and tz is not None:
# Don't tz_localize(None) if key is already tz-aware
top = top.tz_convert(tz)
else:
top = top.tz_localize(tz)
first = pd.Timestamp(min_ts, tz=tz)
last = pd.Timestamp(max_ts, tz=tz)
index.extend(["first", "last"])
values.extend([top, freq, first, last])
else:
values.extend([top, freq])
return pd.Series(values, index=index, name=name)
def _cum_aggregate_apply(aggregate, x, y):
"""Apply aggregation function within a cumulative aggregation
Parameters
----------
aggregate: function (a, a) -> a
The aggregation function, like add, which is used to combine subsequent
results
x:
y:
"""
if y is None:
return x
else:
return aggregate(x, y)
def cumsum_aggregate(x, y):
if x is None:
return y
elif y is None:
return x
else:
return x + y
def cumprod_aggregate(x, y):
if x is None:
return y
elif y is None:
return x
else:
return x * y
def cummin_aggregate(x, y):
if is_series_like(x) or is_dataframe_like(x):
return x.where((x < y) | x.isnull(), y, axis=x.ndim - 1)
else: # scalar
return x if x < y else y
def cummax_aggregate(x, y):
if is_series_like(x) or is_dataframe_like(x):
return x.where((x > y) | x.isnull(), y, axis=x.ndim - 1)
else: # scalar
return x if x > y else y
def assign(df, *pairs):
# Only deep copy when updating an element
# (to avoid modifying the original)
pairs = dict(partition(2, pairs))
deep = bool(set(pairs) & set(df.columns))
df = df.copy(deep=bool(deep))
for name, val in pairs.items():
df[name] = val
return df
def unique(x, series_name=None):
out = x.unique()
# out can be either an np.ndarray or may already be a series
# like object. When out is an np.ndarray, it must be wrapped.
if not (is_series_like(out) or is_index_like(out)):
out = pd.Series(out, name=series_name)
return out
def value_counts_combine(x, sort=True, ascending=False, **groupby_kwargs):
# sort and ascending don't actually matter until the agg step
return x.groupby(level=0, **groupby_kwargs).sum()
def value_counts_aggregate(
x, sort=True, ascending=False, normalize=False, total_length=None, **groupby_kwargs
):
out = value_counts_combine(x, **groupby_kwargs)
if normalize:
out /= total_length if total_length is not None else out.sum()
if sort:
return out.sort_values(ascending=ascending)
return out
def nbytes(x):
return x.nbytes
def size(x):
return x.size
def values(df):
return df.values
def sample(df, state, frac, replace):
rs = np.random.RandomState(state)
return df.sample(random_state=rs, frac=frac, replace=replace) if len(df) > 0 else df
def drop_columns(df, columns, dtype):
df = df.drop(columns, axis=1)
df.columns = df.columns.astype(dtype)
return df
def fillna_check(df, method, check=True):
out = df.fillna(method=method)
if check and out.isnull().values.all(axis=0).any():
raise ValueError(
"All NaN partition encountered in `fillna`. Try "
"using ``df.repartition`` to increase the partition "
"size, or specify `limit` in `fillna`."
)
return out
# ---------------------------------
# reshape
# ---------------------------------
def pivot_agg(df):
return df.groupby(level=0).sum()
def pivot_agg_first(df):
return df.groupby(level=0).first()
def pivot_agg_last(df):
return df.groupby(level=0).last()
def pivot_sum(df, index, columns, values):
return pd.pivot_table(
df, index=index, columns=columns, values=values, aggfunc="sum", dropna=False
)
def pivot_count(df, index, columns, values):
# we cannot determine the dtype until all partitions are concatenated.
# to keep the dtype deterministic, always coerce to np.float64
return pd.pivot_table(
df, index=index, columns=columns, values=values, aggfunc="count", dropna=False
).astype(np.float64)
def pivot_first(df, index, columns, values):
return pd.pivot_table(
df, index=index, columns=columns, values=values, aggfunc="first", dropna=False
)
def pivot_last(df, index, columns, values):
return pd.pivot_table(
df, index=index, columns=columns, values=values, aggfunc="last", dropna=False
)
def assign_index(df, ind):
df = df.copy()
df.index = ind
return df
def monotonic_increasing_chunk(x):
data = x if is_index_like(x) else x.iloc
return pd.DataFrame(
data=[[x.is_monotonic_increasing, data[0], data[-1]]],
columns=["monotonic", "first", "last"],
)
def monotonic_increasing_aggregate(concatenated):
bounds_are_monotonic = pd.Series(
concatenated[["first", "last"]].to_numpy().ravel()
).is_monotonic_increasing
return concatenated["monotonic"].all() and bounds_are_monotonic
def monotonic_decreasing_chunk(x):
data = x if is_index_like(x) else x.iloc
return pd.DataFrame(
data=[[x.is_monotonic_decreasing, data[0], data[-1]]],
columns=["monotonic", "first", "last"],
)
def monotonic_decreasing_aggregate(concatenated):
bounds_are_monotonic = pd.Series(
concatenated[["first", "last"]].to_numpy().ravel()
).is_monotonic_decreasing
return concatenated["monotonic"].all() and bounds_are_monotonic
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for preservation of unknown fields in the pure Python implementation."""
__author__ = '[email protected] (Bohdan Koval)'
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import encoder
from google.protobuf.internal import message_set_extensions_pb2
from google.protobuf.internal import missing_enum_values_pb2
from google.protobuf.internal import test_util
from google.protobuf.internal import type_checkers
def SkipIfCppImplementation(func):
return unittest.skipIf(
api_implementation.Type() == 'cpp' and api_implementation.Version() == 2,
'C++ implementation does not expose unknown fields to Python')(func)
class UnknownFieldsTest(unittest.TestCase):
def setUp(self):
self.descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
self.all_fields = unittest_pb2.TestAllTypes()
test_util.SetAllFields(self.all_fields)
self.all_fields_data = self.all_fields.SerializeToString()
self.empty_message = unittest_pb2.TestEmptyMessage()
self.empty_message.ParseFromString(self.all_fields_data)
def testSerialize(self):
data = self.empty_message.SerializeToString()
# Don't use assertEqual because we don't want to dump raw binary data to
# stdout.
self.assertTrue(data == self.all_fields_data)
def testSerializeProto3(self):
# Verify that proto3 doesn't preserve unknown fields.
message = unittest_proto3_arena_pb2.TestEmptyMessage()
message.ParseFromString(self.all_fields_data)
self.assertEqual(0, len(message.SerializeToString()))
def testByteSize(self):
self.assertEqual(self.all_fields.ByteSize(), self.empty_message.ByteSize())
def testListFields(self):
# Make sure ListFields doesn't return unknown fields.
self.assertEqual(0, len(self.empty_message.ListFields()))
def testSerializeMessageSetWireFormatUnknownExtension(self):
# Create a message using the message set wire format with an unknown
# message.
raw = unittest_mset_pb2.RawMessageSet()
# Add an unknown extension.
item = raw.item.add()
item.type_id = 98418603
message1 = message_set_extensions_pb2.TestMessageSetExtension1()
message1.i = 12345
item.message = message1.SerializeToString()
serialized = raw.SerializeToString()
# Parse message using the message set wire format.
proto = message_set_extensions_pb2.TestMessageSet()
proto.MergeFromString(serialized)
# Verify that the unknown extension is serialized unchanged
reserialized = proto.SerializeToString()
new_raw = unittest_mset_pb2.RawMessageSet()
new_raw.MergeFromString(reserialized)
self.assertEqual(raw, new_raw)
def testEquals(self):
message = unittest_pb2.TestEmptyMessage()
message.ParseFromString(self.all_fields_data)
self.assertEqual(self.empty_message, message)
self.all_fields.ClearField('optional_string')
message.ParseFromString(self.all_fields.SerializeToString())
self.assertNotEqual(self.empty_message, message)
def testDiscardUnknownFields(self):
self.empty_message.DiscardUnknownFields()
self.assertEqual(b'', self.empty_message.SerializeToString())
# Test message field and repeated message field.
message = unittest_pb2.TestAllTypes()
other_message = unittest_pb2.TestAllTypes()
other_message.optional_string = 'discard'
message.optional_nested_message.ParseFromString(
other_message.SerializeToString())
message.repeated_nested_message.add().ParseFromString(
other_message.SerializeToString())
self.assertNotEqual(
b'', message.optional_nested_message.SerializeToString())
self.assertNotEqual(
b'', message.repeated_nested_message[0].SerializeToString())
message.DiscardUnknownFields()
self.assertEqual(b'', message.optional_nested_message.SerializeToString())
self.assertEqual(
b'', message.repeated_nested_message[0].SerializeToString())
class UnknownFieldsAccessorsTest(unittest.TestCase):
def setUp(self):
self.descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
self.all_fields = unittest_pb2.TestAllTypes()
test_util.SetAllFields(self.all_fields)
self.all_fields_data = self.all_fields.SerializeToString()
self.empty_message = unittest_pb2.TestEmptyMessage()
self.empty_message.ParseFromString(self.all_fields_data)
if api_implementation.Type() != 'cpp':
# _unknown_fields is an implementation detail.
self.unknown_fields = self.empty_message._unknown_fields
# All the tests that use GetField() check an implementation detail of the
# Python implementation, which stores unknown fields as serialized strings.
# These tests are skipped by the C++ implementation: it's enough to check that
# the message is correctly serialized.
def GetField(self, name):
field_descriptor = self.descriptor.fields_by_name[name]
wire_type = type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type]
field_tag = encoder.TagBytes(field_descriptor.number, wire_type)
result_dict = {}
for tag_bytes, value in self.unknown_fields:
if tag_bytes == field_tag:
decoder = unittest_pb2.TestAllTypes._decoders_by_tag[tag_bytes][0]
decoder(value, 0, len(value), self.all_fields, result_dict)
return result_dict[field_descriptor]
@SkipIfCppImplementation
def testEnum(self):
value = self.GetField('optional_nested_enum')
self.assertEqual(self.all_fields.optional_nested_enum, value)
@SkipIfCppImplementation
def testRepeatedEnum(self):
value = self.GetField('repeated_nested_enum')
self.assertEqual(self.all_fields.repeated_nested_enum, value)
@SkipIfCppImplementation
def testVarint(self):
value = self.GetField('optional_int32')
self.assertEqual(self.all_fields.optional_int32, value)
@SkipIfCppImplementation
def testFixed32(self):
value = self.GetField('optional_fixed32')
self.assertEqual(self.all_fields.optional_fixed32, value)
@SkipIfCppImplementation
def testFixed64(self):
value = self.GetField('optional_fixed64')
self.assertEqual(self.all_fields.optional_fixed64, value)
@SkipIfCppImplementation
def testLengthDelimited(self):
value = self.GetField('optional_string')
self.assertEqual(self.all_fields.optional_string, value)
@SkipIfCppImplementation
def testGroup(self):
value = self.GetField('optionalgroup')
self.assertEqual(self.all_fields.optionalgroup, value)
def testCopyFrom(self):
message = unittest_pb2.TestEmptyMessage()
message.CopyFrom(self.empty_message)
self.assertEqual(message.SerializeToString(), self.all_fields_data)
def testMergeFrom(self):
message = unittest_pb2.TestAllTypes()
message.optional_int32 = 1
message.optional_uint32 = 2
source = unittest_pb2.TestEmptyMessage()
source.ParseFromString(message.SerializeToString())
message.ClearField('optional_int32')
message.optional_int64 = 3
message.optional_uint32 = 4
destination = unittest_pb2.TestEmptyMessage()
destination.ParseFromString(message.SerializeToString())
destination.MergeFrom(source)
# Check that the fields were correctly merged, even though they are stored
# in the unknown fields set.
message.ParseFromString(destination.SerializeToString())
self.assertEqual(message.optional_int32, 1)
self.assertEqual(message.optional_uint32, 2)
self.assertEqual(message.optional_int64, 3)
def testClear(self):
self.empty_message.Clear()
# All cleared, even unknown fields.
self.assertEqual(self.empty_message.SerializeToString(), b'')
def testUnknownExtensions(self):
message = unittest_pb2.TestEmptyMessageWithExtensions()
message.ParseFromString(self.all_fields_data)
self.assertEqual(message.SerializeToString(), self.all_fields_data)
class UnknownEnumValuesTest(unittest.TestCase):
def setUp(self):
self.descriptor = missing_enum_values_pb2.TestEnumValues.DESCRIPTOR
self.message = missing_enum_values_pb2.TestEnumValues()
self.message.optional_nested_enum = (
missing_enum_values_pb2.TestEnumValues.ZERO)
self.message.repeated_nested_enum.extend([
missing_enum_values_pb2.TestEnumValues.ZERO,
missing_enum_values_pb2.TestEnumValues.ONE,
])
self.message.packed_nested_enum.extend([
missing_enum_values_pb2.TestEnumValues.ZERO,
missing_enum_values_pb2.TestEnumValues.ONE,
])
self.message_data = self.message.SerializeToString()
self.missing_message = missing_enum_values_pb2.TestMissingEnumValues()
self.missing_message.ParseFromString(self.message_data)
if api_implementation.Type() != 'cpp':
# _unknown_fields is an implementation detail.
self.unknown_fields = self.missing_message._unknown_fields
# All the tests that use GetField() check an implementation detail of the
# Python implementation, which stores unknown fields as serialized strings.
# These tests are skipped by the C++ implementation: it's enough to check that
# the message is correctly serialized.
def GetField(self, name):
field_descriptor = self.descriptor.fields_by_name[name]
wire_type = type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type]
field_tag = encoder.TagBytes(field_descriptor.number, wire_type)
result_dict = {}
for tag_bytes, value in self.unknown_fields:
if tag_bytes == field_tag:
decoder = missing_enum_values_pb2.TestEnumValues._decoders_by_tag[
tag_bytes][0]
decoder(value, 0, len(value), self.message, result_dict)
return result_dict[field_descriptor]
def testUnknownParseMismatchEnumValue(self):
just_string = missing_enum_values_pb2.JustString()
just_string.dummy = 'blah'
missing = missing_enum_values_pb2.TestEnumValues()
# The parse is invalid, storing the string proto into the set of
# unknown fields.
missing.ParseFromString(just_string.SerializeToString())
# Fetching the enum field shouldn't crash, instead returning the
# default value.
self.assertEqual(missing.optional_nested_enum, 0)
@SkipIfCppImplementation
def testUnknownEnumValue(self):
self.assertFalse(self.missing_message.HasField('optional_nested_enum'))
value = self.GetField('optional_nested_enum')
self.assertEqual(self.message.optional_nested_enum, value)
@SkipIfCppImplementation
def testUnknownRepeatedEnumValue(self):
value = self.GetField('repeated_nested_enum')
self.assertEqual(self.message.repeated_nested_enum, value)
@SkipIfCppImplementation
def testUnknownPackedEnumValue(self):
value = self.GetField('packed_nested_enum')
self.assertEqual(self.message.packed_nested_enum, value)
def testRoundTrip(self):
new_message = missing_enum_values_pb2.TestEnumValues()
new_message.ParseFromString(self.missing_message.SerializeToString())
self.assertEqual(self.message, new_message)
if __name__ == '__main__':
unittest.main()
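# --- Hedged usage sketch (illustrative; mirrors what the tests above assert) ---
# Proto2 keeps fields it does not recognise as raw (tag, bytes) pairs and
# re-emits them on serialization, so parsing a fully populated TestAllTypes
# into TestEmptyMessage and serializing again is lossless.
def _demo_unknown_field_round_trip():
    full = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(full)
    wire = full.SerializeToString()

    empty = unittest_pb2.TestEmptyMessage()   # declares no fields at all
    empty.ParseFromString(wire)               # everything becomes unknown fields
    assert empty.SerializeToString() == wire  # ...and is preserved verbatim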
|
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
from test_framework.mininode import *
def build_block_on_tip(node = None, txs = None, prev_height = None, prev_hash = None, prev_mtp = None):
prev_height = prev_height or node.getblockcount()
prev_hash = prev_hash or node.getbestblockhash()
prev_mtp = prev_mtp or node.getblockheader(prev_hash)['mediantime']
new_height = prev_height + 1
new_mtp = prev_mtp + 1
block = create_block(int(prev_hash, 16), create_coinbase(absoluteHeight = new_height), new_mtp)
block.nVersion = 4
if txs is not None:
for tx in txs:
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return { "block" : block, "height" : new_height, "hash" : block.hash, "mtp" : new_mtp }
def assert_tip_is(sha256, xt_node, test_node):
test_node.sync_with_ping()
assert_equal(int(xt_node.getbestblockhash(), 16), sha256)
def create_utxos(test_node, xt_node, num_utxos):
# Generate 100 blocks, so we get enough coinbase depth on first
blocks = [ build_block_on_tip(xt_node) ]
for _ in range(100):
prev = blocks[-1]
blocks.append(build_block_on_tip(
prev_height = prev["height"],
prev_hash = prev["hash"],
prev_mtp = prev["mtp"]))
for b in blocks:
test_node.send_message(msg_block(b["block"]))
assert_tip_is(blocks[-1]["block"].sha256, xt_node, test_node)
utxos = [ ]
UTXOS_PER_BLOCK = 100
pingpong = 0
while (len(utxos) < num_utxos):
coinbase = blocks.pop(0)["block"].vtx[0]
# Create anyone-can-spend utxos
total_value = coinbase.vout[0].nValue
out_value = total_value // UTXOS_PER_BLOCK
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(coinbase.sha256, 0), b''))
for i in range(UTXOS_PER_BLOCK):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
for i in range(UTXOS_PER_BLOCK):
utxos.append({ "sha256" : tx.sha256, "i" : i, "value" : out_value})
tip = blocks[-1]
blocks.append(build_block_on_tip(txs = [tx],
prev_height = tip["height"],
prev_hash = tip["hash"],
prev_mtp = tip["mtp"]))
new_tip = blocks[-1]["block"]
# pingpongs are slow, but we can't blast too many blocks at the node at a time
if pingpong % 100 == 0:
test_node.send_and_ping(msg_block(new_tip))
else:
test_node.send_message(msg_block(new_tip))
pingpong += 1
assert_tip_is(blocks[-1]["block"].sha256, xt_node, test_node)
assert_equal(int(xt_node.getbestblockhash(), 16), blocks[-1]["block"].sha256)
return utxos
# Creates a tx that spends one input (and has no outputs)
def create_small_tx(utxo):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo["sha256"], utxo["i"]), b''))
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx.rehash()
return tx
# TestNode: A peer we use to send messages to bitcoind, and store responses.
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_sendcmpct = None
self.last_getheaders = None
self.last_headers = None
self.last_getblocktxn = None
self.last_cmpctblock = None
self.last_blocktxn = None
def on_sendcmpct(self, conn, message):
self.last_sendcmpct = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_headers(self, conn, message):
self.last_headers = message
def on_getblocktxn(self, conn, message):
self.last_getblocktxn = message
def on_cmpctblock(self, conn, message):
self.last_cmpctblock = message
def on_blocktxn(self, conn, message):
self.last_blocktxn = message
def on_inv(self, conn, message):
pass
def handshake(self):
self.wait_for_verack()
# Exchange sendcmpct
got_sendcmpct = wait_until(lambda: self.last_sendcmpct != None)
assert(got_sendcmpct)
sendcmpct = msg_sendcmpct()
sendcmpct.version = 1
sendcmpct.announce = True
self.send_and_ping(sendcmpct)
# Exchange headers (just mirror header request/response)
got_getheaders = wait_until(lambda: self.last_getheaders != None)
assert(got_getheaders)
self.send_message(self.last_getheaders)
got_headers = wait_until(lambda: self.last_headers != None)
assert(got_headers)
self.send_message(self.last_headers)
class HFBumpTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def run_test(self):
xt_node = self.nodes[0]
# generate a block to get out of IBD
xt_node.generate(1)
test_node = TestNode()
test_node.add_connection(NodeConn('127.0.0.1', p2p_port(0), xt_node, test_node))
NetworkThread().start()
test_node.handshake()
transactions = 128000
self._test_prefilled_limits(xt_node, test_node, transactions)
self._test_getblocktxn_limits(xt_node, test_node, transactions)
def setup_network(self, split = False):
self.extra_args = [['-debug=thin']]
self.nodes = self.setup_nodes()
self.is_network_split = split
def _prepare_block(self, xt_node, test_node, transactions):
print("Creating UTXOS...")
utxos = create_utxos(test_node, xt_node, transactions)
print("Generating transactions...")
txs = [ create_small_tx(u) for u in utxos ]
print("Building block with %d transactions..." % len(txs))
block = build_block_on_tip(node = xt_node, txs = txs)["block"]
return block
def _test_prefilled_limits(self, xt_node, test_node, transactions):
print("Testing prefilled limits")
block = self._prepare_block(xt_node, test_node, transactions)
print("Sending compact block...")
# Prefill coinbase + the last transaction.
# This checks that PrefilledTransaction::index can handle large offsets.
prefilled = [0, len(block.vtx) - 1]
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
print("Wait for getblocktxn request...")
got_getblocktxn = wait_until(lambda: test_node.last_getblocktxn, timeout=30)
assert(got_getblocktxn)
absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
expected = [i for i in range(1, len(block.vtx))]
assert_equal(expected, absolute_indexes)
print("Sending blocktxn...")
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
for i in expected:
msg.block_transactions.transactions.append(block.vtx[i])
test_node.send_and_ping(msg)
assert_tip_is(block.sha256, xt_node, test_node)
def _test_getblocktxn_limits(self, xt_node, test_node, transactions):
print("Testing getblocktxn limits")
block = self._prepare_block(xt_node, test_node, transactions)
test_node.last_cmpctblock = None
test_node.last_blocktxn = None
print("Sending block...")
test_node.send_and_ping(msg_block(block))
assert_tip_is(block.sha256, xt_node, test_node)
print("Wait for compact block announcement...")
got_cmpctblock = wait_until(lambda: test_node.last_cmpctblock != None)
print("Sending getblocktxn, requesting coinbase + last transaction")
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0, len(block.vtx)-2])
test_node.send_message(msg)
print("Waiting for blocktxn")
got_blocktxn = wait_until(lambda: test_node.last_blocktxn != None)
assert_equal(2, len(test_node.last_blocktxn.block_transactions.transactions))
coinbase = test_node.last_blocktxn.block_transactions.transactions[0]
last = test_node.last_blocktxn.block_transactions.transactions[1]
coinbase.calc_sha256()
last.calc_sha256()
assert_equal(block.vtx[0].hash, coinbase.hash)
assert_equal(block.vtx[-1].hash, last.hash)
if __name__ == '__main__':
HFBumpTest().main()
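# --- Hedged sketch (illustrative; not part of the test above) ---
# The getblocktxn request checked in _test_prefilled_limits carries its
# transaction positions differentially encoded (BIP 152): each stored value is
# the gap to the previous absolute index minus one, which is what the test
# framework's to_absolute() undoes.  The helpers below sketch that transform.
def to_differential(absolute_indexes):
    out, last = [], -1
    for idx in absolute_indexes:
        out.append(idx - last - 1)
        last = idx
    return out

def to_absolute(differential_indexes):
    out, last = [], -1
    for gap in differential_indexes:
        last = last + gap + 1
        out.append(last)
    return out

# to_differential([1, 2, 3, 10]) == [1, 0, 0, 6] and to_absolute([1, 0, 0, 6]) == [1, 2, 3, 10]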
|
|
from __future__ import print_function
import logging # debug < info < warn < error < critical # from https://docs.python.org/3/howto/logging-cookbook.html
import traceback
import theano
import theano.tensor as T
from tqdm import tqdm
logger_combinedtools = logging.getLogger('combined.tools')
logger_combinedtools.setLevel(logging.DEBUG)
from general_tools import *
import os
import time
import lasagne
import lasagne.layers as L
import lasagne.objectives as LO
import numpy as np
import preprocessingCombined
class NeuralNetwork:
network = None
training_fn = None
best_param = None
best_error = 100
curr_epoch, best_epoch = 0, 0
X = None
Y = None
network_train_info = [[], [], []]
def __init__(self, architecture, dataset=None, loadPerSpeaker = True,
batch_size=1, num_features=39, num_output_units=39,
lstm_hidden_list=(100,), bidirectional=True,
cnn_network="google", cnn_features='dense', lipRNN_hidden_list=None, lipRNN_bidirectional=True,
dense_hidden_list=(512,),
seed=int(time.time()), model_paths={}, debug=False, verbose=False, logger=logger_combinedtools):
self.loadPerSpeaker = loadPerSpeaker
self.model_paths = model_paths
self.num_output_units = num_output_units
self.num_features = num_features
self.batch_size = batch_size
self.epochsNotImproved = 0 # keep track, to know when to stop training
if architecture == "combined":
if dataset != None:
images_train, mfccs_train, audioLabels_train, validLabels_train, validAudioFrames_train = dataset
self.images = images_train[0] # images are stored per video file. batch_size is for audio
self.mfccs = mfccs_train[:batch_size]
self.audioLabels = audioLabels_train[:batch_size]
self.validLabels = validLabels_train[:batch_size]
self.validAudioFrames = validAudioFrames_train[:batch_size]
self.masks = generate_masks(inputs=self.mfccs, valid_frames=self.validAudioFrames,
batch_size=len(self.mfccs),
logger=logger_combinedtools)
self.mfccs = pad_sequences_X(self.mfccs) # shouldn't change shape because batch_size == 1
self.audioLabels = pad_sequences_y(self.audioLabels) # these aren't actually used
self.validLabels = pad_sequences_y(self.validLabels)
self.validAudioFrames = pad_sequences_y(self.validAudioFrames)
if verbose:
logger.debug('images.shape: %s', len(self.images))
logger.debug('images[0].shape: %s', self.images[0].shape)
logger.debug('images[0][0][0].type: %s', type(self.images[0][0][0]))
logger.debug('y.shape: %s', self.audioLabels.shape)
logger.debug('y[0].shape: %s', self.audioLabels[0].shape)
logger.debug('y[0][0].type: %s', type(self.audioLabels[0][0]))
logger.debug('masks.shape: %s', self.masks.shape)
logger.debug('masks[0].shape: %s', self.masks[0].shape)
logger.debug('masks[0][0].type: %s', type(self.masks[0][0]))
logger.info("NUM FEATURES: %s", num_features)
# create Theano variables and generate the networks
self.LR_var = T.scalar('LR', dtype=theano.config.floatX)
self.targets_var = T.imatrix('targets')  # 2D for the RNN (many frames (and targets) per example)
self.CNN_targets_var = T.ivector('targets') # 1D for the CNN (1 target per example)
## AUDIO PART ##
self.audio_inputs_var = T.tensor3('audio_inputs')
self.audio_masks_var = T.matrix('audio_masks')
self.audio_valid_frames_var = T.imatrix('valid_indices')
self.audioNet_dict, self.audioNet_lout, self.audioNet_lout_flattened, self.audioNet_lout_features = \
self.build_audioRNN(n_hidden_list=lstm_hidden_list, bidirectional=bidirectional,
seed=seed, debug=debug, logger=logger)
# audioNet_lout_flattened output shape: (nbValidFrames, 39)
## LIPREADING PART ##
self.CNN_input_var = T.tensor4('cnn_input')
# batch size is number of valid frames in each video
self.CNN_dict, self.CNN_lout, self.CNN_lout_features = self.build_CNN()
# CNN_lout_features output shape = (nbValidFrames, 512x7x7)
# for CNN-LSTM combination networks
self.lipreadingType = 'CNN'
if lipRNN_hidden_list != None: #add LSTM layers on top of the CNN
self.lipreadingType = 'CNN_LSTM'
# input to LSTM network: conv features, or with dense softmax layer in between?
# direct conv output is 512x7x7 = 25,088 features -> huge networks. Might need to reduce the size
if cnn_features == 'dense':
self.lipreadingRNN_dict, self.lipreading_lout_features = self.build_lipreadingRNN(self.CNN_lout,
lipRNN_hidden_list,
bidirectional=lipRNN_bidirectional)
else:
self.lipreadingRNN_dict, self.lipreading_lout_features = self.build_lipreadingRNN(self.CNN_lout_features,
lipRNN_hidden_list,
bidirectional=lipRNN_bidirectional)
# For lipreading only: the softmax FC layer now takes its input not from the conv layer, but from the LSTM features stacked on top of the CNN
self.lipreading_lout = self.build_softmax(self.lipreading_lout_features)
else: #only use the CNN
if cnn_features == 'dense':
self.lipreading_lout_features = self.CNN_lout
else:
self.lipreading_lout_features = self.CNN_lout_features
self.lipreading_lout = self.CNN_lout
## COMBINED PART ##
# batch size is number of valid frames in each video
self.combined_dict, self.combined_lout = self.build_combined(lipreading_lout=self.lipreading_lout_features,
audio_lout=self.audioNet_lout_features,
dense_hidden_list=dense_hidden_list)
allLayers= L.get_all_layers(self.lipreading_lout)
for layer in allLayers:
logger_combinedtools.debug("layer : %s \t %s", layer, layer.output_shape)
# [layer.output_shape for layer in allLayers[-5:-1]]
#import pdb;pdb.set_trace()
else:
print("ERROR: Invalid argument: The valid architecture arguments are: 'RNN'")
def build_audioRNN(self, n_hidden_list=(100,), bidirectional=False,
seed=int(time.time()), debug=False, logger=logger_combinedtools):
# some inspiration from http://colinraffel.com/talks/hammer2015recurrent.pdf
if debug:
logger.debug('\nInputs:');
logger.debug(' X.shape: %s', self.mfccs[0].shape)
logger.debug(' X[0].shape: %s %s %s \n%s', self.mfccs[0][0].shape, type(self.mfccs[0][0]),
type(self.mfccs[0][0][0]), self.mfccs[0][0][:5])
logger.debug('Targets: ');
logger.debug(' Y.shape: %s', self.validLabels.shape)
logger.debug(' Y[0].shape: %s %s %s \n%s', self.validLabels[0].shape, type(self.validLabels[0]),
type(self.validLabels[0][0]),
self.validLabels[0][:5])
logger.debug('Layers: ')
# fix these at initialization because it allows for compiler optimizations
num_output_units = self.num_output_units
num_features = self.num_features
batch_size = self.batch_size
audio_inputs = self.audio_inputs_var
audio_masks = self.audio_masks_var # set MATRIX, not iMatrix!! Otherwise all mask calculations are done by CPU, and everything will be ~2x slowed down!! Also in general_tools.generate_masks()
valid_frames = self.audio_valid_frames_var
net = {}
# shape = (batch_size, batch_max_seq_length, num_features)
net['l1_in'] = L.InputLayer(shape=(batch_size, None, num_features), input_var=audio_inputs)
net['l1_mask'] = L.InputLayer(shape=(batch_size, None), input_var=audio_masks)
if debug:
get_l_in = L.get_output(net['l1_in'])
l_in_val = get_l_in.eval({net['l1_in'].input_var: self.mfccs})
# logger.debug(l_in_val)
logger.debug(' l_in size: %s', l_in_val.shape);
get_l_mask = L.get_output(net['l1_mask'])
l_mask_val = get_l_mask.eval({net['l1_mask'].input_var: self.masks})
# logger.debug(l_in_val)
logger.debug(' l_mask size: %s', l_mask_val.shape);
n_batch, n_time_steps, n_features = net['l1_in'].input_var.shape
logger.debug(" n_batch: %s | n_time_steps: %s | n_features: %s", n_batch, n_time_steps,
n_features)
## LSTM parameters
gate_parameters = L.recurrent.Gate(
W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
b=lasagne.init.Constant(0.))
cell_parameters = L.recurrent.Gate(
W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
# Setting W_cell to None denotes that no cell connection will be used.
W_cell=None, b=lasagne.init.Constant(0.),
# By convention, the cell nonlinearity is tanh in an LSTM.
nonlinearity=lasagne.nonlinearities.tanh)
# generate layers of stacked LSTMs, possibly bidirectional
net['l2_lstm'] = []
for i in range(len(n_hidden_list)):
n_hidden = n_hidden_list[i]
if i == 0:
input = net['l1_in']
else:
input = net['l2_lstm'][i - 1]
nextForwardLSTMLayer = L.recurrent.LSTMLayer(
input, n_hidden,
# We need to specify a separate input for masks
mask_input=net['l1_mask'],
# Here, we supply the gate parameters for each gate
ingate=gate_parameters, forgetgate=gate_parameters,
cell=cell_parameters, outgate=gate_parameters,
# We'll learn the initialization and use gradient clipping
learn_init=True, grad_clipping=100.)
net['l2_lstm'].append(nextForwardLSTMLayer)
if bidirectional:
input = net['l2_lstm'][-1]
# Use backward LSTM
# The "backwards" layer is the same as the first,
# except that the backwards argument is set to True.
nextBackwardLSTMLayer = L.recurrent.LSTMLayer(
input, n_hidden, ingate=gate_parameters,
mask_input=net['l1_mask'], forgetgate=gate_parameters,
cell=cell_parameters, outgate=gate_parameters,
learn_init=True, grad_clipping=100., backwards=True)
net['l2_lstm'].append(nextBackwardLSTMLayer)
# We'll combine the forward and backward layer output by summing.
# Merge layers take in lists of layers to merge as input.
# The output of l_sum will be of shape (n_batch, max_n_time_steps, n_features)
net['l2_lstm'].append(L.ElemwiseSumLayer([net['l2_lstm'][-2], net['l2_lstm'][-1]]))
# we need to convert (batch_size, seq_length, num_features) to (batch_size * seq_length, num_features) because Dense networks can't deal with 2 unknown sizes
net['l3_reshape'] = L.ReshapeLayer(net['l2_lstm'][-1], (-1, n_hidden_list[-1]))
# Get the output features for passing to the combination network
net['l4_features'] = L.SliceLayer(net['l3_reshape'], indices=valid_frames, axis=0)
net['l4_features'] = L.ReshapeLayer(net['l4_features'], (-1, n_hidden_list[-1]))
# this will output shape(nbValidFrames, nbLSTMunits)
# add some extra layers to get an output for the audio network only
# Now we can apply feed-forward layers as usual for classification
net['l6_dense'] = L.DenseLayer(net['l3_reshape'], num_units=num_output_units,
nonlinearity=lasagne.nonlinearities.softmax)
# # Now, the shape will be (n_batch * n_timesteps, num_output_units). We can then reshape to
# # n_batch to get num_output_units values for each timestep from each sequence
# only use the valid indices
net['l7_out'] = L.ReshapeLayer(net['l6_dense'], (batch_size, -1, num_output_units))
net['l7_out_valid_basic'] = L.SliceLayer(net['l7_out'], indices=valid_frames, axis=1)
net['l7_out_valid_flattened'] = L.ReshapeLayer(net['l7_out_valid_basic'], (-1, num_output_units))
net['l7_out_valid'] = L.ReshapeLayer(net['l7_out_valid_basic'], (batch_size, -1, num_output_units))
if debug:
get_l_out = theano.function([net['l1_in'].input_var, net['l1_mask'].input_var], L.get_output(net['l7_out']))
l_out = get_l_out(self.mfccs, self.masks)
# this only works for batch_size == 1
get_l_out_valid = theano.function([audio_inputs, audio_masks, valid_frames],
L.get_output(net['l7_out_valid']))
try:
l_out_valid = get_l_out_valid(self.mfccs, self.masks, self.validAudioFrames)
logger.debug('\n\n\n l_out: %s | l_out_valid: %s', l_out.shape, l_out_valid.shape);
except:
logger.warning("batchsize not 1, get_valid not working")
if debug: self.print_RNN_network_structure(net)
if debug: import pdb; pdb.set_trace()
return net, net['l7_out_valid'], net['l7_out_valid_flattened'], net['l4_features']
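# (Added note.) The four values returned above are: the full layer dict, the softmax output
# at the valid frames shaped (batch_size, nb_valid_frames, num_output_units), the same output
# flattened to (batch_size * nb_valid_frames, num_output_units) for the loss/accuracy
# functions, and the LSTM features at the valid frames, shaped (nb_valid_frames, n_hidden_list[-1]).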
# network from Oxford & Google BBC paper
def build_CNN(self, input=None, activation=T.nnet.relu, alpha=0.1, epsilon=1e-4):
input = self.CNN_input_var
nbClasses = self.num_output_units
cnnDict = {}
# input
# store each layer of the network in a dict, for quickly retrieving any layer
cnnDict['l0_in'] = lasagne.layers.InputLayer(
shape=(None, 1, 120, 120), # 5,120,120 (5 = #frames)
input_var=input)
cnnDict['l1_conv1'] = []
cnnDict['l1_conv1'].append(lasagne.layers.Conv2DLayer(
cnnDict['l0_in'],
num_filters=128,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity))
cnnDict['l1_conv1'].append(lasagne.layers.MaxPool2DLayer(cnnDict['l1_conv1'][-1], pool_size=(2, 2)))
cnnDict['l1_conv1'].append(lasagne.layers.BatchNormLayer(
cnnDict['l1_conv1'][-1],
epsilon=epsilon,
alpha=alpha))
cnnDict['l1_conv1'].append(lasagne.layers.NonlinearityLayer(
cnnDict['l1_conv1'][-1],
nonlinearity=activation))
# conv 2
cnnDict['l2_conv2'] = []
cnnDict['l2_conv2'].append(lasagne.layers.Conv2DLayer(
cnnDict['l1_conv1'][-1],
num_filters=256,
filter_size=(3, 3),
stride=(2, 2),
pad=1,
nonlinearity=lasagne.nonlinearities.identity))
cnnDict['l2_conv2'].append(lasagne.layers.MaxPool2DLayer(cnnDict['l2_conv2'][-1], pool_size=(2, 2)))
cnnDict['l2_conv2'].append(lasagne.layers.BatchNormLayer(
cnnDict['l2_conv2'][-1],
epsilon=epsilon,
alpha=alpha))
cnnDict['l2_conv2'].append(lasagne.layers.NonlinearityLayer(
cnnDict['l2_conv2'][-1],
nonlinearity=activation))
# conv3
cnnDict['l3_conv3'] = []
cnnDict['l3_conv3'].append(lasagne.layers.Conv2DLayer(
cnnDict['l2_conv2'][-1],
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity))
cnnDict['l3_conv3'].append(lasagne.layers.NonlinearityLayer(
cnnDict['l3_conv3'][-1],
nonlinearity=activation))
# conv 4
cnnDict['l4_conv4'] = []
cnnDict['l4_conv4'].append(lasagne.layers.Conv2DLayer(
cnnDict['l3_conv3'][-1],
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity))
cnnDict['l4_conv4'].append(lasagne.layers.NonlinearityLayer(
cnnDict['l4_conv4'][-1],
nonlinearity=activation))
# conv 5
cnnDict['l5_conv5'] = []
cnnDict['l5_conv5'].append(lasagne.layers.Conv2DLayer(
cnnDict['l4_conv4'][-1],
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity))
cnnDict['l5_conv5'].append(lasagne.layers.MaxPool2DLayer(
cnnDict['l5_conv5'][-1],
pool_size=(2, 2)))
cnnDict['l5_conv5'].append(lasagne.layers.NonlinearityLayer(
cnnDict['l5_conv5'][-1],
nonlinearity=activation))
# now we have output shape (nbValidFrames, 512,7,7) -> Flatten it.
batch_size = cnnDict['l0_in'].input_var.shape[0]
cnnDict['l6_reshape'] = L.ReshapeLayer(cnnDict['l5_conv5'][-1], (batch_size, 25088))
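# (Added note.) Spatial sizes through the stack: 120 -> pool/2 -> 60 -> conv stride 2 -> 30
# -> pool/2 -> 15 -> three 3x3 convs with pad 1 (still 15) -> pool/2 -> 7,
# so the flattened feature vector is 512 * 7 * 7 = 25088 values per frame.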
# # conv 6
# cnnDict['l6_conv6'] = []
# cnnDict['l6_conv6'].append(lasagne.layers.Conv2DLayer(
# cnnDict['l5_conv5'][-1],
# num_filters=128,
# filter_size=(3, 3),
# pad=1,
# nonlinearity=lasagne.nonlinearities.identity))
# cnnDict['l6_conv6'].append(lasagne.layers.MaxPool2DLayer(
# cnnDict['l6_conv6'][-1],
# pool_size=(2, 2)))
# cnnDict['l6_conv6'].append(lasagne.layers.NonlinearityLayer(
# cnnDict['l6_conv6'][-1],
# nonlinearity=activation))
# # this will output shape (nbValidFrames, 512,7,7). Flatten it.
# batch_size = cnnDict['l0_in'].input_var.shape[0]
# cnnDict['l6_reshape'] = L.ReshapeLayer(cnnDict['l6_conv6'][-1], (batch_size, 25088))
# disable this layer for normal phoneme recognition
# FC layer
# cnnDict['l6_fc'] = []
# cnnDict['l6_fc'].append(lasagne.layers.DenseLayer(
# cnnDict['l5_conv5'][-1],
# nonlinearity=lasagne.nonlinearities.identity,
# num_units=256))
#
# cnnDict['l6_fc'].append(lasagne.layers.NonlinearityLayer(
# cnnDict['l6_fc'][-1],
# nonlinearity=activation))
cnnDict['l7_out'] = lasagne.layers.DenseLayer(
cnnDict['l5_conv5'][-1],
nonlinearity=lasagne.nonlinearities.softmax,
num_units=nbClasses)
# cnn = lasagne.layers.BatchNormLayer(
# cnn,
# epsilon=epsilon,
# alpha=alpha)
return cnnDict, cnnDict['l7_out'], cnnDict['l6_reshape']
def build_lipreadingRNN(self, input, n_hidden_list=(100,), bidirectional=False, debug=False, logger=logger_combinedtools):
net = {}
#CNN output: (time_seq, features)
# the LSTM needs (batch_size, time_seq, features). batch_size = number of videos processed in parallel = 1
nbFeatures = input.output_shape[1]
net['l1_in'] = L.ReshapeLayer(input, (1, -1, nbFeatures))  # 39 or 25088 (with dense softmax or direct conv outputs)
if debug:
n_batch, n_time_steps, n_features = net['l1_in'].output_shape
logger.debug(" n_batch: %s | n_time_steps: %s | n_features: %s", n_batch, n_time_steps, n_features)
## LSTM parameters
# All gates have initializers for the input-to-gate and hidden state-to-gate
# weight matrices, the cell-to-gate weight vector, the bias vector, and the nonlinearity.
# The convention is that gates use the standard sigmoid nonlinearity,
# which is the default for the Gate class.
gate_parameters = L.recurrent.Gate(
W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
b=lasagne.init.Constant(0.))
cell_parameters = L.recurrent.Gate(
W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
# Setting W_cell to None denotes that no cell connection will be used.
W_cell=None, b=lasagne.init.Constant(0.),
# By convention, the cell nonlinearity is tanh in an LSTM.
nonlinearity=lasagne.nonlinearities.tanh)
# generate layers of stacked LSTMs, possibly bidirectional
net['l2_lstm'] = []
for i in range(len(n_hidden_list)):
n_hidden = n_hidden_list[i]
if i == 0:
input = net['l1_in']
else:
input = net['l2_lstm'][i - 1]
nextForwardLSTMLayer = L.recurrent.LSTMLayer(
incoming=input, num_units=n_hidden,
# Here, we supply the gate parameters for each gate
ingate=gate_parameters, forgetgate=gate_parameters,
cell=cell_parameters, outgate=gate_parameters,
# We'll learn the initialization and use gradient clipping
learn_init=True, grad_clipping=100.)
net['l2_lstm'].append(nextForwardLSTMLayer)
if bidirectional:
input = net['l2_lstm'][-1]
# Use backward LSTM
# The "backwards" layer is the same as the first,
# except that the backwards argument is set to True.
nextBackwardLSTMLayer = L.recurrent.LSTMLayer(
input, n_hidden, ingate=gate_parameters,
forgetgate=gate_parameters,
cell=cell_parameters, outgate=gate_parameters,
learn_init=True, grad_clipping=100., backwards=True)
net['l2_lstm'].append(nextBackwardLSTMLayer)
# The output of l_sum will be of shape (n_batch, max_n_time_steps, n_features)
net['l2_lstm'].append(L.ElemwiseSumLayer([net['l2_lstm'][-2], net['l2_lstm'][-1]]))
# we need to convert (batch_size, seq_length, num_features) to (batch_size * seq_length, num_features) because Dense networks can't deal with 2 unknown sizes
net['l3_reshape'] = L.ReshapeLayer(net['l2_lstm'][-1], (-1, n_hidden_list[-1]))
if debug:
self.print_RNN_network_structure(net)
return net, net['l3_reshape'] #output shape: (nbFrames, nbHiddenLSTMunits)
def build_softmax(self, inputLayer, nbClasses=39):
softmaxLayer = lasagne.layers.DenseLayer(
inputLayer,
nonlinearity=lasagne.nonlinearities.softmax,
num_units=nbClasses)
return softmaxLayer
def build_combined(self, lipreading_lout, audio_lout, dense_hidden_list, debug=False):
# (we process one video at a time)
# lipreading_lout and audio_lout should be shaped (batch_size, nbFeatures), with batch_size = nb_valid_frames in this video
# for the CNN features: nbFeatures = 512 x 7 x 7 = 25088
# for the audio RNN features: nbFeatures = nbUnits of the last LSTM layer
combinedNet = {}
combinedNet['l_concat'] = L.ConcatLayer([lipreading_lout, audio_lout], axis=1)
if debug:
logger_combinedtools.debug("CNN output shape: %s", lipreading_lout.output_shape)
logger_combinedtools.debug("RNN output shape: %s", audio_lout.output_shape)
import pdb;pdb.set_trace()
combinedNet['l_dense'] = []
for i in range(len(dense_hidden_list)):
n_hidden = dense_hidden_list[i]
if i == 0:
input = combinedNet['l_concat']
else:
input = combinedNet['l_dense'][i - 1]
nextDenseLayer = L.DenseLayer(input,
nonlinearity=lasagne.nonlinearities.rectify,
num_units=n_hidden)
#nextDenseLayer = L.DropoutLayer(nextDenseLayer, p=0.3)# TODO does dropout work?
combinedNet['l_dense'].append(nextDenseLayer)
# final softmax layer
if len(combinedNet['l_dense']) == 0: #if no hidden layers
combinedNet['l_out'] = L.DenseLayer(combinedNet['l_concat'], num_units=self.num_output_units,
nonlinearity=lasagne.nonlinearities.softmax)
else:
combinedNet['l_out'] = L.DenseLayer(combinedNet['l_dense'][-1], num_units=self.num_output_units,
nonlinearity=lasagne.nonlinearities.softmax)
return combinedNet, combinedNet['l_out']
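# (Hedged usage sketch, not part of the original file; the hidden sizes are only an example.)
# build_combined() expects the *feature* outputs of the two sub-networks, i.e. the layers
# stored elsewhere as self.CNN_lout_features and self.audioNet_lout_features, both shaped
# (nb_valid_frames, nbFeatures):
#
#   combinedNet, combined_lout = self.build_combined(
#           lipreading_lout=self.CNN_lout_features,
#           audio_lout=self.audioNet_lout_features,
#           dense_hidden_list=[512, 512])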
def print_RNN_network_structure(self, net=None, logger=logger_combinedtools):
if net == None: net = self.audioNet_dict
logger.debug("\n PRINTING Audio RNN network: \n %s ", sorted(net.keys()))
for key in sorted(net.keys()):
if 'lstm' in key:
for layer in net['l2_lstm']:
try:
logger.debug(' %12s | in: %s | out: %s', key, layer.input_shape, layer.output_shape)
except:
logger.debug(' %12s | out: %s', key, layer.output_shape)
else:
try:
logger.debug(' %12s | in: %s | out: %s', key, net[key].input_shape, net[key].output_shape)
except:
logger.debug(' %12s | out: %s', key, net[key].output_shape)
return 0
def print_CNN_network_structure(self, net=None, logger=logger_combinedtools):
if net == None:
cnnDict = self.CNN_dict
else:
cnnDict = net
print("\n PRINTING image CNN structure: \n %s " % (sorted(cnnDict.keys())))
for key in sorted(cnnDict.keys()):
print(key)
if 'conv' in key and type(cnnDict[key]) == list:
for layer in cnnDict[key]:
try:
print(' %12s \nin: %s | out: %s' % (layer, layer.input_shape, layer.output_shape))
except:
print(' %12s \nout: %s' % (layer, layer.output_shape))
else:
try:
print(' %12s \nin: %s | out: %s' % (
cnnDict[key], cnnDict[key].input_shape, cnnDict[key].output_shape))
except:
print(' %12s \nout: %s' % (cnnDict[key], cnnDict[key].output_shape))
return 0
# return True if successful load, false otherwise
def load_model(self, model_type, logger=logger_combinedtools):
if not os.path.exists(self.model_paths[model_type]):
return False
# restore network weights
with np.load(self.model_paths[model_type]) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
if model_type == 'audio':
lout = self.audioNet_lout
elif model_type == 'CNN':
lout = self.CNN_lout
elif model_type == 'CNN_LSTM':
lout = self.lipreading_lout
elif model_type == 'combined':
lout = self.combined_lout
else:
logger.error("Wrong network type '%s'. No weights loaded", model_type)
return False
try:
lasagne.layers.set_all_param_values(lout, param_values)
except:
try:
lasagne.layers.set_all_param_values(lout, *param_values)
except:
logger.warning('Warning: %s', traceback.format_exc()) # , model_path)
import pdb;pdb.set_trace()
logger.info("Loading %s parameters successful.", model_type)
return True
# set as many network parameters as possible by hierarchically loading subnetworks
# e.g. for 'combined': if no trained combined network exists, try to load the audio and lipreading subnets
def setNetworkParams(self, runType, logger=logger_combinedtools):
if runType == 'combined':
logger.info("\nAttempting to load combined model: %s", self.model_paths['combined'])
success = self.load_model(model_type='combined')
if (not success):
logger.warning("No complete network found, loading parts...")
logger.info("CNN : %s", self.model_paths['CNN'])
self.load_model(model_type='CNN')
if self.lipreadingType == 'CNN_LSTM': # LIP_RNN_HIDDEN_LIST != None:
logger.info("CNN_LSTM : %s", self.model_paths['CNN_LSTM'])
self.load_model(model_type='CNN_LSTM')
logger.info("Audio : %s", self.model_paths['audio'])
self.load_model(model_type='audio')
elif runType == 'lipreading':
if self.lipreadingType == 'CNN_LSTM':
logger.info("\nAttempting to load lipreading CNN_LSTM model: %s",
self.model_paths['CNN_LSTM'])
# try to load CNN_LSTM; if that fails, just load the CNN so the LSTM can be trained on top of it
success = self.load_model(model_type='CNN_LSTM')
if not success:
logger.warning("No complete network found, loading parts...")
self.load_model(model_type='CNN')
else:
logger.info("\nAttempting to load lipreading CNN model: %s", self.model_paths['CNN'])
success = self.load_model(model_type='CNN')
else: ## runType == 'audio':
logger.info("\nAttempting to load audio model: %s",
self.model_paths['audio'])
success = self.load_model(model_type='audio')
return success
def save_model(self, model_name, logger=logger_combinedtools):
if not os.path.exists(os.path.dirname(model_name)):
os.makedirs(os.path.dirname(model_name))
np.savez(model_name + '.npz', self.best_param)  # stores the whole parameter list as a single arr_0; load_model() unpacks it again
def build_functions(self, runType, train=False, debug=False, logger=logger_combinedtools):
k = 3; # top k accuracy
##########################
## For Lipreading part ##
##########################
if runType == 'lipreading':
# Targets are 2D for the LSTM, but only 1D for the CNN -> need to flatten everywhere
#import pdb;pdb.set_trace()
# For information: only CNN classification, with softmax to 39 phonemes
CNN_test_network_output = L.get_output(self.CNN_lout, deterministic=True)
CNN_test_loss = LO.categorical_crossentropy(CNN_test_network_output, self.targets_var.flatten());
CNN_test_loss = CNN_test_loss.mean()
CNN_test_acc = T.mean(T.eq(T.argmax(CNN_test_network_output, axis=1), self.targets_var.flatten()),
dtype=theano.config.floatX)
CNN_top3_acc = T.mean(lasagne.objectives.categorical_accuracy(CNN_test_network_output, self.targets_var.flatten(), top_k=k))
self.CNN_val_fn = theano.function([self.CNN_input_var, self.targets_var], [CNN_test_loss,
CNN_test_acc,
CNN_top3_acc])
# The whole lipreading network (different if CNN-LSTM architecture, otherwise same as CNN-softmax)
# for validation: disable dropout etc layers -> deterministic
lipreading_test_network_output = L.get_output(self.lipreading_lout, deterministic=True)
lipreading_test_acc = T.mean(T.eq(T.argmax(lipreading_test_network_output, axis=1), self.targets_var.flatten()),
dtype=theano.config.floatX)
lipreading_test_loss = LO.categorical_crossentropy(lipreading_test_network_output, self.targets_var.flatten());
lipreading_test_loss = lipreading_test_loss.mean()
# Top k accuracy
lipreading_top3_acc = T.mean(lasagne.objectives.categorical_accuracy(lipreading_test_network_output,
self.targets_var.flatten(), top_k=k))
self.lipreading_top3acc_fn = theano.function([self.CNN_input_var, self.targets_var], lipreading_top3_acc)
self.lipreading_val_fn = theano.function([self.CNN_input_var, self.targets_var], [lipreading_test_loss,
lipreading_test_acc,
lipreading_top3_acc])
if debug:
CNN_test_loss, CNN_test_acc, CNN_top3_acc = self.CNN_val_fn(self.images, self.validLabels)
logger.debug("\n\nCNN network only: \ntest loss: %s \n test acc: %s \n top3_acc: %s",
CNN_test_loss, CNN_test_acc*100.0, CNN_top3_acc*100.0)
lipreading_test_loss, lipreading_test_acc, lipreading_top3_acc = self.lipreading_val_fn(self.images, self.validLabels)
logger.debug("\n\n Lipreading network: \ntest loss: %s \n test acc: %s \n top3_acc: %s",
lipreading_test_loss, lipreading_test_acc * 100.0, lipreading_top3_acc * 100.0)
# For training, use nondeterministic output
lipreading_network_output = L.get_output(self.lipreading_lout, deterministic=False)
self.lipreading_out_fn = theano.function([self.CNN_input_var], lipreading_network_output)
# cross-entropy loss
lipreading_loss_pointwise = LO.categorical_crossentropy(lipreading_network_output, self.targets_var.flatten());
lipreading_loss = lasagne.objectives.aggregate(lipreading_loss_pointwise)
# lipreading_loss = lipreading_loss_pointwise.mean()
# set all params to trainable
lipreading_params = L.get_all_params(self.lipreading_lout, trainable=True)
if self.lipreadingType == 'CNN_LSTM': #only train the LSTM network, don't touch the CNN
lipreading_params = list(set(lipreading_params) - set(L.get_all_params(self.CNN_lout, trainable=True)))
lipreading_updates = lasagne.updates.adam(loss_or_grads=lipreading_loss, params=lipreading_params, learning_rate=self.LR_var)
# Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
# and returning the corresponding training loss:
self.lipreading_train_fn = theano.function([self.CNN_input_var, self.targets_var, self.LR_var], lipreading_loss, updates=lipreading_updates)
if debug:
output = self.lipreading_out_fn(self.images)
logger.debug(" lipreading output shape: %s", output.shape)
import pdb;pdb.set_trace()
####################
## For Audio Part ##
####################
if runType == 'audio':
# LSTM in lasagne: see https://github.com/craffel/Lasagne-tutorial/blob/master/examples/recurrent.py
# and also http://colinraffel.com/talks/hammer2015recurrent.pdf
if debug:
logger.debug("\n\n Audio Network")
self.print_RNN_network_structure()
# using the lasagne SliceLayer
audio_valid_network_output = L.get_output(self.audioNet_dict['l7_out_valid'])
self.audio_valid_network_output_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var], audio_valid_network_output)
audio_valid_predictions = T.argmax(audio_valid_network_output, axis=2)
self.audio_predictions_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var],
audio_valid_predictions, name='valid_predictions_fn')
audio_valid_network_output_flattened = L.get_output(self.audioNet_lout_flattened)
self.audio_network_output_flattened_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var],
audio_valid_network_output_flattened)
# top k accuracy
audio_top1_acc = T.mean(lasagne.objectives.categorical_accuracy(
audio_valid_network_output_flattened, self.targets_var.flatten(), top_k=1))
self.audio_top1_acc_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var,
self.targets_var], audio_top1_acc)
audio_top3_acc = T.mean(lasagne.objectives.categorical_accuracy(
audio_valid_network_output_flattened, self.targets_var.flatten(), top_k=k))
self.audio_top3_acc_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var,
self.targets_var], audio_top3_acc)
# if debug:
# try:
# valid_out = self.audio_valid_network_output_fn(self.mfccs, self.masks, self.validAudioFrames)
# logger.debug('valid_out.shape: %s', valid_out.shape)
# # logger.debug('valid_out, value: \n%s', valid_out)
#
# valid_out_flattened = self.audio_network_output_flattened_fn(self.mfccs, self.masks,
# self.validAudioFrames)
# logger.debug('valid_out_flat.shape: %s', valid_out_flattened.shape)
# # logger.debug('valid_out_flat, value: \n%s', valid_out_flattened)
#
# valid_preds2 = self.audio_predictions_fn(self.mfccs, self.masks, self.validAudioFrames)
# logger.debug('valid_preds2.shape: %s', valid_preds2.shape)
# # logger.debug('valid_preds2, value: \n%s', valid_preds2)
#
# logger.debug('validAudioFrames.shape: %s', self.validAudioFrames.shape)
# logger.debug('valid_targets.shape: %s', self.validLabels.shape)
# logger.debug('valid_targets, value: %s', self.validLabels)
#
# top1 = self.audio_top1_acc_fn(self.mfccs, self.masks, self.validAudioFrames, self.validLabels)
# logger.debug("top 1 accuracy: %s", top1 * 100.0)
#
# top3 = self.audio_top3_acc_fn(self.mfccs, self.masks, self.validAudioFrames, self.validLabels)
# logger.debug("top 3 accuracy: %s", top3 * 100.0)
#
# except Exception as error:
# print('caught this error: ' + traceback.format_exc());
# import pdb;
# pdb.set_trace()
# with Lasagne SliceLayer outputs:
audio_cost_pointwise = lasagne.objectives.categorical_crossentropy(audio_valid_network_output_flattened,
self.targets_var.flatten())
audio_cost = lasagne.objectives.aggregate(audio_cost_pointwise)
# Functions for computing cost and training
self.audio_val_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var, self.targets_var],
[audio_cost, audio_top1_acc, audio_top3_acc], name='validate_fn')
self.audio_cost_pointwise_fn = theano.function([self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var, self.targets_var],
audio_cost_pointwise, name='cost_pointwise_fn')
if debug:
# logger.debug('cost pointwise: %s',
# self.audio_cost_pointwise_fn(self.mfccs, self.masks, self.validAudioFrames, self.validLabels))
evaluate_cost = self.audio_val_fn(self.mfccs, self.masks, self.validAudioFrames, self.validLabels)
logger.debug('cost: {:.3f}'.format(float(evaluate_cost[0])))
logger.debug('accuracy: {:.3f} %'.format(float(evaluate_cost[1]) * 100))
logger.debug('Top 3 accuracy: {:.3f} %'.format(float(evaluate_cost[2]) * 100))
# pdb.set_trace()
# Retrieve all trainable parameters from the network
audio_params = L.get_all_params(self.audioNet_lout, trainable=True)
self.audio_updates = lasagne.updates.adam(loss_or_grads=audio_cost, params=audio_params, learning_rate=self.LR_var)
self.audio_train_fn = theano.function([self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var,
self.targets_var, self.LR_var],
audio_cost, updates=self.audio_updates, name='train_fn')
#######################
### For Combined part ##
########################
if runType == 'combined':
if debug:
logger.debug("\n\n Combined Network")
RNN_features = L.get_output(self.audioNet_lout_features)
CNN_features = L.get_output(self.CNN_lout_features)
get_features = theano.function([self.CNN_input_var, self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var], [RNN_features, CNN_features])
try:
RNN_feat, CNN_feat = get_features(self.images,
self.mfccs,
self.masks,
self.validAudioFrames)
logger.debug("RNN_feat.shape: %s", RNN_feat.shape)
logger.debug("CNN_feat.shape: %s", CNN_feat.shape)
except Exception as error:
print('caught this error: ' + traceback.format_exc());
import pdb;
pdb.set_trace()
# For training, use nondeterministic output
combined_network_output = L.get_output(self.combined_lout, deterministic=False)
# cross-entropy loss
combined_loss = LO.categorical_crossentropy(combined_network_output, self.targets_var.flatten())
combined_loss = combined_loss.mean()
# weight regularization
weight_decay = 1e-5
combined_weightsl2 = lasagne.regularization.regularize_network_params(self.combined_lout, lasagne.regularization.l2)
combined_loss += weight_decay * combined_weightsl2
# set all params to trainable
combined_params = L.get_all_params(self.combined_lout, trainable=True)
# remove subnet parameters so they are kept fixed (already pretrained)
combined_params = list(set(combined_params) - set(L.get_all_params(self.CNN_lout, trainable=True)))
combined_params = list(set(combined_params) - set(L.get_all_params(self.audioNet_lout, trainable=True)))
combined_updates = lasagne.updates.adam(loss_or_grads=combined_loss, params=combined_params, learning_rate=self.LR_var)
self.combined_train_fn = theano.function([self.CNN_input_var,self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var,
self.targets_var, self.LR_var], combined_loss, updates=combined_updates)
# for validation: disable dropout etc layers -> deterministic
combined_test_network_output = L.get_output(self.combined_lout, deterministic=True)
combined_test_acc = T.mean(T.eq(T.argmax(combined_test_network_output, axis=1), self.targets_var.flatten()),
dtype=theano.config.floatX)
combined_test_loss = LO.categorical_crossentropy(combined_test_network_output, self.targets_var.flatten());
combined_test_loss = combined_test_loss.mean()
self.combined_output_fn = theano.function(
[self.CNN_input_var, self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var],
combined_test_network_output)
combined_top3_acc = T.mean(lasagne.objectives.categorical_accuracy(combined_test_network_output,
self.targets_var.flatten(), top_k=k))
self.combined_top3acc_fn = theano.function([self.CNN_input_var, self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var,
self.targets_var], combined_top3_acc)
self.combined_val_fn = theano.function([self.CNN_input_var, self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var,
self.targets_var], [combined_test_loss, combined_test_acc, combined_top3_acc])
if debug:
try:
comb_test_loss, comb_test_acc, comb_top3_acc = self.combined_val_fn(self.images,
self.mfccs,
self.masks,
self.validAudioFrames,
self.validLabels)
logger.debug("Combined network: \ntest loss: %s \n test acc: %s \n top3_acc: %s",
comb_test_loss, comb_test_acc * 100.0, comb_top3_acc * 100.0)
except Exception as error:
print('caught this error: ' + traceback.format_exc());
import pdb;
pdb.set_trace()
def shuffle(self, lst):
import random
c = list(zip(*lst))
random.shuffle(c)
shuffled = zip(*c)
for i in range(len(shuffled)):
shuffled[i] = list(shuffled[i])
return shuffled
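# (Hedged usage example, not in the original.) shuffle() permutes several parallel lists with
# one shared random order, so corresponding entries stay aligned, e.g.:
#   images, mfccs, audioLabels, validLabels, validFrames = self.shuffle(
#           (images, mfccs, audioLabels, validLabels, validFrames))
# which is how evalTRAINING() applies it to the per-speaker training tuple.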
# This function trains the model a full epoch (on the whole dataset)
def train_epoch(self, runType, images, mfccs, validLabels, valid_frames, LR, batch_size=-1):
if batch_size == -1: batch_size = self.batch_size # always 1
cost = 0;
nb_batches = len(mfccs) / batch_size
for i in tqdm(range(nb_batches), total=nb_batches):
batch_images = images[i * batch_size:(i + 1) * batch_size][0]
batch_mfccs = mfccs[i * batch_size:(i + 1) * batch_size]
batch_validLabels = validLabels[i * batch_size:(i + 1) * batch_size]
batch_valid_frames = valid_frames[i * batch_size:(i + 1) * batch_size]
batch_masks = generate_masks(batch_mfccs, valid_frames=batch_valid_frames, batch_size=batch_size)
# now pad inputs and target to maxLen
batch_mfccs = pad_sequences_X(batch_mfccs)
batch_valid_frames = pad_sequences_y(batch_valid_frames)
batch_validLabels = pad_sequences_y(batch_validLabels)
# print("batch_mfccs.shape: ", batch_mfccs.shape)
# print("batch_validLabels.shape: ", batch_validLabels.shape)
if runType == 'audio':
cst = self.audio_train_fn(batch_mfccs, batch_masks, batch_valid_frames,
batch_validLabels, LR) # training
elif runType == 'lipreading':
cst = self.lipreading_train_fn(batch_images, batch_validLabels, LR)
else: # train combined
cst = self.combined_train_fn(batch_images, batch_mfccs, batch_masks, batch_valid_frames,
batch_validLabels, LR)
cost += cst;
return cost, nb_batches
# This function trains the model a full epoch (on the whole dataset)
def val_epoch(self, runType, images, mfccs, validLabels, valid_frames, batch_size=-1):
if batch_size == -1: batch_size = self.batch_size
cost = 0;
accuracy = 0
top3_accuracy = 0
nb_batches = len(mfccs) / batch_size
for i in tqdm(range(nb_batches), total=nb_batches):
batch_images = images[i * batch_size:(i + 1) * batch_size][0]
batch_mfccs = mfccs[i * batch_size:(i + 1) * batch_size]
batch_validLabels = validLabels[i * batch_size:(i + 1) * batch_size]
batch_valid_frames = valid_frames[i * batch_size:(i + 1) * batch_size]
batch_masks = generate_masks(batch_mfccs, valid_frames=batch_valid_frames, batch_size=batch_size)
# now pad inputs and target to maxLen
batch_mfccs = pad_sequences_X(batch_mfccs)
batch_valid_frames = pad_sequences_y(batch_valid_frames)
batch_validLabels = pad_sequences_y(batch_validLabels)
# print("batch_mfccs.shape: ", batch_mfccs.shape)
# print("batch_validLabels.shape: ", batch_validLabels.shape)
#import pdb; pdb.set_trace()
if runType == 'audio':
cst, acc, top3_acc = self.audio_val_fn(batch_mfccs, batch_masks, batch_valid_frames,
batch_validLabels) # training
elif runType == 'lipreading':
cst, acc, top3_acc = self.lipreading_val_fn(batch_images, batch_validLabels)
else: # train combined
cst, acc, top3_acc = self.combined_val_fn(batch_images, batch_mfccs, batch_masks, batch_valid_frames,
batch_validLabels)
cost += cst;
accuracy += acc
top3_accuracy += top3_acc
return cost, accuracy, top3_accuracy, nb_batches
# evaluate many TRAINING speaker files -> train loss, val loss and val error. Load them one by one (so they fit in memory)
def evalTRAINING(self, trainingSpeakerFiles, LR, runType='audio', shuffleEnabled=True, sourceDataDir=None,
storeProcessed=False, processedDir=None, verbose=False, logger=logger_combinedtools):
train_cost = 0;
val_acc = 0;
val_cost = 0;
val_topk_acc = 0;
nb_train_batches = 0;
nb_val_batches = 0;
# for each speaker, pass over the train set, then val set. (test is other files). save the results.
for speakerFile in tqdm(trainingSpeakerFiles, total=len(trainingSpeakerFiles)):
logger.debug("processing %s", speakerFile)
train, val, test = preprocessingCombined.getOneSpeaker(
speakerFile=speakerFile, sourceDataDir=sourceDataDir,
trainFraction=0.8, validFraction=0.2,
storeProcessed=storeProcessed, processedDir=processedDir, logger=logger)
if shuffleEnabled: train = self.shuffle(train)
images_train, mfccs_train, audioLabels_train, validLabels_train, validAudioFrames_train = train
images_val, mfccs_val, audioLabels_val, validLabels_val, validAudioFrames_val = val
images_test, mfccs_test, audioLabels_test, validLabels_test, validAudioFrames_test = test
if verbose:
logger.debug("the number of training examples is: %s", len(images_train))
logger.debug("the number of valid examples is: %s", len(images_val))
logger.debug("the number of test examples is: %s", len(images_test))
train_cost_one, train_batches_one = self.train_epoch(runType=runType,
images=images_train,
mfccs=mfccs_train,
validLabels=validLabels_train,
valid_frames=validAudioFrames_train,
LR=LR)
train_cost += train_cost_one;
nb_train_batches += train_batches_one
# get results for validation set
val_cost_one, val_acc_one, val_topk_acc_one, val_batches_one = self.val_epoch(runType=runType,
images=images_val,
mfccs=mfccs_val,
validLabels=validLabels_val,
valid_frames=validAudioFrames_val)
val_cost += val_cost_one;
val_acc += val_acc_one;
val_topk_acc += val_topk_acc_one
nb_val_batches += val_batches_one;
if verbose:
logger.debug(" this speaker results: ")
logger.debug("\ttraining cost: %s", train_cost_one / train_batches_one)
logger.debug("\tvalidation cost: %s", val_cost_one / val_batches_one)
logger.debug("\tvalidation acc rate: %s %%", val_acc_one / val_batches_one * 100)
logger.debug("\tvalidation top 3 acc rate: %s %%", val_topk_acc_one / val_batches_one * 100)
# get the average over all speakers
train_cost /= nb_train_batches
val_cost /= nb_val_batches
val_acc = val_acc / nb_val_batches * 100 # convert to %
val_topk_acc = val_topk_acc / nb_val_batches * 100 # convert to %
return train_cost, val_cost, val_acc, val_topk_acc
def evalTEST(self, testSpeakerFiles, runType='audio', sourceDataDir=None, storeProcessed=False, processedDir=None,
verbose=False, logger=logger_combinedtools):
test_acc = 0;
test_cost = 0;
test_topk_acc = 0;
nb_test_batches = 0;
# for each speaker, evaluate the test set (trainFraction and validFraction are 0 here, so everything ends up in 'test'). save the results.
for speakerFile in tqdm(testSpeakerFiles, total=len(testSpeakerFiles)):
logger.debug("processing %s", speakerFile)
train, val, test = preprocessingCombined.getOneSpeaker(
speakerFile=speakerFile, sourceDataDir=sourceDataDir,
trainFraction=0.0, validFraction=0.0,
storeProcessed=storeProcessed, processedDir=processedDir, logger=logger)
images_train, mfccs_train, audioLabels_train, validLabels_train, validAudioFrames_train = train
images_val, mfccs_val, audioLabels_val, validLabels_val, validAudioFrames_val = val
images_test, mfccs_test, audioLabels_test, validLabels_test, validAudioFrames_test = test
if verbose:
logger.debug("the number of training examples is: %s", len(images_train))
logger.debug("the number of valid examples is: %s", len(images_val))
logger.debug("the number of test examples is: %s", len(images_test))
import pdb;pdb.set_trace()
# get results for the test set
test_cost_one, test_acc_one, test_topk_acc_one, test_batches_one = self.val_epoch(runType=runType,
images=images_test,
mfccs=mfccs_test,
validLabels=validLabels_test,
valid_frames=validAudioFrames_test)
test_acc += test_acc_one;
test_cost += test_cost_one;
test_topk_acc += test_topk_acc_one
nb_test_batches += test_batches_one;
if verbose:
logger.debug(" this speaker results: ")
logger.debug("\ttest cost: %s", test_cost_one / test_batches_one)
logger.debug("\ttest acc rate: %s %%", test_acc_one / test_batches_one * 100)
logger.debug("\ttest top 3 acc rate: %s %%", test_topk_acc_one / test_batches_one * 100)
# get the average over all speakers
test_cost /= nb_test_batches
test_acc = test_acc / nb_test_batches * 100
test_topk_acc = test_topk_acc / nb_test_batches * 100
return test_cost, test_acc, test_topk_acc
def train(self, dataset, database_binaryDir, runType='combined', storeProcessed=False, processedDir=None,
save_name='Best_model',
num_epochs=40, batch_size=1, LR_start=1e-4, LR_decay=1,
shuffleEnabled=True, compute_confusion=False, debug=False, logger=logger_combinedtools):
trainingSpeakerFiles, testSpeakerFiles = dataset
logger.info("\n* Starting training...")
best_val_acc, test_acc = self.loadPreviousResults(save_name)
logger.info("Initial best Val acc: %s", best_val_acc)
logger.info("Initial best test acc: %s\n", test_acc)
# init some performance keepers
best_epoch = 1
LR = LR_start
# for storage of training info
self.network_train_info = {
'train_cost': [],
'val_cost': [], 'val_acc': [], 'val_topk_acc': [],
'test_cost': [], 'test_acc': [], 'test_topk_acc': []
} # used to be list of lists
self.epochsNotImproved = 0
# # TODO: remove this. should not be needed
if not self.loadPerSpeaker: #load all the lipspeakers in memory, then don't touch the files -> no reloading needed = faster training
allImages_train, allMfccs_train, allAudioLabels_train, allValidLabels_train, allValidAudioFrames_train = unpickle(
os.path.expanduser("~/TCDTIMIT/lipreading/TCDTIMIT/binaryPerVideo/allLipspeakersTrain.pkl"))
allImages_val, allMfccs_val, allAudioLabels_val, allValidLabels_val, allValidAudioFrames_val = unpickle(
os.path.expanduser("~/TCDTIMIT/lipreading/TCDTIMIT/binaryPerVideo/allLipspeakersVal.pkl"))
allImages_test, allMfccs_test, allAudioLabels_test, allValidLabels_test, allValidAudioFrames_test = unpickle(
os.path.expanduser("~/TCDTIMIT/lipreading/TCDTIMIT/binaryPerVideo/allLipspeakersTest.pkl"))
test_cost, test_acc, test_topk_acc, nb_test_batches = self.val_epoch(runType=runType,
images=allImages_test,
mfccs=allMfccs_test,
validLabels=allValidLabels_test,
valid_frames=allValidAudioFrames_test,
batch_size=1)
test_cost /= nb_test_batches
test_acc = test_acc / nb_test_batches * 100
test_topk_acc = test_topk_acc / nb_test_batches * 100
else:
test_cost, test_acc, test_topk_acc = self.evalTEST(testSpeakerFiles,
runType=runType,
sourceDataDir=database_binaryDir,
storeProcessed=storeProcessed,
processedDir=processedDir)
# # TODO: end remove
logger.info("TEST results: ")
logger.info("\t test cost: %s", test_cost)
logger.info("\t test acc rate: %s %%", test_acc)
logger.info("\t test top 3 acc: %s %%", test_topk_acc)
logger.info("starting training for %s epochs...", num_epochs)
# now run through the epochs
for epoch in range(num_epochs):
logger.info("\n\n\n Epoch %s started", epoch + 1)
start_time = time.time()
if self.loadPerSpeaker:
train_cost, val_cost, val_acc, val_topk_acc = self.evalTRAINING(trainingSpeakerFiles, LR=LR,
runType=runType,
shuffleEnabled=shuffleEnabled,
sourceDataDir=database_binaryDir,
storeProcessed=storeProcessed,
processedDir=processedDir)
else:
train_cost, nb_train_batches = self.train_epoch(runType=runType,
images=allImages_train,
mfccs=allMfccs_train,
validLabels=allValidLabels_train,
valid_frames=allValidAudioFrames_train,
LR=LR)
train_cost /= nb_train_batches
val_cost, val_acc, val_topk_acc, nb_val_batches = self.val_epoch(runType=runType,
images=allImages_val,
mfccs=allMfccs_val,
validLabels=allValidLabels_val,
valid_frames=allValidAudioFrames_val,
batch_size=1)
val_cost /= nb_val_batches
val_acc = val_acc / nb_val_batches * 100
val_topk_acc = val_topk_acc / nb_val_batches * 100
# test if validation acc went up
printTest = False
resetNetwork=False
if val_acc > best_val_acc:
printTest = True
best_val_acc = val_acc
best_epoch = epoch + 1
self.epochsNotImproved = 0
logger.info("\n\nBest ever validation score; evaluating TEST set...")
if self.loadPerSpeaker:
test_cost, test_acc, test_topk_acc = self.evalTEST(testSpeakerFiles, runType=runType,
sourceDataDir=database_binaryDir,
storeProcessed=storeProcessed,
processedDir=processedDir)
else:
test_cost, test_acc, test_topk_acc, nb_test_batches = self.val_epoch(runType=runType,
images=allImages_test,
mfccs=allMfccs_test,
validLabels=allValidLabels_test,
valid_frames=allValidAudioFrames_test,
batch_size=1)
test_cost /= nb_test_batches
test_acc = test_acc / nb_test_batches * 100
test_topk_acc = test_topk_acc / nb_test_batches * 100
logger.info("TEST results: ")
logger.info("\t test cost: %s", test_cost)
logger.info("\t test acc rate: %s %%", test_acc)
logger.info("\t test top 3 acc: %s %%", test_topk_acc)
self.best_cost = val_cost
self.best_epoch = self.curr_epoch
# get the parameters of the model we're training
if runType == 'audio': lout = self.audioNet_lout
elif runType == 'lipreading': lout = self.lipreading_lout
elif runType == 'combined': lout = self.combined_lout
else: raise IOError("can't save network params; network output not found")
self.best_param = L.get_all_param_values(lout)
logger.info("New best model found!")
if save_name is not None:
logger.info("Model saved as " + save_name)
self.save_model(save_name)
else: #reset to best model we had
resetNetwork= True
epoch_duration = time.time() - start_time
# Then we logger.info the results for this epoch:
logger.info("Epoch %s of %s took %s seconds", epoch + 1, num_epochs, epoch_duration)
logger.info(" LR: %s", LR)
logger.info(" training cost: %s", train_cost)
logger.info(" validation cost: %s", val_cost)
logger.info(" validation acc rate: %s %%", val_acc)
logger.info(" validation top 3 acc rate: %s %%", val_topk_acc)
logger.info(" best epoch: %s", best_epoch)
logger.info(" best validation acc rate: %s %%", best_val_acc)
if printTest:
logger.info(" test cost: %s", test_cost)
logger.info(" test acc rate: %s %%", test_acc)
logger.info(" test top 3 acc rate: %s %%", test_topk_acc)
# save the training info
self.network_train_info['train_cost'].append(train_cost)
self.network_train_info['val_cost'].append(val_cost)
self.network_train_info['val_acc'].append(val_acc)
self.network_train_info['val_topk_acc'].append(val_topk_acc)
self.network_train_info['test_cost'].append(test_cost)
self.network_train_info['test_acc'].append(test_acc)
self.network_train_info['test_topk_acc'].append(test_topk_acc)
store_path = save_name + '_trainInfo.pkl'
saveToPkl(store_path, self.network_train_info)
logger.info("Train info written to:\t %s", store_path)
# decay the LR
# LR *= LR_decay
LR = self.updateLR(LR, LR_decay)
if resetNetwork: self.setNetworkParams(runType)
if self.epochsNotImproved > 3:
logger.warning("\n\n NO MORE IMPROVEMENTS -> stop training")
self.finalNetworkEvaluation(save_name=save_name,
database_binaryDir=database_binaryDir,
processedDir=processedDir,
runType=runType,
storeProcessed=storeProcessed,
testSpeakerFiles=testSpeakerFiles)
break
logger.info("Done.")
def loadPreviousResults(self, save_name, logger=logger_combinedtools):
# try to load performance metrics of stored model
best_val_acc = 0
test_topk_acc = 0
test_cost = 0
test_acc = 0
try:
if os.path.exists(save_name + ".npz") and os.path.exists(save_name + "_trainInfo.pkl"):
old_train_info = unpickle(save_name + '_trainInfo.pkl')
# backward compatibility
if type(old_train_info) == list:
old_train_info = old_train_info[0]
best_val_acc = max(old_train_info[2])
test_cost = min(old_train_info[3])
test_acc = max(old_train_info[3])
elif type(old_train_info) == dict: # normal case
best_val_acc = max(old_train_info['val_acc'])
test_cost = min(old_train_info['test_cost'])
test_acc = max(old_train_info['test_acc'])
try:
test_topk_acc = max(old_train_info['test_topk_acc'])
except:
pass
else:
logger.warning("old trainInfo found, but wrong format: %s", save_name + "_trainInfo.pkl")
# do nothing
except:
pass
return best_val_acc, test_acc
# evaluate network on test set.
# Combined network -> evaluate audio, lipreading and then combined network
# Audio network -> evaluate audio
# Lipreading -> evaluate lipreading
def finalNetworkEvaluation(self, save_name, database_binaryDir, processedDir, runType, testSpeakerFiles, storeProcessed=False, logger=logger_combinedtools):
if runType == 'lipreading': networkType = "lipreading " + self.lipreadingType
else: networkType = runType
logger.info(" \n\n Running FINAL evaluation on Test set... (%s network type)", networkType)
store_path = save_name + '_trainInfo.pkl' #dictionary with lists that contain training info for each epoch (train/val/test accuracy, cost etc)
self.network_train_info = unpickle(store_path)
# for the lipspeaker files that are all loaded in memory at once, we still need to get the data
if not self.loadPerSpeaker: # load all the lipspeakers in memory, then don't touch the files -> no reloading needed = faster
allImages_test, allMfccs_test, allAudioLabels_test, allValidLabels_test, allValidAudioFrames_test = unpickle(
os.path.expanduser("~/TCDTIMIT/lipreading/TCDTIMIT/binaryPerVideo/allLipspeakersTest.pkl"))
if self.loadPerSpeaker:
test_cost, test_acc, test_topk_acc = self.evalTEST(testSpeakerFiles, runType=runType,
sourceDataDir=database_binaryDir,
storeProcessed=storeProcessed,
processedDir=processedDir)
else:
test_cost, test_acc, test_topk_acc, nb_test_batches = self.val_epoch(runType=runType,
images=allImages_test,
mfccs=allMfccs_test,
validLabels=allValidLabels_test,
valid_frames=allValidAudioFrames_test,
batch_size=1)
test_cost /= nb_test_batches
test_acc = test_acc / nb_test_batches * 100
test_topk_acc = test_topk_acc / nb_test_batches * 100
logger.info("FINAL TEST results on %s: ", runType)
logger.info("\t %s test cost: %s", runType, test_cost)
logger.info("\t %s test acc rate: %s %%", runType, test_acc)
logger.info("\t %s test top 3 acc: %s %%", runType, test_topk_acc)
self.network_train_info['final_test_cost'] = test_cost
self.network_train_info['final_test_acc'] = test_acc
self.network_train_info['final_test_top3_acc'] = test_topk_acc
saveToPkl(store_path, self.network_train_info)
def updateLR(self, LR, LR_decay, logger=logger_combinedtools):
this_acc = self.network_train_info['val_acc'][-1]
this_cost = self.network_train_info['val_cost'][-1]
try:
last_acc = self.network_train_info['val_acc'][-2]
last_cost = self.network_train_info['val_cost'][-2]
except:
last_acc = -10
last_cost = 10 * this_cost # first time it will fail because there is only 1 result stored
# only reduce the LR if there is not much improvement anymore
if this_cost / float(last_cost) >= 0.98 and this_acc - last_acc < 0.2:
logger.info(" Error not much reduced: %s vs %s. Reducing LR: %s", this_cost, last_cost, LR * LR_decay)
self.epochsNotImproved += 1
return LR * LR_decay
else:
self.epochsNotImproved = max(self.epochsNotImproved - 1, 0) # reduce by 1, minimum 0
return LR
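# (Added worked example, not part of the original.) With LR_decay = 0.5:
#   last_cost = 1.00, this_cost = 0.99 -> ratio 0.99 >= 0.98, so if the accuracy gain is
#   also below 0.2 the LR is halved and epochsNotImproved goes up by one;
#   last_cost = 1.00, this_cost = 0.90 -> ratio 0.90 < 0.98, the LR stays unchanged and
#   epochsNotImproved is decremented (floored at 0).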
|
|
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgpoints"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgUtil
from osgpypp import osgGA  # needed for GUIEventHandler / GUIEventAdapter below
from osgpypp import osgViewer
# Translated from file 'osgpoints.cpp'
# OpenSceneGraph example, osgpoints.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osgDB/ReadFile>
#include <osgUtil/Optimizer>
#include <osgViewer/Viewer>
#include <osg/Point>
#include <osg/BlendFunc>
#include <osg/Texture2D>
#include <osg/PointSprite>
#include <osg/PolygonMode>
#include <iostream>
class KeyboardEventHandler (osgGA.GUIEventHandler) :
    def __init__(self, stateset):
        osgGA.GUIEventHandler.__init__(self)
        self._stateset = stateset
        self._point = osg.Point()
        self._point.setDistanceAttenuation(osg.Vec3(0.0, 0.0000, 0.05))
        self._stateset.setAttribute(self._point)

    def handle(self, ea, aa):
        if ea.getEventType() == osgGA.GUIEventAdapter.KEYDOWN:
            if ea.getKey() == ord("+") or ea.getKey() == osgGA.GUIEventAdapter.KEY_KP_Add:
                self.changePointSize(1.0)
                return True
            elif ea.getKey() == ord("-") or ea.getKey() == osgGA.GUIEventAdapter.KEY_KP_Subtract:
                self.changePointSize(-1.0)
                return True
            elif ea.getKey() == ord("<"):
                self.changePointAttenuation(1.1)
                return True
            elif ea.getKey() == ord(">"):
                self.changePointAttenuation(1.0/1.1)
                return True
        return False

    def getPointSize(self):
        return self._point.getSize()

    def setPointSize(self, psize):
        if psize > 0.0:
            self._point.setSize(psize)
            print "Point size ", psize

    def changePointSize(self, delta):
        self.setPointSize(self.getPointSize() + delta)

    def changePointAttenuation(self, scale):
        self._point.setDistanceAttenuation(self._point.getDistanceAttenuation() * scale)
def main(argv):
# use an ArgumentParser object to manage the program arguments.
arguments = osg.ArgumentParser(argv)
# set up the usage document, in case we need to print out how to use this program.
arguments.getApplicationUsage().setApplicationName(arguments.getApplicationName())
arguments.getApplicationUsage().setDescription(arguments.getApplicationName()+" example provides an interactive viewer for visualising point clouds.")
arguments.getApplicationUsage().setCommandLineUsage(arguments.getApplicationName()+" [options] filename ...")
arguments.getApplicationUsage().addCommandLineOption("-h or --help","Display this information")
arguments.getApplicationUsage().addCommandLineOption("--sprites","Point sprites.")
arguments.getApplicationUsage().addCommandLineOption("--points","Sets the polygon mode to GL_POINT for front and back faces.")
# construct the viewer.
viewer = osgViewer.Viewer()
shader = False
while arguments.read("--shader") : shader = True
# if user request help write it out to cout.
if arguments.read("-h") or arguments.read("--help") :
arguments.getApplicationUsage().write(sys.stdout)
return 1
usePointSprites = False
while arguments.read("--sprites") : usePointSprites = True
forcePointMode = False
while arguments.read("--points") : forcePointMode = True
if arguments.argc()<=1 :
arguments.getApplicationUsage().write(sys.stdout, osg.ApplicationUsage.COMMAND_LINE_OPTION)
return 1
# read the scene from the list of file specified commandline args.
loadedModel = osgDB.readNodeFiles(arguments)
# if no model has been successfully loaded report failure.
if not loadedModel :
print arguments.getApplicationName(), ": No data loaded"
return 1
# optimize the scene graph, remove redundant nodes and state etc.
optimizer = osgUtil.Optimizer()
optimizer.optimize(loadedModel)
# set the scene to render
viewer.setSceneData(loadedModel)
stateset = loadedModel.getOrCreateStateSet()
if usePointSprites :
#/ Setup cool blending
fn = osg.BlendFunc()
stateset.setAttributeAndModes(fn, osg.StateAttribute.ON)
#/ Setup the point sprites
sprite = osg.PointSprite()
stateset.setTextureAttributeAndModes(0, sprite, osg.StateAttribute.ON)
#/ The texture for the sprites
tex = osg.Texture2D()
tex.setImage(osgDB.readImageFile("Images/particle.rgb"))
stateset.setTextureAttributeAndModes(0, tex, osg.StateAttribute.ON)
if forcePointMode :
#/ Set polygon mode to GL_POINT
pm = osg.PolygonMode(
osg.PolygonMode.FRONT_AND_BACK, osg.PolygonMode.POINT )
stateset.setAttributeAndModes( pm, osg.StateAttribute.ON | osg.StateAttribute.OVERRIDE)
# register the handler for modifying the point size
viewer.addEventHandler(KeyboardEventHandler(viewer.getCamera().getOrCreateStateSet()))
if shader :
stateset = loadedModel.getOrCreateStateSet()
#################################
# vertex shader using just Vec4 coefficients
vertexShaderSource = (
    "void main(void)\n"
    "{\n"
    "    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;\n"
    "}\n")
program = osg.Program()
stateset.setAttribute(program)
vertex_shader = osg.Shader(osg.Shader.VERTEX, vertexShaderSource)
program.addShader(vertex_shader)
# The fragment shader below was disabled in the original C++ source (#if 0 ... #endif):
# fragmentShaderSource = (
#     "void main(void)\n"
#     "{\n"
#     "    gl_FragColor = gl_Color;\n"
#     "}\n")
# fragment_shader = osg.Shader(osg.Shader.FRAGMENT, fragmentShaderSource)
# program.addShader(fragment_shader)
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
|
|
import os
import datetime
import xlsxwriter
def create_excel_report(meterId, sDate, eDate, OnePhase, hProfile, dProfile,
hEvents, dEvents):
""" Creates the excel file
"""
dest_path = 'static/reports/'
if not os.path.exists(dest_path):
os.makedirs(dest_path)
dest_filename = dest_path + meterId + '_' + sDate + '_' + eDate + '.xlsx'
workbook = xlsxwriter.Workbook(dest_filename)
#-----------------
#Excel Formats
#-----------------
sDateObj = datetime.datetime.strptime(sDate, '%Y-%m-%d')
eDateObj = datetime.datetime.strptime(eDate, '%Y-%m-%d')
cSiteName = ' for '+ meterId
date_format = workbook.add_format({'num_format': 'dd-mm-yy hh:mm'})
fBU = workbook.add_format()
fBU.set_bold()
fBU.set_underline()
#-----------------
#Profile
#-----------------
numProfileRows = str(len(dProfile)+1)
ws1 = workbook.add_worksheet('Profile')
ws1 = dump_date_data(ws1,hProfile,dProfile,date_format)
ws1.set_column('A:A',15)
#-----------------
#Events
#-----------------
ws2 = workbook.add_worksheet('Events')
ws2 = dump_data(ws2,hEvents,dEvents)
ws2.set_column('A:B',20)
##################################################################################
#-----------------
#V
#-----------------
ws3 = workbook.add_worksheet('V')
ws3 = create_histogram_V(ws3,numProfileRows,OnePhase,fBU)
#-----------------
#THD
#-----------------
ws4 = workbook.add_worksheet('THD')
ws4 = create_histogram_THD(ws4,numProfileRows,OnePhase,fBU)
#-----------------
#U
#-----------------
if OnePhase == False:
ws5 = workbook.add_worksheet('U')
ws5 = create_histogram_U(ws5,numProfileRows,fBU)
##################################################################################
cVars = numProfileRows,cSiteName, sDateObj, eDateObj
#-----------------
#ProfileVoltsG
#-----------------
wsc1 = workbook.add_chartsheet('ProfileVoltsG')
c1 = create_chart_scatter(workbook,'ProfileVoltsG',cVars,OnePhase)
wsc1.set_chart(c1) #Place Chart
#-----------------
#ProfileTHDG
#-----------------
wsc2 = workbook.add_chartsheet('ProfileTHDG')
c2 = create_chart_scatter(workbook,'ProfileTHDG',cVars,OnePhase)
wsc2.set_chart(c2) #Place Chart
#-----------------
#ProfileUG
#-----------------
if OnePhase == False:
wsc3 = workbook.add_chartsheet('ProfileUG')
c3 = create_chart_scatter(workbook,'ProfileUG',cVars,OnePhase)
wsc3.set_chart(c3) #Place Chart
##################################################################################
#-----------------
#VG
#-----------------
wsc4 = workbook.add_chartsheet('VG')
c4 = create_chart_histogram(workbook,'V','74','Voltage Frequency Distribution'+cSiteName,'Voltage')
wsc4.set_chart(c4) #Place Chart
#-----------------
#THDG
#-----------------
wsc5 = workbook.add_chartsheet('THDG')
c5 = create_chart_histogram(workbook,'THD','84','THD Frequency Distribution'+cSiteName,'THD (%)')
wsc5.set_chart(c5) #Place Chart
#-----------------
#UG
#-----------------
if OnePhase == False:
wsc6 = workbook.add_chartsheet('UG')
c6 = create_chart_histogram(workbook,'U','84','Unbalance Frequency Distribution'+cSiteName,'Unbalance')
wsc6.set_chart(c6) #Place Chart
#-----------------
#Save File
#-----------------
workbook.close()
return dest_filename
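# (Hedged usage sketch; the header names and row values below are illustrative assumptions,
#  but they follow the column layout the chart formulas expect: Profile column A = date,
#  B-D = phase voltages, E-G = THD, H = unbalance.)
#
#   hProfile = ['Date', 'V1', 'V2', 'V3', 'THD1', 'THD2', 'THD3', 'Unbalance']
#   dProfile = [['2017-01-01 00:10:00', 230.1, 229.8, 231.0, 2.1, 2.3, 2.0, 0.4], ...]
#   hEvents = ['Start', 'End']
#   dEvents = [['2017-01-01 03:00:00', '2017-01-01 03:00:05'], ...]
#   report_path = create_excel_report('METER001', '2017-01-01', '2017-01-31', False,
#                                     hProfile, dProfile, hEvents, dEvents)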
def create_chart_scatter(workbook,cname,cVars,OnePhase):
"""Create scatter plot
"""
numProfileRows,cSiteName, sDateObj, eDateObj = cVars
c = workbook.add_chart({'type': 'scatter',
'subtype': 'straight'})
if cname == 'ProfileUG':
c.add_series({
'name': '=Profile!$H$1',
'categories': '=Profile!$A$2:$A$'+numProfileRows,
'values': '=Profile!$H$2:$H$'+numProfileRows,
})
c.set_y_axis({'name': 'Unbalance (%)'})
c.set_title ({'name': 'Unbalance Profile'+cSiteName})
elif cname == 'ProfileTHDG':
c.add_series({
'name': '=Profile!$E$1',
'categories': '=Profile!$A$2:$A$'+numProfileRows,
'values': '=Profile!$E$2:$E$'+numProfileRows,
})
if OnePhase == False:
c.add_series({
'name': '=Profile!$F$1',
'categories': '=Profile!$A$2:$A$'+numProfileRows,
'values': '=Profile!$F$2:$F$'+numProfileRows,
})
c.add_series({
'name': '=Profile!$G$1',
'categories': '=Profile!$A$2:$A$'+numProfileRows,
'values': '=Profile!$G$2:$G$'+numProfileRows,
})
c.set_title ({'name': 'THD Profile'+cSiteName})
c.set_y_axis({'name': 'THD (%)'})
elif cname == 'ProfileVoltsG':
c.add_series({
'name': '=Profile!$B$1',
'categories': '=Profile!$A$2:$A$'+numProfileRows,
'values': '=Profile!$B$2:$B$'+numProfileRows,
})
if OnePhase == False:
c.add_series({
'name': '=Profile!$C$1',
'categories': '=Profile!$A$2:$A$'+numProfileRows,
'values': '=Profile!$C$2:$C$'+numProfileRows,
})
c.add_series({
'name': '=Profile!$D$1',
'categories': '=Profile!$A$2:$A$'+numProfileRows,
'values': '=Profile!$D$2:$D$'+numProfileRows,
})
c.set_title ({'name': 'Voltage Profile'+cSiteName})
c.set_y_axis({'name': 'Voltage'})
#Generic settings
c.set_x_axis({'name': 'Date',
'num_font': {'rotation': -45},
'date_axis': True,
'min': sDateObj,
'max': eDateObj,
'num_format': 'dd/mm/yyyy',
})
c.set_legend({'position': 'top'})
c.set_style(2)
c.set_size({'width': 900, 'height': 500})
return c
def create_chart_histogram(workbook,sheetName,lastRow,title,axisTitle):
""" Create histogram chart object
"""
c = workbook.add_chart({'type': 'column',
'subtype': 'stacked'})
c.set_style(2)
c.add_series({
'name': '='+sheetName+'!$F$3',
'categories': '='+sheetName+'!$A$4:$A$'+lastRow,
'values': '='+sheetName+'!$F$4:$F$'+lastRow,
'fill': {'color': 'green'},
'border': {'color': 'black'},
'gap': 0,
})
c.add_series({
'name': '='+sheetName+'!$G$3',
'categories': '='+sheetName+'!$A$4:$A$'+lastRow,
'values': '='+sheetName+'!$G$4:$G$'+lastRow,
'fill': {'color': 'red'},
'border': {'color': 'black'},
'gap': 0,
})
c.set_title ({'name': title})
c.set_x_axis({'name': axisTitle,
'num_font': {'rotation': -45},
})
c.set_y_axis({'name': 'Freq. of Occurrence (%)',
'num_format': '0%',
})
c.set_plotarea({
'border': {'color': 'black', 'width': 1},
'fill': {'color': '#FFFFC2'}
})
c.set_legend({'position': 'top'})
return c
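# (Added note.) The two stacked series reference columns F ('in compliance', green) and
# G ('out of compliance', red) that create_histogram_ext() fills in on the V / THD / U
# sheets; lastRow is passed as cLR + 1, i.e. '74' for the V sheet and '84' for THD and U.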
def dump_data(ws,headings,data):
""" Iterate over the data and write it out row by row.
"""
for i, colVal in enumerate(headings):
ws.write(0,i,colVal)
for i, row in enumerate(data):
for j, colVal in enumerate(row):
ws.write(i+1,j,colVal)
return ws
def dump_vertical_data(ws,headings,data,sRow):
""" Iterate over the data and write it out row by row.
"""
for i, colVal in enumerate(headings):
ws.write(i+sRow,0,colVal)
for i, row in enumerate(data):
for j, colVal in enumerate(row):
ws.write(j+sRow,i+1,colVal)
return ws
def dump_date_data(ws,headings,data,date_format):
""" Iterate over the data and write it out row by row.
First column should be treated as a date
"""
for i, colVal in enumerate(headings):
ws.write(0,i,colVal)
for i, row in enumerate(data):
for j, colVal in enumerate(row):
if j == 0:
try:
date_time = datetime.datetime.strptime(colVal, '%Y-%m-%d %H:%M:%S')
except ValueError:
date_time = datetime.datetime.strptime(colVal, '%Y-%m-%d')
ws.write_datetime('A'+str(i+2), date_time, date_format)
else:
ws.write(i+1,j,colVal)
return ws
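# Hedged example of dump_date_data (illustrative only): the first column is
# parsed with strptime and written via write_datetime, so a date format created
# on the target workbook must be supplied. The format string is an assumption.
def _example_dump_date_data(workbook, ws):
    date_format = workbook.add_format({'num_format': 'dd/mm/yyyy hh:mm'})
    headings = ['Date', 'V1']
    data = [['2020-01-01 00:00:00', 239.1],
            ['2020-01-02', 238.7]]   # date-only strings are also accepted
    return dump_date_data(ws, headings, data, date_format)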
def create_histogram_V(ws,numProfileRows,OnePhase,fBU):
""" Iterate over the data and write it out row by row.
"""
#Headings
ws.write(1,0,'Voltage Distribution Data',fBU)
cLR = 73 #Last row to apply formulas to
if OnePhase == True:
ws.write_formula('D2', '=COUNT(Profile!B2:B'+numProfileRows+')')
arFormula = '{=FREQUENCY(Profile!B2:B'+numProfileRows +',V!A4:A'+str(cLR)+')}'
else:
ws.write_formula('D2', '=3*COUNT(Profile!B2:B'+numProfileRows+')')
arFormula = '{=FREQUENCY(Profile!B2:D'+numProfileRows +',V!A4:A'+str(cLR)+')}'
ws.write(1,5,'Limit')
ws.write(1,6,0.94)
ws.write(1,7,1.06)
icFormula = '=IF(AND(Arow>=(240*$G$2),Arow<=(240*$H$2)),Drow,0)'
ocFormula = '=IF(AND(Arow>=(240*$G$2),Arow<=(240*$H$2)),0,Drow)'
#A - Bins
for i, colVal in enumerate(range(200,271)):
ws.write(i+3,0,colVal)
#Rows B to G
ws = create_histogram_ext(ws,cLR,arFormula,icFormula,ocFormula)
return ws
def create_histogram_THD(ws,numProfileRows,OnePhase,fBU):
""" Iterate over the data and write it out row by row.
"""
#Headings
ws.write(1,0,'THD Distribution Data',fBU)
cLR = 83 #Last row to apply formulas to
if OnePhase == True:
ws.write_formula('D2', '=COUNT(Profile!B2:B'+numProfileRows+')')
arFormula = '{=FREQUENCY(Profile!E2:E'+numProfileRows +',THD!A4:A'+str(cLR)+')}'
else:
ws.write_formula('D2', '=3*COUNT(Profile!B2:B'+numProfileRows+')')
arFormula = '{=FREQUENCY(Profile!E2:G'+numProfileRows +',THD!A4:A'+str(cLR)+')}'
ws.write(1,5,'Limit')
ws.write(1,6,8)
icFormula = '=IF(Arow<$G$2,Drow,0)'
ocFormula = '=IF(Arow<$G$2,0,Drow)'
#A - Bins
bin_list = []
i = 0
while i <= 10:
bin_list.append(i)
i += 0.125
for i, colVal in enumerate(bin_list):
ws.write(i+3,0,colVal)
#Rows B to G
ws = create_histogram_ext(ws,cLR,arFormula,icFormula,ocFormula)
return ws
def create_histogram_U(ws,numProfileRows,fBU):
""" Iterate over the data and write it out row by row.
"""
#Headings
ws.write(1,0,'Unbalance Distribution Data',fBU)
cLR = 83 #Last row to apply formulas to
ws.write_formula('D2', '=COUNT(Profile!B2:B'+numProfileRows+')')
arFormula = '{=FREQUENCY(Profile!H2:H'+numProfileRows +',U!A4:A'+str(cLR)+')}'
ws.write(1,5,'Limit')
ws.write(1,6,2.55)
icFormula = '=IF(Arow<$G$2,Drow,0)'
ocFormula = '=IF(Arow<$G$2,0,Drow)'
#A - Bins
bin_list = []
i = 0
while i <= 4:
bin_list.append(i)
i += 0.05
for i, colVal in enumerate(bin_list):
ws.write(i+3,0,colVal)
#Rows B to G
ws = create_histogram_ext(ws,cLR,arFormula,icFormula,ocFormula)
return ws
def create_histogram_ext(ws,cLR,arFormula,icFormula,ocFormula):
""" Extra Columns other than Bins (which are sheet specific)
"""
#headings
    headings = ['bins','occurrences in bin','cumulative total','occurrences in bin (%)','cumulative total (%)','in compliance','out of compliance']
for i, colVal in enumerate(headings):
ws.write(2,i,colVal)
#B - Occurrence Array
ws.write_array_formula('B4:B'+str(cLR)+'',arFormula)
ws.write_formula('B'+str(cLR+1)+'','=IF(C'+str(cLR)+'<D2,D2-C'+str(cLR)+',0)')
#C - Cumulative
ws.write_formula('C4', '=B4')
for i in range(5,cLR+2):
cFormula = '=B'+str(i)+'+C'+str(i-1)
ws.write_formula('C'+str(i),cFormula)
for i in range(4,cLR+2):
        #D - occurrences in bin (%)
cFormula = '=B'+str(i)+'/D2'
ws.write_formula('D'+str(i),cFormula)
#E - Cumulative Perc
cFormula = '=C'+str(i)+'/D2'
ws.write_formula('E'+str(i),cFormula)
#F - In Compliance
cFormula = icFormula.replace('row',str(i))
ws.write_formula('F'+str(i),cFormula)
#G - Out of Compliance
cFormula = ocFormula.replace('row',str(i))
ws.write_formula('G'+str(i),cFormula)
return ws
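# Hedged layout note for the histogram sheets, derived from the formulas above:
#   A - bins (written by the sheet-specific caller)
#   B - FREQUENCY() array formula counting samples per bin
#   C - running cumulative total of column B
#   D - samples in bin as a fraction of the total stored in D2
#   E - cumulative total as a fraction of the total stored in D2
#   F - in-compliance share (icFormula with 'row' substituted per row)
#   G - out-of-compliance share (ocFormula with 'row' substituted per row)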
|
|
from django.db import models
from django.contrib.auth.models import User
from django import forms
from django.core.exceptions import ObjectDoesNotExist
class CustomListField(models.TextField):
    '''
    A model field that stores a Python list as a single token-separated string.
    It overrides to_python() and get_db_prep_value() to convert between the list
    used in Python code and the delimited string stored in the database.
    '''
    # SubfieldBase ensures to_python() is called whenever a value is assigned to
    # this field (including when rows are loaded from the database), so the
    # attribute is always exposed as a Python list.
    __metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
self.token = kwargs.pop('token', ',')
        kwargs = {'default': None, 'null': True, 'blank': True,
                  'help_text': 'Enter the options for the select field types separated by commas, e.g. No,Yes,Not Applicable. TO EDIT EXISTING OPTIONS, CLEAR THE OPTIONS AND TYPE AFRESH.'}
super(CustomListField, self).__init__(*args, **kwargs)
def to_python(self, value):
        '''
        Convert the stored database value back into a Python list.
        @return: None for empty values, the value unchanged if it is already a
        list, otherwise the stored string split on the token.
        '''
        if not value:
            return None
if isinstance(value, list):
return value
return value.split(self.token)
def get_db_prep_value(self, value,connection=None,prepared=False):
'''
        @return: the list items joined by the token, as stored in the database
        '''
        if not value:
            return None
assert(isinstance(value, list) or isinstance(value, tuple))
return self.token.join([unicode(s) for s in value])
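# Hedged round-trip sketch (not used elsewhere in this module): shows how
# to_python() and get_db_prep_value() relate for the default ',' token.
def _example_customlistfield_roundtrip():
    field = CustomListField()
    stored = field.get_db_prep_value(['No', 'Yes', 'Not Applicable'])
    return field.to_python(stored)   # -> [u'No', u'Yes', u'Not Applicable']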
# Choices for Question.field_type: (stored value, human-readable name) pairs.
FIELD_TYPE_CHOICES = (
    ('charfield', 'charfield'),
    ('textfield', 'textfield'),
    ('booleanfield', 'boolean'),
    ('select_dropdown_field', 'select_dropdown_field'),
    ('radioselectfield', 'radioselectfield'),
    ('multiplechoicefield', 'multiplechoicefield'),
)
class Question(models.Model):
    '''
    Models a single question and its attributes:
    1. label: the actual question, e.g. "What is your name?"
    2. field_type: the kind of answer the question expects, e.g.
       booleanfield - the answer is True or False
       charfield - the answer is typed into a short form field
       textfield - the answer is typed into a larger text field
       select_dropdown_field - one answer is selected from a drop-down of options
       multiplechoicefield - one or more answers are selected from the options
       radioselectfield - exactly one answer is selected from the options
    3. selectoptions: the list of choices available for the question. Required for
       the choice field types (select_dropdown_field, radioselectfield,
       multiplechoicefield) and None otherwise. Options are stored as a
       comma-separated string, e.g. a radioselectfield question might use
       'Yes,No,Not Applicable'.
    '''
class Meta():
db_table ='question'
label=models.CharField('question',max_length=255)
field_type=models.CharField(choices=FIELD_TYPE_CHOICES,max_length=100)
selectoptions=CustomListField()
def __unicode__(self):
return 'Question:%s FieldType:%s Selectoptions:%s' %(self.label, self.field_type,str(self.selectoptions))
def save(self,*args,**kwgs):
'''
        Ensure selectoptions is saved as None for non-choice field types; only the
        choice field types (select_dropdown_field, radioselectfield,
        multiplechoicefield) keep their options.
'''
if not self.id:
if not self.field_type in ['select_dropdown_field','radioselectfield', 'multiplechoicefield'] :
self.selectoptions = None
super(Question,self).save(*args,**kwgs)
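# Hedged usage sketch (illustrative data; the function is never called at import
# time): creating a choice-type question together with its options.
def _example_create_question():
    return Question.objects.create(
        label='Do you smoke?',
        field_type='radioselectfield',
        selectoptions=['No', 'Yes', 'Not Applicable'],
    )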
#TODO: Move this to forms.py
class CustomListWidget(forms.Textarea):
'''
    A flattened Textarea widget used to render a CustomListField: the
    selectoptions list is displayed (and re-entered) as a comma-separated
    string, e.g. the list [A, B, C] is shown and stored as 'A,B,C'.
'''
def render(self, name, value, attrs=None):
if value :
value = ','.join(str(v) for v in value)
return super(CustomListWidget, self).render(name, value, attrs)
#TODO: move this to forms.py
class QuestionAdminForm(forms.ModelForm):
'''
    Overrides the admin form validation for Question.selectoptions so that users
    enter valid options for every field type:
    1. for the choice field types (multiplechoicefield, radioselectfield and
       select_dropdown_field), selectoptions must not be empty and must be a
       comma-separated string;
    2. for the non-choice field types (charfield, textfield, booleanfield),
       selectoptions must be empty/None.
    An appropriate validation error is raised otherwise. Questions are reusable.
'''
class Meta:
model = Question
widgets = {'selectoptions': CustomListWidget(),}
def clean(self):
'''
custom clean for select options validation
@return: cleaned_data
'''
field_type=self.cleaned_data["field_type"]
selectoptions = self.cleaned_data["selectoptions"]
if field_type in ['select_dropdown_field','radioselectfield', 'multiplechoicefield'] :
if not selectoptions:
raise forms.ValidationError("Select Options is required for "+ str(field_type)+ " enter valid options seperated with commas e.g No,Yes,Not Applicable")
elif "," not in selectoptions :
raise forms.ValidationError("Enter valid options seperated with comma e.g No,Yes,Not Applicable")
elif field_type in ['charfield','textfield','booleanfield']:
if selectoptions :
raise forms.ValidationError("Select Options is not required for " + str(field_type) + " Must Be Left Empty")
return self.cleaned_data
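# Hedged admin wiring sketch (this would normally live in admin.py; shown only to
# illustrate how QuestionAdminForm is intended to be used):
#
#   from django.contrib import admin
#
#   class QuestionAdmin(admin.ModelAdmin):
#       form = QuestionAdminForm
#
#   admin.site.register(Question, QuestionAdmin)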
class QuestionGroup(models.Model):
'''
    Represents a group of questions; each group can contain one or more questions.
    order_info stores the order in which the question group is rendered in a form,
    e.g. order_info = 2 is rendered before order_info = 3.
'''
class Meta():
db_table ='questiongroup'
name = models.CharField('questiongroupname',max_length=255,unique=True)
questions = models.ManyToManyField(Question, through = 'Question_order')
#context fields
_context = None
def get_ordered_questions(self):
'''
@return: questions in question group ordered by order_info
'''
return [order.question for order in Question_order.objects.filter(questiongroup=self).order_by('order_info')]
def set_context(self, answer_set):
'''
        A QuestionGroup can be assigned to many Questionnaires, and each questionnaire
        can be taken by many users, so there can be many combinations of questionnaire,
        user and answer set associated with any question group.
        Sometimes you will want to see the group in a specific context, and the best
        way to do this is to associate the instance with a single answer set; this
        gives you access to the contextualised questionnaire, its user and the
        answers to its questions.
        This is not saved to the database or persisted in any other way; it is set
        on a per-instance basis.
'''
if not isinstance(answer_set, AnswerSet) :
raise AttributeError
self._context = answer_set
def clear_context(self):
'''
        Clear the context field for this instance.
'''
self._context = None
def __unicode__(self):
return self.name
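# Hedged context sketch (AnswerSet is defined later in this module; the objects
# here are illustrative): how set_context/clear_context are intended to be used.
def _example_group_context(group, answer_set):
    group.set_context(answer_set)   # view the group through one user's answer set
    answers = answer_set.get_latest_question_answer_in_order()
    group.clear_context()           # drop the per-instance association again
    return answers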
class Questionnaire(models.Model):
'''
    Models a Questionnaire and its attributes:
    name: the name of the questionnaire
    questiongroups: the question groups that make up the questionnaire
    Question groups are reusable, i.e. a given questiongroup can appear in one or
    more questionnaires.
'''
name=models.CharField(max_length=250)
questiongroup=models.ManyToManyField(QuestionGroup, through='QuestionGroup_order')
def get_ordered_groups(self):
'''
        @return: the questiongroups in this questionnaire, ordered by order_info
'''
return [order.questiongroup for order in QuestionGroup_order.objects.filter(questionnaire=self).order_by('order_info')]
def get_group_for_index(self, index):
'''
        Return a tuple of (the question group at the given position in the ordered
        sequence of groups, the number of groups remaining after that position).
        If there is no group at this index an IndexError is raised.
'''
ordered_groups = self.get_ordered_groups()
        return (ordered_groups[index], (len(ordered_groups) - index) - 1)
def add_question_group(self, questiongroup):
'''
        Append the given questiongroup to the end of this questionnaire's ordered
        sequence of groups.
'''
if not isinstance(questiongroup, QuestionGroup):
raise AttributeError
if len(self.get_ordered_groups()) > 0:
latest_group = QuestionGroup_order.objects.filter(questionnaire=self).latest('order_info')
next_group_info = latest_group.order_info +1
else:
next_group_info = 1
QuestionGroup_order.objects.create(questionnaire=self, questiongroup=questiongroup, order_info= next_group_info)
def __unicode__(self):
return self.name
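# Hedged ordering sketch (illustrative only): appending groups and reading them
# back in rendering order.
def _example_build_questionnaire(group_a, group_b):
    questionnaire = Questionnaire.objects.create(name='Intake survey')
    questionnaire.add_question_group(group_a)   # stored with order_info 1
    questionnaire.add_question_group(group_b)   # stored with order_info 2
    return questionnaire.get_ordered_groups()   # [group_a, group_b]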
class QuestionGroup_order(models.Model):
'''
    Stores the ordering of the questiongroups rendered in a questionnaire.
    order_info stores the order in which the questiongroup is rendered in a form,
    e.g. order_info = 2 is rendered before order_info = 3.
'''
questiongroup=models.ForeignKey(QuestionGroup)
questionnaire=models.ForeignKey(Questionnaire)
order_info=models.IntegerField(max_length=3)
def __unicode__(self):
return 'group:%s order:%s' %(self.questiongroup, str(self.order_info))
class Question_order(models.Model):
'''
    Stores the ordering relationship between a question and its questiongroup.
    order_info stores the order in which the questions in a questiongroup are
    rendered in a form, e.g. order_info = 2 is rendered before order_info = 3.
'''
questiongroup =models.ForeignKey(QuestionGroup)
question = models.ForeignKey(Question)
order_info = models.IntegerField(max_length=3)
def __unicode__(self):
return 'group:%s order:%s' %(self.question, str(self.order_info))
class AnswerSet(models.Model):
'''
    Associates a user with a questiongroup in a questionnaire when the user
    answers that questionnaire, and groups the user's answers for it.
'''
class Meta():
db_table ='answer_set'
user=models.ForeignKey(User)
questionnaire=models.ForeignKey(Questionnaire)
questiongroup=models.ForeignKey(QuestionGroup)
def __unicode__(self):
return 'user:%s questionnaire:%s questiongroup:%s ' %(str(self.user), str(self.questionnaire),str(self.questiongroup))
def get_latest_question_answers(self):
'''
Convenience function that returns a list of the latest QuestionAnswer objects (bearing in mind that you could
have more than one QuestionAnswer for each question in a given answer set).
'''
return [record.question_answer for record in LatestQuestionAnswer.objects.filter(answer_set=self)]
def get_latest_question_answer_in_order(self):
'''
        Return a list of QuestionAnswer objects in the same order that the questions
        are defined in the group. Where a question has no answer it is simply
        omitted from the list and the sequence closes up.
'''
answer_dict = {record.question:record for record in self.get_latest_question_answers()}
ordered_answers = []
for question in self.questiongroup.get_ordered_questions():
if question in answer_dict:
ordered_answers.append(answer_dict[question])
return ordered_answers
def is_complete(self):
'''
        Return True if there is an answer for each of the questions defined in the
        questiongroup, otherwise return False.
'''
answers = self.get_latest_question_answers()
questions = self.questiongroup.get_ordered_questions()
#get a list of the answered questions
answered_questions = []
for answer in answers:
answered_questions.append(answer.question)
for question in questions:
if question not in answered_questions:
return False
return True
class QuestionAnswer(models.Model):
'''
This model stores questions, answers and related answer_set
'''
class Meta():
db_table ='questionanswer'
question = models.ForeignKey(Question)
answer = models.CharField(max_length=255)
answer_set = models.ForeignKey(AnswerSet)
created = models.DateTimeField(auto_now_add=True)
def save(self, force_insert=False, force_update=False, using=None):
super(QuestionAnswer, self).save(force_insert=force_insert, force_update=force_update, using=using)
#now update the LatestQuestionAnswer table
try:
record = LatestQuestionAnswer.objects.get(question=self.question, answer_set=self.answer_set)
if record.question_answer == self:
return#nothing to do no point updating the record as it is already correct
except ObjectDoesNotExist:
record = LatestQuestionAnswer(question=self.question, answer_set= self.answer_set)
record.question_answer = self
record.save()
def __unicode__(self):
return 'question:%s answer:%s answer_set:%s' %(str(self.question), str(self.answer), str(self.answer_set))
class LatestQuestionAnswer(models.Model):
question = models.ForeignKey(Question)
question_answer = models.ForeignKey(QuestionAnswer)
answer_set = models.ForeignKey(AnswerSet)
    created = models.DateTimeField(auto_now_add=True)
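# Hedged answering sketch (illustrative only): saving a QuestionAnswer also
# maintains the LatestQuestionAnswer pointer via QuestionAnswer.save() above.
def _example_record_answer(question, answer_set):
    qa = QuestionAnswer(question=question, answer='Yes', answer_set=answer_set)
    qa.save()   # creates or updates the LatestQuestionAnswer row for this pair
    return answer_set.get_latest_question_answers()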
|
|
"""Tests for letsencrypt.renewer."""
import datetime
import os
import tempfile
import pkg_resources
import shutil
import unittest
import configobj
import mock
import pytz
from letsencrypt.storage import ALL_FOUR
def unlink_all(rc_object):
"""Unlink all four items associated with this RenewableCert."""
for kind in ALL_FOUR:
os.unlink(getattr(rc_object, kind))
def fill_with_sample_data(rc_object):
"""Put dummy data into all four files of this RenewableCert."""
for kind in ALL_FOUR:
with open(getattr(rc_object, kind), "w") as f:
f.write(kind)
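# Hedged refactoring sketch (not used by the tests below): several tests repeat
# the "symlink versions 1..N into the archive and write dummy contents" setup.
# A helper like this could factor that out; the name and behaviour are
# assumptions based only on the pattern visible in the tests.
def link_and_fill_versions(rc_object, versions):
    """Point all four links at archive/example.org/<kind><ver>.pem and write
    placeholder contents, mirroring the per-test setup loops."""
    for ver in versions:
        for kind in ALL_FOUR:
            where = getattr(rc_object, kind)
            if os.path.islink(where):
                os.unlink(where)
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "{0}{1}.pem".format(kind, ver)), where)
            with open(where, "w") as f:
                f.write(kind)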
class RenewableCertTests(unittest.TestCase):
# pylint: disable=too-many-public-methods
"""Tests for letsencrypt.renewer.*."""
def setUp(self):
from letsencrypt import storage
self.tempdir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.tempdir, "live", "example.org"))
os.makedirs(os.path.join(self.tempdir, "archive", "example.org"))
os.makedirs(os.path.join(self.tempdir, "configs"))
defaults = configobj.ConfigObj()
defaults["live_dir"] = os.path.join(self.tempdir, "live")
defaults["archive_dir"] = os.path.join(self.tempdir, "archive")
defaults["renewal_configs_dir"] = os.path.join(self.tempdir,
"configs")
config = configobj.ConfigObj()
for kind in ALL_FOUR:
config[kind] = os.path.join(self.tempdir, "live", "example.org",
kind + ".pem")
config.filename = os.path.join(self.tempdir, "configs",
"example.org.conf")
self.defaults = defaults # for main() test
self.test_rc = storage.RenewableCert(config, defaults)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_initialization(self):
self.assertEqual(self.test_rc.lineagename, "example.org")
for kind in ALL_FOUR:
self.assertEqual(
getattr(self.test_rc, kind), os.path.join(
self.tempdir, "live", "example.org", kind + ".pem"))
def test_renewal_bad_config(self):
"""Test that the RenewableCert constructor will complain if
the renewal configuration file doesn't end in ".conf" or if it
isn't a ConfigObj."""
from letsencrypt import storage
defaults = configobj.ConfigObj()
config = configobj.ConfigObj()
# These files don't exist and aren't created here; the point of the test
# is to confirm that the constructor rejects them outright because of
# the configfile's name.
for kind in ALL_FOUR:
config["cert"] = "nonexistent_" + kind + ".pem"
config.filename = "nonexistent_sillyfile"
self.assertRaises(ValueError, storage.RenewableCert, config, defaults)
self.assertRaises(TypeError, storage.RenewableCert, "fun", defaults)
def test_renewal_incomplete_config(self):
"""Test that the RenewableCert constructor will complain if
the renewal configuration file is missing a required file element."""
from letsencrypt import storage
defaults = configobj.ConfigObj()
config = configobj.ConfigObj()
config["cert"] = "imaginary_cert.pem"
# Here the required privkey is missing.
config["chain"] = "imaginary_chain.pem"
config["fullchain"] = "imaginary_fullchain.pem"
config.filename = "imaginary_config.conf"
self.assertRaises(ValueError, storage.RenewableCert, config, defaults)
def test_consistent(self): # pylint: disable=too-many-statements
oldcert = self.test_rc.cert
self.test_rc.cert = "relative/path"
# Absolute path for item requirement
self.assertFalse(self.test_rc.consistent())
self.test_rc.cert = oldcert
# Items must exist requirement
self.assertFalse(self.test_rc.consistent())
# Items must be symlinks requirements
fill_with_sample_data(self.test_rc)
self.assertFalse(self.test_rc.consistent())
unlink_all(self.test_rc)
# Items must point to desired place if they are relative
for kind in ALL_FOUR:
os.symlink(os.path.join("..", kind + "17.pem"),
getattr(self.test_rc, kind))
self.assertFalse(self.test_rc.consistent())
unlink_all(self.test_rc)
# Items must point to desired place if they are absolute
for kind in ALL_FOUR:
os.symlink(os.path.join(self.tempdir, kind + "17.pem"),
getattr(self.test_rc, kind))
self.assertFalse(self.test_rc.consistent())
unlink_all(self.test_rc)
# Items must point to things that exist
for kind in ALL_FOUR:
os.symlink(os.path.join("..", "..", "archive", "example.org",
kind + "17.pem"),
getattr(self.test_rc, kind))
self.assertFalse(self.test_rc.consistent())
# This version should work
fill_with_sample_data(self.test_rc)
self.assertTrue(self.test_rc.consistent())
# Items must point to things that follow the naming convention
os.unlink(self.test_rc.fullchain)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"fullchain_17.pem"), self.test_rc.fullchain)
with open(self.test_rc.fullchain, "w") as f:
f.write("wrongly-named fullchain")
self.assertFalse(self.test_rc.consistent())
def test_current_target(self):
# Relative path logic
os.symlink(os.path.join("..", "..", "archive", "example.org",
"cert17.pem"), self.test_rc.cert)
with open(self.test_rc.cert, "w") as f:
f.write("cert")
self.assertTrue(os.path.samefile(self.test_rc.current_target("cert"),
os.path.join(self.tempdir, "archive",
"example.org",
"cert17.pem")))
# Absolute path logic
os.unlink(self.test_rc.cert)
os.symlink(os.path.join(self.tempdir, "archive", "example.org",
"cert17.pem"), self.test_rc.cert)
with open(self.test_rc.cert, "w") as f:
f.write("cert")
self.assertTrue(os.path.samefile(self.test_rc.current_target("cert"),
os.path.join(self.tempdir, "archive",
"example.org",
"cert17.pem")))
def test_current_version(self):
for ver in (1, 5, 10, 20):
os.symlink(os.path.join("..", "..", "archive", "example.org",
"cert{0}.pem".format(ver)),
self.test_rc.cert)
with open(self.test_rc.cert, "w") as f:
f.write("cert")
os.unlink(self.test_rc.cert)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"cert10.pem"), self.test_rc.cert)
self.assertEqual(self.test_rc.current_version("cert"), 10)
def test_no_current_version(self):
self.assertEqual(self.test_rc.current_version("cert"), None)
def test_latest_and_next_versions(self):
for ver in xrange(1, 6):
for kind in ALL_FOUR:
where = getattr(self.test_rc, kind)
if os.path.islink(where):
os.unlink(where)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"{0}{1}.pem".format(kind, ver)), where)
with open(where, "w") as f:
f.write(kind)
self.assertEqual(self.test_rc.latest_common_version(), 5)
self.assertEqual(self.test_rc.next_free_version(), 6)
# Having one kind of file of a later version doesn't change the
# result
os.unlink(self.test_rc.privkey)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"privkey7.pem"), self.test_rc.privkey)
with open(self.test_rc.privkey, "w") as f:
f.write("privkey")
self.assertEqual(self.test_rc.latest_common_version(), 5)
# ... although it does change the next free version
self.assertEqual(self.test_rc.next_free_version(), 8)
# Nor does having three out of four change the result
os.unlink(self.test_rc.cert)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"cert7.pem"), self.test_rc.cert)
with open(self.test_rc.cert, "w") as f:
f.write("cert")
os.unlink(self.test_rc.fullchain)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"fullchain7.pem"), self.test_rc.fullchain)
with open(self.test_rc.fullchain, "w") as f:
f.write("fullchain")
self.assertEqual(self.test_rc.latest_common_version(), 5)
# If we have everything from a much later version, it does change
# the result
ver = 17
for kind in ALL_FOUR:
where = getattr(self.test_rc, kind)
if os.path.islink(where):
os.unlink(where)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"{0}{1}.pem".format(kind, ver)), where)
with open(where, "w") as f:
f.write(kind)
self.assertEqual(self.test_rc.latest_common_version(), 17)
self.assertEqual(self.test_rc.next_free_version(), 18)
def test_update_link_to(self):
for ver in xrange(1, 6):
for kind in ALL_FOUR:
where = getattr(self.test_rc, kind)
if os.path.islink(where):
os.unlink(where)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"{0}{1}.pem".format(kind, ver)), where)
with open(where, "w") as f:
f.write(kind)
self.assertEqual(ver, self.test_rc.current_version(kind))
self.test_rc.update_link_to("cert", 3)
self.test_rc.update_link_to("privkey", 2)
self.assertEqual(3, self.test_rc.current_version("cert"))
self.assertEqual(2, self.test_rc.current_version("privkey"))
self.assertEqual(5, self.test_rc.current_version("chain"))
self.assertEqual(5, self.test_rc.current_version("fullchain"))
# Currently we are allowed to update to a version that doesn't exist
self.test_rc.update_link_to("chain", 3000)
# However, current_version doesn't allow querying the resulting
# version (because it's a broken link).
self.assertEqual(os.path.basename(os.readlink(self.test_rc.chain)),
"chain3000.pem")
def test_version(self):
os.symlink(os.path.join("..", "..", "archive", "example.org",
"cert12.pem"), self.test_rc.cert)
with open(self.test_rc.cert, "w") as f:
f.write("cert")
# TODO: We should probably test that the directory is still the
# same, but it's tricky because we can get an absolute
# path out when we put a relative path in.
self.assertEqual("cert8.pem",
os.path.basename(self.test_rc.version("cert", 8)))
def test_update_all_links_to(self):
for ver in xrange(1, 6):
for kind in ALL_FOUR:
where = getattr(self.test_rc, kind)
if os.path.islink(where):
os.unlink(where)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"{0}{1}.pem".format(kind, ver)), where)
with open(where, "w") as f:
f.write(kind)
self.assertEqual(ver, self.test_rc.current_version(kind))
self.assertEqual(self.test_rc.latest_common_version(), 5)
for ver in xrange(1, 6):
self.test_rc.update_all_links_to(ver)
for kind in ALL_FOUR:
self.assertEqual(ver, self.test_rc.current_version(kind))
self.assertEqual(self.test_rc.latest_common_version(), 5)
def test_has_pending_deployment(self):
for ver in xrange(1, 6):
for kind in ALL_FOUR:
where = getattr(self.test_rc, kind)
if os.path.islink(where):
os.unlink(where)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"{0}{1}.pem".format(kind, ver)), where)
with open(where, "w") as f:
f.write(kind)
self.assertEqual(ver, self.test_rc.current_version(kind))
for ver in xrange(1, 6):
self.test_rc.update_all_links_to(ver)
for kind in ALL_FOUR:
self.assertEqual(ver, self.test_rc.current_version(kind))
if ver < 5:
self.assertTrue(self.test_rc.has_pending_deployment())
else:
self.assertFalse(self.test_rc.has_pending_deployment())
def _test_notafterbefore(self, function, timestamp):
test_cert = pkg_resources.resource_string(
"letsencrypt.tests", "testdata/cert.pem")
os.symlink(os.path.join("..", "..", "archive", "example.org",
"cert12.pem"), self.test_rc.cert)
with open(self.test_rc.cert, "w") as f:
f.write(test_cert)
desired_time = datetime.datetime.utcfromtimestamp(timestamp)
desired_time = desired_time.replace(tzinfo=pytz.UTC)
for result in (function(), function(12)):
self.assertEqual(result, desired_time)
self.assertEqual(result.utcoffset(), datetime.timedelta(0))
def test_notbefore(self):
self._test_notafterbefore(self.test_rc.notbefore, 1418337285)
# 2014-12-11 22:34:45+00:00 = Unix time 1418337285
def test_notafter(self):
self._test_notafterbefore(self.test_rc.notafter, 1418942085)
# 2014-12-18 22:34:45+00:00 = Unix time 1418942085
@mock.patch("letsencrypt.storage.datetime")
def test_time_interval_judgments(self, mock_datetime):
"""Test should_autodeploy() and should_autorenew() on the basis
of expiry time windows."""
test_cert = pkg_resources.resource_string(
"letsencrypt.tests", "testdata/cert.pem")
for kind in ALL_FOUR:
where = getattr(self.test_rc, kind)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"{0}12.pem".format(kind)), where)
with open(where, "w") as f:
f.write(kind)
os.unlink(where)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"{0}11.pem".format(kind)), where)
with open(where, "w") as f:
f.write(kind)
self.test_rc.update_all_links_to(12)
with open(self.test_rc.cert, "w") as f:
f.write(test_cert)
self.test_rc.update_all_links_to(11)
with open(self.test_rc.cert, "w") as f:
f.write(test_cert)
mock_datetime.timedelta = datetime.timedelta
for (current_time, interval, result) in [
# 2014-12-13 12:00:00+00:00 (about 5 days prior to expiry)
# Times that should result in autorenewal/autodeployment
(1418472000, "2 months", True), (1418472000, "1 week", True),
# Times that should not
(1418472000, "4 days", False), (1418472000, "2 days", False),
# 2009-05-01 12:00:00+00:00 (about 5 years prior to expiry)
# Times that should result in autorenewal/autodeployment
(1241179200, "7 years", True),
(1241179200, "11 years 2 months", True),
# Times that should not
(1241179200, "8 hours", False), (1241179200, "2 days", False),
(1241179200, "40 days", False), (1241179200, "9 months", False),
# 2015-01-01 (after expiry has already happened, so all
# intervals should cause autorenewal/autodeployment)
(1420070400, "0 seconds", True),
(1420070400, "10 seconds", True),
(1420070400, "10 minutes", True),
(1420070400, "10 weeks", True), (1420070400, "10 months", True),
(1420070400, "10 years", True), (1420070400, "99 months", True),
]:
sometime = datetime.datetime.utcfromtimestamp(current_time)
mock_datetime.datetime.utcnow.return_value = sometime
self.test_rc.configuration["deploy_before_expiry"] = interval
self.test_rc.configuration["renew_before_expiry"] = interval
self.assertEqual(self.test_rc.should_autodeploy(), result)
self.assertEqual(self.test_rc.should_autorenew(), result)
def test_should_autodeploy(self):
"""Test should_autodeploy() on the basis of reasons other than
expiry time window."""
# pylint: disable=too-many-statements
# Autodeployment turned off
self.test_rc.configuration["autodeploy"] = "0"
self.assertFalse(self.test_rc.should_autodeploy())
self.test_rc.configuration["autodeploy"] = "1"
# No pending deployment
for ver in xrange(1, 6):
for kind in ALL_FOUR:
where = getattr(self.test_rc, kind)
if os.path.islink(where):
os.unlink(where)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"{0}{1}.pem".format(kind, ver)), where)
with open(where, "w") as f:
f.write(kind)
self.assertFalse(self.test_rc.should_autodeploy())
@mock.patch("letsencrypt.storage.RenewableCert.ocsp_revoked")
def test_should_autorenew(self, mock_ocsp):
"""Test should_autorenew on the basis of reasons other than
expiry time window."""
# pylint: disable=too-many-statements
# Autorenewal turned off
self.test_rc.configuration["autorenew"] = "0"
self.assertFalse(self.test_rc.should_autorenew())
self.test_rc.configuration["autorenew"] = "1"
for kind in ALL_FOUR:
where = getattr(self.test_rc, kind)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"{0}12.pem".format(kind)), where)
with open(where, "w") as f:
f.write(kind)
# Mandatory renewal on the basis of OCSP revocation
mock_ocsp.return_value = True
self.assertTrue(self.test_rc.should_autorenew())
mock_ocsp.return_value = False
def test_save_successor(self):
for ver in xrange(1, 6):
for kind in ALL_FOUR:
where = getattr(self.test_rc, kind)
if os.path.islink(where):
os.unlink(where)
os.symlink(os.path.join("..", "..", "archive", "example.org",
"{0}{1}.pem".format(kind, ver)), where)
with open(where, "w") as f:
f.write(kind)
self.test_rc.update_all_links_to(3)
self.assertEqual(6, self.test_rc.save_successor(3, "new cert", None,
"new chain"))
with open(self.test_rc.version("cert", 6)) as f:
self.assertEqual(f.read(), "new cert")
with open(self.test_rc.version("chain", 6)) as f:
self.assertEqual(f.read(), "new chain")
with open(self.test_rc.version("fullchain", 6)) as f:
self.assertEqual(f.read(), "new cert" + "new chain")
# version 6 of the key should be a link back to version 3
self.assertFalse(os.path.islink(self.test_rc.version("privkey", 3)))
self.assertTrue(os.path.islink(self.test_rc.version("privkey", 6)))
# Let's try two more updates
self.assertEqual(7, self.test_rc.save_successor(6, "again", None,
"newer chain"))
self.assertEqual(8, self.test_rc.save_successor(7, "hello", None,
"other chain"))
# All of the subsequent versions should link directly to the original
# privkey.
for i in (6, 7, 8):
self.assertTrue(os.path.islink(self.test_rc.version("privkey", i)))
self.assertEqual("privkey3.pem", os.path.basename(os.readlink(
self.test_rc.version("privkey", i))))
for kind in ALL_FOUR:
self.assertEqual(self.test_rc.available_versions(kind), range(1, 9))
self.assertEqual(self.test_rc.current_version(kind), 3)
# Test updating from latest version rather than old version
self.test_rc.update_all_links_to(8)
self.assertEqual(9, self.test_rc.save_successor(8, "last", None,
"attempt"))
for kind in ALL_FOUR:
self.assertEqual(self.test_rc.available_versions(kind),
range(1, 10))
self.assertEqual(self.test_rc.current_version(kind), 8)
with open(self.test_rc.version("fullchain", 9)) as f:
self.assertEqual(f.read(), "last" + "attempt")
# Test updating when providing a new privkey. The key should
# be saved in a new file rather than creating a new symlink.
self.assertEqual(10, self.test_rc.save_successor(9, "with", "a",
"key"))
self.assertTrue(os.path.exists(self.test_rc.version("privkey", 10)))
self.assertFalse(os.path.islink(self.test_rc.version("privkey", 10)))
def test_new_lineage(self):
"""Test for new_lineage() class method."""
from letsencrypt import storage
config_dir = self.defaults["renewal_configs_dir"]
archive_dir = self.defaults["archive_dir"]
live_dir = self.defaults["live_dir"]
result = storage.RenewableCert.new_lineage("the-lineage.com", "cert",
"privkey", "chain", None,
self.defaults)
# This consistency check tests most relevant properties about the
# newly created cert lineage.
self.assertTrue(result.consistent())
self.assertTrue(os.path.exists(os.path.join(config_dir,
"the-lineage.com.conf")))
with open(result.fullchain) as f:
self.assertEqual(f.read(), "cert" + "chain")
# Let's do it again and make sure it makes a different lineage
result = storage.RenewableCert.new_lineage("the-lineage.com", "cert2",
"privkey2", "chain2", None,
self.defaults)
self.assertTrue(os.path.exists(
os.path.join(config_dir, "the-lineage.com-0001.conf")))
# Now trigger the detection of already existing files
os.mkdir(os.path.join(live_dir, "the-lineage.com-0002"))
self.assertRaises(ValueError, storage.RenewableCert.new_lineage,
"the-lineage.com", "cert3", "privkey3", "chain3",
None, self.defaults)
os.mkdir(os.path.join(archive_dir, "other-example.com"))
self.assertRaises(ValueError, storage.RenewableCert.new_lineage,
"other-example.com", "cert4", "privkey4", "chain4",
None, self.defaults)
# Make sure it can accept renewal parameters
params = {"stuff": "properties of stuff", "great": "awesome"}
result = storage.RenewableCert.new_lineage("the-lineage.com", "cert2",
"privkey2", "chain2",
params, self.defaults)
# TODO: Conceivably we could test that the renewal parameters actually
# got saved
def test_new_lineage_nonexistent_dirs(self):
"""Test that directories can be created if they don't exist."""
from letsencrypt import storage
config_dir = self.defaults["renewal_configs_dir"]
archive_dir = self.defaults["archive_dir"]
live_dir = self.defaults["live_dir"]
shutil.rmtree(config_dir)
shutil.rmtree(archive_dir)
shutil.rmtree(live_dir)
storage.RenewableCert.new_lineage("the-lineage.com", "cert2",
"privkey2", "chain2",
None, self.defaults)
self.assertTrue(os.path.exists(
os.path.join(config_dir, "the-lineage.com.conf")))
self.assertTrue(os.path.exists(
os.path.join(live_dir, "the-lineage.com", "privkey.pem")))
self.assertTrue(os.path.exists(
os.path.join(archive_dir, "the-lineage.com", "privkey1.pem")))
@mock.patch("letsencrypt.storage.le_util.unique_lineage_name")
def test_invalid_config_filename(self, mock_uln):
from letsencrypt import storage
mock_uln.return_value = "this_does_not_end_with_dot_conf", "yikes"
self.assertRaises(ValueError, storage.RenewableCert.new_lineage,
"example.com", "cert", "privkey", "chain",
None, self.defaults)
def test_bad_kind(self):
self.assertRaises(ValueError, self.test_rc.current_target, "elephant")
self.assertRaises(ValueError, self.test_rc.current_version, "elephant")
self.assertRaises(ValueError, self.test_rc.version, "elephant", 17)
self.assertRaises(ValueError, self.test_rc.available_versions,
"elephant")
self.assertRaises(ValueError, self.test_rc.newest_available_version,
"elephant")
self.assertRaises(ValueError, self.test_rc.update_link_to,
"elephant", 17)
def test_ocsp_revoked(self):
# XXX: This is currently hardcoded to False due to a lack of an
# OCSP server to test against.
self.assertFalse(self.test_rc.ocsp_revoked())
def test_parse_time_interval(self):
from letsencrypt import storage
# XXX: I'm not sure if intervals related to years and months
# take account of the current date (if so, some of these
# may fail in the future, like in leap years or even in
# months of different lengths!)
intended = {"": 0, "17 days": 17, "23": 23, "1 month": 31,
"7 weeks": 49, "1 year 1 day": 366, "1 year-1 day": 364,
"4 years": 1461}
for time in intended:
self.assertEqual(storage.parse_time_interval(time),
datetime.timedelta(intended[time]))
@mock.patch("letsencrypt.renewer.plugins_disco")
@mock.patch("letsencrypt.client.determine_account")
@mock.patch("letsencrypt.client.Client")
def test_renew(self, mock_c, mock_da, mock_pd):
"""Tests for renew()."""
from letsencrypt import renewer
test_cert = pkg_resources.resource_string(
"letsencrypt.tests", "testdata/cert-san.pem")
for kind in ALL_FOUR:
os.symlink(os.path.join("..", "..", "archive", "example.org",
kind + "1.pem"),
getattr(self.test_rc, kind))
fill_with_sample_data(self.test_rc)
with open(self.test_rc.cert, "w") as f:
f.write(test_cert)
# Fails because renewalparams are missing
self.assertFalse(renewer.renew(self.test_rc, 1))
self.test_rc.configfile["renewalparams"] = {"some": "stuff"}
# Fails because there's no authenticator specified
self.assertFalse(renewer.renew(self.test_rc, 1))
self.test_rc.configfile["renewalparams"]["rsa_key_size"] = "2048"
self.test_rc.configfile["renewalparams"]["server"] = "acme.example.com"
self.test_rc.configfile["renewalparams"]["authenticator"] = "fake"
mock_auth = mock.MagicMock()
mock_pd.PluginsRegistry.find_all.return_value = {"apache": mock_auth}
# Fails because "fake" != "apache"
self.assertFalse(renewer.renew(self.test_rc, 1))
self.test_rc.configfile["renewalparams"]["authenticator"] = "apache"
mock_client = mock.MagicMock()
mock_client.obtain_certificate.return_value = ("cert", "key", "chain")
mock_c.return_value = mock_client
self.assertEqual(2, renewer.renew(self.test_rc, 1))
# TODO: We could also make several assertions about calls that should
# have been made to the mock functions here.
self.assertEqual(mock_da.call_count, 1)
mock_client.obtain_certificate.return_value = (None, None, None)
# This should fail because the renewal itself appears to fail
self.assertFalse(renewer.renew(self.test_rc, 1))
@mock.patch("letsencrypt.renewer.notify")
@mock.patch("letsencrypt.storage.RenewableCert")
@mock.patch("letsencrypt.renewer.renew")
def test_main(self, mock_renew, mock_rc, mock_notify):
"""Test for main() function."""
from letsencrypt import renewer
mock_rc_instance = mock.MagicMock()
mock_rc_instance.should_autodeploy.return_value = True
mock_rc_instance.should_autorenew.return_value = True
mock_rc_instance.latest_common_version.return_value = 10
mock_rc.return_value = mock_rc_instance
with open(os.path.join(self.defaults["renewal_configs_dir"],
"README"), "w") as f:
f.write("This is a README file to make sure that the renewer is")
f.write("able to correctly ignore files that don't end in .conf.")
with open(os.path.join(self.defaults["renewal_configs_dir"],
"example.org.conf"), "w") as f:
# This isn't actually parsed in this test; we have a separate
# test_initialization that tests the initialization, assuming
# that configobj can correctly parse the config file.
f.write("cert = cert.pem\nprivkey = privkey.pem\n")
f.write("chain = chain.pem\nfullchain = fullchain.pem\n")
with open(os.path.join(self.defaults["renewal_configs_dir"],
"example.com.conf"), "w") as f:
f.write("cert = cert.pem\nprivkey = privkey.pem\n")
f.write("chain = chain.pem\nfullchain = fullchain.pem\n")
renewer.main(self.defaults)
self.assertEqual(mock_rc.call_count, 2)
self.assertEqual(mock_rc_instance.update_all_links_to.call_count, 2)
self.assertEqual(mock_notify.notify.call_count, 4)
self.assertEqual(mock_renew.call_count, 2)
# If we have instances that don't need any work done, no work should
# be done (call counts associated with processing deployments or
# renewals should not increase).
mock_happy_instance = mock.MagicMock()
mock_happy_instance.should_autodeploy.return_value = False
mock_happy_instance.should_autorenew.return_value = False
mock_happy_instance.latest_common_version.return_value = 10
mock_rc.return_value = mock_happy_instance
renewer.main(self.defaults)
self.assertEqual(mock_rc.call_count, 4)
self.assertEqual(mock_happy_instance.update_all_links_to.call_count, 0)
self.assertEqual(mock_notify.notify.call_count, 4)
self.assertEqual(mock_renew.call_count, 2)
def test_bad_config_file(self):
from letsencrypt import renewer
with open(os.path.join(self.defaults["renewal_configs_dir"],
"bad.conf"), "w") as f:
f.write("incomplete = configfile\n")
renewer.main(self.defaults)
# The ValueError is caught inside and nothing happens.
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
|
import numpy as np
import pandas as pd
import pytest
from vivarium import InteractiveContext
from vivarium.framework.lookup import LookupTable, validate_parameters
from vivarium.testing_utilities import TestPopulation, build_table
@pytest.mark.skip(reason="only order 0 interpolation with age bin edges currently supported")
def test_interpolated_tables(base_config):
year_start = base_config.time.start.year
year_end = base_config.time.end.year
years = build_table(lambda age, sex, year: year, year_start, year_end)
ages = build_table(lambda age, sex, year: age, year_start, year_end)
one_d_age = ages.copy()
del one_d_age["year"]
one_d_age = one_d_age.drop_duplicates()
base_config.update(
{"population": {"population_size": 10000}, "interpolation": {"order": 1}}
) # the results we're checking later assume interp order 1
simulation = InteractiveContext(components=[TestPopulation()], configuration=base_config)
manager = simulation._tables
years = manager.build_table(
years,
key_columns=("sex",),
parameter_columns=(
"age",
"year",
),
value_columns=None,
)
ages = manager.build_table(
ages,
key_columns=("sex",),
parameter_columns=(
"age",
"year",
),
value_columns=None,
)
one_d_age = manager.build_table(
one_d_age, key_columns=("sex",), parameter_columns=("age",), value_columns=None
)
pop = simulation.get_population(untracked=True)
result_years = years(pop.index)
result_ages = ages(pop.index)
result_ages_1d = one_d_age(pop.index)
fractional_year = simulation._clock.time.year
fractional_year += simulation._clock.time.timetuple().tm_yday / 365.25
assert np.allclose(result_years, fractional_year)
assert np.allclose(result_ages, pop.age)
assert np.allclose(result_ages_1d, pop.age)
simulation._clock._time += pd.Timedelta(30.5 * 125, unit="D")
simulation._population._population.age += 125 / 12
result_years = years(pop.index)
result_ages = ages(pop.index)
result_ages_1d = one_d_age(pop.index)
fractional_year = simulation._clock.time.year
fractional_year += simulation._clock.time.timetuple().tm_yday / 365.25
assert np.allclose(result_years, fractional_year)
assert np.allclose(result_ages, pop.age)
assert np.allclose(result_ages_1d, pop.age)
@pytest.mark.skip(reason="only order 0 interpolation with age bin edges currently supported")
def test_interpolated_tables_without_uninterpolated_columns(base_config):
year_start = base_config.time.start.year
year_end = base_config.time.end.year
years = build_table(lambda age, sex, year: year, year_start, year_end)
del years["sex"]
years = years.drop_duplicates()
base_config.update(
{"population": {"population_size": 10000}, "interpolation": {"order": 1}}
) # the results we're checking later assume interp order 1
simulation = InteractiveContext(components=[TestPopulation()], configuration=base_config)
manager = simulation._tables
years = manager.build_table(
years,
key_columns=(),
parameter_columns=(
"year",
"age",
),
value_columns=None,
)
result_years = years(simulation.get_population().index)
fractional_year = simulation._clock.time.year
fractional_year += simulation._clock.time.timetuple().tm_yday / 365.25
assert np.allclose(result_years, fractional_year)
simulation._clock._time += pd.Timedelta(30.5 * 125, unit="D")
result_years = years(simulation.get_population().index)
fractional_year = simulation._clock.time.year
fractional_year += simulation._clock.time.timetuple().tm_yday / 365.25
assert np.allclose(result_years, fractional_year)
def test_interpolated_tables__exact_values_at_input_points(base_config):
year_start = base_config.time.start.year
year_end = base_config.time.end.year
years = build_table(lambda age, sex, year: year, year_start, year_end)
input_years = years.year_start.unique()
base_config.update({"population": {"population_size": 10000}})
simulation = InteractiveContext(components=[TestPopulation()], configuration=base_config)
manager = simulation._tables
years = manager._build_table(
years, key_columns=["sex"], parameter_columns=["age", "year"], value_columns=None
)
for year in input_years:
simulation._clock._time = pd.Timestamp(year, 1, 1)
assert np.allclose(
years(simulation.get_population().index), simulation._clock.time.year + 1 / 365
)
def test_lookup_table_scalar_from_list(base_config):
simulation = InteractiveContext(components=[TestPopulation()], configuration=base_config)
manager = simulation._tables
table = manager._build_table(
(1, 2), key_columns=None, parameter_columns=None, value_columns=["a", "b"]
)(simulation.get_population().index)
assert isinstance(table, pd.DataFrame)
assert table.columns.values.tolist() == ["a", "b"]
assert np.all(table.a == 1)
assert np.all(table.b == 2)
def test_lookup_table_scalar_from_single_value(base_config):
simulation = InteractiveContext(components=[TestPopulation()], configuration=base_config)
manager = simulation._tables
table = manager._build_table(
1, key_columns=None, parameter_columns=None, value_columns=["a"]
)(simulation.get_population().index)
assert isinstance(table, pd.Series)
assert np.all(table == 1)
def test_invalid_data_type_build_table(base_config):
simulation = InteractiveContext(components=[TestPopulation()], configuration=base_config)
manager = simulation._tables
with pytest.raises(TypeError):
manager._build_table(
"break", key_columns=None, parameter_columns=None, value_columns=None
)
def test_lookup_table_interpolated_return_types(base_config):
year_start = base_config.time.start.year
year_end = base_config.time.end.year
data = build_table(lambda age, sex, year: year, year_start, year_end)
simulation = InteractiveContext(components=[TestPopulation()], configuration=base_config)
manager = simulation._tables
table = manager._build_table(
data, key_columns=["sex"], parameter_columns=["age", "year"], value_columns=None
)(simulation.get_population().index)
# make sure a single value column is returned as a series
assert isinstance(table, pd.Series)
# now add a second value column to make sure the result is a df
data["value2"] = data.value
table = manager._build_table(
data, key_columns=["sex"], parameter_columns=["age", "year"], value_columns=None
)(simulation.get_population().index)
assert isinstance(table, pd.DataFrame)
@pytest.mark.parametrize(
"data", [None, pd.DataFrame(), pd.DataFrame(columns=["a", "b", "c"]), [], tuple()]
)
def test_validate_parameters_no_data(data):
with pytest.raises(ValueError, match="supply some data"):
validate_parameters(data, [], [], [])
@pytest.mark.parametrize(
"key_cols, param_cols, val_cols, match",
[
(None, None, None, "supply value_columns"),
(None, None, [], "supply value_columns"),
(None, None, ["a", "b"], "match the number of values"),
],
)
def test_validate_parameters_error_scalar_data(key_cols, param_cols, val_cols, match):
with pytest.raises(ValueError, match=match):
validate_parameters([1, 2, 3], key_cols, param_cols, val_cols)
@pytest.mark.parametrize(
"key_cols, param_cols, val_cols, match",
[(["a", "b"], ["b"], ["c"], "no overlap"), ([], ["b"], ["c"], "do not match")],
)
def test_validate_parameters_error_dataframe(key_cols, param_cols, val_cols, match):
data = pd.DataFrame({"a": [1, 2], "b_start": [0, 5], "b_end": [5, 10], "c": [100, 150]})
with pytest.raises(ValueError, match=match):
validate_parameters(data, key_cols, param_cols, val_cols)
@pytest.mark.parametrize(
"data", ["FAIL", pd.Interval(5, 10), "2019-05-17", {"a": 5, "b": 10}]
)
def test_validate_parameters_fail_other_data(data):
with pytest.raises(TypeError, match="only allowable types"):
validate_parameters(data, [], [], [])
@pytest.mark.parametrize(
"key_cols, param_cols, val_cols",
[
(None, None, ["a", "b", "c"]),
(None, ["d"], ["one", "two", "three"]),
(["KEY"], None, ["a", "b", "c"]),
(["KEY"], ["d"], ["a", "b", "c"]),
],
)
def test_validate_parameters_pass_scalar_data(key_cols, param_cols, val_cols):
validate_parameters([1, 2, 3], key_cols, param_cols, val_cols)
@pytest.mark.parametrize(
"key_cols, param_cols, val_cols",
[(["a"], ["b"], ["c"]), ([], ["b"], ["c", "a"]), ([], ["b"], ["a", "c"])],
)
def test_validate_parameters_pass_dataframe(key_cols, param_cols, val_cols):
data = pd.DataFrame({"a": [1, 2], "b_start": [0, 5], "b_end": [5, 10], "c": [100, 150]})
validate_parameters(data, key_cols, param_cols, val_cols)
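# Hedged illustration of the DataFrame convention exercised above: a binned
# parameter column "b" is supplied as the pair of edge columns "b_start"/"b_end",
# while key columns ("a") and value columns ("c") are plain columns. This helper
# only builds such a frame; it is not used by the tests.
def make_binned_frame():
    return pd.DataFrame(
        {
            "a": [1, 2],        # key column
            "b_start": [0, 5],  # lower bin edges for parameter column "b"
            "b_end": [5, 10],   # upper bin edges for parameter column "b"
            "c": [100, 150],    # value column
        }
    )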
@pytest.mark.parametrize("validate", [True, False])
def test_validate_option_invalid_data(validate):
if validate:
with pytest.raises(ValueError, match="supply some data"):
lookup = LookupTable(0, [], None, [], [], [], 0, None, True, validate)
else:
lookup = LookupTable(0, [], None, [], [], [], 0, None, True, validate)
@pytest.mark.parametrize("validate", [True, False])
def test_validate_option_valid_data(validate):
data = [1, 2, 3]
key_cols = ["KEY"]
param_cols = ["d"]
val_cols = ["a", "b", "c"]
lookup = LookupTable(
0, data, None, key_cols, param_cols, val_cols, 0, None, True, validate
)
|
|
"""
LLDB AppKit formatters
Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
See https://llvm.org/LICENSE.txt for license information.
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""
# example summary provider for NSNumber
# the real summary is now C++ code built into LLDB
from __future__ import print_function
import lldb
import ctypes
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import struct
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but the summary for an NSNumber, so they need not
# obey the interface specification for synthetic children providers
class NSTaggedNumber_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, info_bits, data, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
self.info_bits = info_bits
self.data = data
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def value(self):
logger = lldb.formatters.Logger.Logger()
        # of the many types made available by the public NSNumber API,
        # only a handful are actually used by the internal implementation;
        # unfortunately, the original type information appears to be lost,
        # so we try to at least recover the proper magnitude of the data
if self.info_bits == 0:
return '(char)' + \
str(ord(ctypes.c_char(chr(self.data % 256)).value))
if self.info_bits == 4:
return '(short)' + \
str(ctypes.c_short(self.data % (256 * 256)).value)
if self.info_bits == 8:
return '(int)' + str(ctypes.c_int(self.data %
(256 * 256 * 256 * 256)).value)
if self.info_bits == 12:
return '(long)' + str(ctypes.c_long(self.data).value)
else:
return 'unexpected value:(info=' + str(self.info_bits) + \
", value = " + str(self.data) + ')'
class NSUntaggedNumber_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.char):
self.sys_params.types_cache.char = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeChar)
if not(self.sys_params.types_cache.short):
self.sys_params.types_cache.short = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeShort)
if not(self.sys_params.types_cache.ushort):
self.sys_params.types_cache.ushort = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedShort)
if not(self.sys_params.types_cache.int):
self.sys_params.types_cache.int = self.valobj.GetType().GetBasicType(lldb.eBasicTypeInt)
if not(self.sys_params.types_cache.long):
self.sys_params.types_cache.long = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeLong)
if not(self.sys_params.types_cache.ulong):
self.sys_params.types_cache.ulong = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
if not(self.sys_params.types_cache.longlong):
self.sys_params.types_cache.longlong = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeLongLong)
if not(self.sys_params.types_cache.ulonglong):
self.sys_params.types_cache.ulonglong = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLongLong)
if not(self.sys_params.types_cache.float):
self.sys_params.types_cache.float = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeFloat)
if not(self.sys_params.types_cache.double):
self.sys_params.types_cache.double = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeDouble)
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def value(self):
logger = lldb.formatters.Logger.Logger()
global statistics
# we need to skip the ISA, then the next byte tells us what to read
# we then skip one other full pointer worth of data and then fetch the contents
# if we are fetching an int64 value, one more pointer must be skipped
# to get at our data
data_type_vo = self.valobj.CreateChildAtOffset(
"dt", self.sys_params.pointer_size, self.sys_params.types_cache.char)
data_type = ((data_type_vo.GetValueAsUnsigned(0) % 256) & 0x1F)
data_offset = 2 * self.sys_params.pointer_size
if data_type == 0B00001:
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.char)
statistics.metric_hit('code_notrun', self.valobj)
return '(char)' + \
str(ord(ctypes.c_char(chr(data_vo.GetValueAsUnsigned(0))).value))
elif data_type == 0B0010:
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.short)
statistics.metric_hit('code_notrun', self.valobj)
return '(short)' + str(
ctypes.c_short(
data_vo.GetValueAsUnsigned(0) %
(256 * 256)).value)
# IF tagged pointers are possible on 32bit+v2 runtime
# (of which the only existing instance should be iOS)
# then values of this type might be tagged
elif data_type == 0B0011:
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.int)
statistics.metric_hit('code_notrun', self.valobj)
return '(int)' + str(ctypes.c_int(data_vo.GetValueAsUnsigned(0) %
(256 * 256 * 256 * 256)).value)
        # apparently, on is_64_bit architectures, these are the only values that
        # will ever be represented by a non-tagged pointer
elif data_type == 0B10001:
data_offset = data_offset + 8 # 8 is needed even if we are on 32bit
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.longlong)
statistics.metric_hit('code_notrun', self.valobj)
return '(long)' + \
str(ctypes.c_long(data_vo.GetValueAsUnsigned(0)).value)
elif data_type == 0B0100:
if self.sys_params.is_64_bit:
data_offset = data_offset + self.sys_params.pointer_size
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.longlong)
statistics.metric_hit('code_notrun', self.valobj)
return '(long)' + \
str(ctypes.c_long(data_vo.GetValueAsUnsigned(0)).value)
elif data_type == 0B0101:
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.longlong)
data_plain = int(
str(data_vo.GetValueAsUnsigned(0) & 0x00000000FFFFFFFF))
packed = struct.pack('I', data_plain)
data_float = struct.unpack('f', packed)[0]
statistics.metric_hit('code_notrun', self.valobj)
return '(float)' + str(data_float)
elif data_type == 0B0110:
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.longlong)
data_plain = data_vo.GetValueAsUnsigned(0)
data_double = struct.unpack('d', struct.pack('Q', data_plain))[0]
statistics.metric_hit('code_notrun', self.valobj)
return '(double)' + str(data_double)
        statistics.metric_hit(
            'unknown_class',
            str(self.valobj.GetName()) + " had unknown data_type " + str(data_type))
return 'unexpected: dt = ' + str(data_type)
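# Purely illustrative summary of the branch ladder in
# NSUntaggedNumber_SummaryProvider.value() above: the low five bits of the
# byte that follows the ISA select the C type of the payload. This table is
# an editorial aid only and is not consulted by the formatter itself.
_UNTAGGED_PAYLOAD_TYPES = {
    0b00001: 'char',
    0b00010: 'short',
    0b00011: 'int',
    0b00100: 'long long',  # payload shifted by one extra pointer on 64-bit
    0b00101: 'float',      # packed into the low 32 bits of a 64-bit slot
    0b00110: 'double',
    0b10001: 'long long',  # payload starts 8 bytes further in, even on 32-bit
}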
class NSUnknownNumber_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def value(self):
logger = lldb.formatters.Logger.Logger()
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
expr = "(NSString*)[" + stream.GetData() + " stringValue]"
num_children_vo = self.valobj.CreateValueFromExpression("str", expr)
if num_children_vo.IsValid():
return num_children_vo.GetSummary()
return '<variable is not NSNumber>'
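# For a frame variable named "num" (a hypothetical name, used here only for
# illustration), the fallback above ends up evaluating an expression of the
# form
#     (NSString*)[num stringValue]
# and returns that string's summary instead of decoding the payload bits.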
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSNumber' or name_string == '__NSCFNumber':
if class_data.is_tagged():
wrapper = NSTaggedNumber_SummaryProvider(
valobj, class_data.info_bits(), class_data.value(), class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
else:
            # the wrapper might be unable to decipher what is inside the NSNumber
            # and then have to run code on it
wrapper = NSUntaggedNumber_SummaryProvider(
valobj, class_data.sys_params)
else:
wrapper = NSUnknownNumber_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
return wrapper
def NSNumber_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
return provider.message()
try:
summary = provider.value()
except Exception as foo:
print(foo)
summary = None
logger >> "got summary " + str(summary)
if summary is None:
summary = '<variable is not NSNumber>'
return str(summary)
return 'Summary Unavailable'
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F NSNumber.NSNumber_SummaryProvider NSNumber")
debugger.HandleCommand(
"type summary add -F NSNumber.NSNumber_SummaryProvider __NSCFBoolean")
debugger.HandleCommand(
"type summary add -F NSNumber.NSNumber_SummaryProvider __NSCFNumber")
# Copyright 2018 NTT Data.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_versionedobjects import fields
# Import fields from oslo.versionedobjects
StringField = fields.StringField
ListOfObjectsField = fields.ListOfObjectsField
ListOfStringsField = fields.ListOfStringsField
DictOfStringsField = fields.DictOfStringsField
DictOfNullableStringsField = fields.DictOfNullableStringsField
DateTimeField = fields.DateTimeField
BooleanField = fields.BooleanField
BaseEnumField = fields.BaseEnumField
Enum = fields.Enum
ObjectField = fields.ObjectField
IntegerField = fields.IntegerField
FieldType = fields.FieldType
class BaseTackerEnum(Enum):
def __init__(self):
super(BaseTackerEnum, self).__init__(valid_values=self.__class__.ALL)
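# Every enumeration below follows the same two-part pattern: a BaseTackerEnum
# subclass listing its members plus an ALL tuple (passed to the oslo Enum as
# valid_values), and a BaseEnumField subclass whose AUTO_TYPE is an instance
# of that enum. A hypothetical new state would be wired up like this
# (illustrative only; ExampleState is not part of this module):
#
#     class ExampleState(BaseTackerEnum):
#         ON = 'ON'
#         OFF = 'OFF'
#         ALL = (ON, OFF)
#
#     class ExampleStateField(BaseEnumField):
#         AUTO_TYPE = ExampleState()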
class ContainerFormat(BaseTackerEnum):
AKI = 'AKI'
AMI = 'AMI'
ARI = 'ARI'
BARE = 'BARE'
DOCKER = 'DOCKER'
OVA = 'OVA'
OVF = 'OVF'
ALL = (AKI, AMI, ARI, BARE, DOCKER, OVA, OVF)
class ContainerFormatFields(BaseEnumField):
AUTO_TYPE = ContainerFormat()
class DiskFormat(BaseTackerEnum):
AKI = 'AKI'
AMI = 'AMI'
ARI = 'ARI'
ISO = 'ISO'
QCOW2 = 'QCOW2'
RAW = 'RAW'
VDI = 'VDI'
VHD = 'VHD'
VHDX = 'VHDX'
VMDK = 'VMDK'
ALL = (AKI, AMI, ARI, ISO, QCOW2, RAW, VDI, VHD, VHDX, VMDK)
class DiskFormatFields(BaseEnumField):
AUTO_TYPE = DiskFormat()
class PackageOnboardingStateType(BaseTackerEnum):
CREATED = 'CREATED'
UPLOADING = 'UPLOADING'
PROCESSING = 'PROCESSING'
ONBOARDED = 'ONBOARDED'
ALL = (CREATED, UPLOADING, PROCESSING, ONBOARDED)
class PackageOnboardingStateTypeField(BaseEnumField):
AUTO_TYPE = PackageOnboardingStateType()
class PackageOperationalStateType(BaseTackerEnum):
ENABLED = 'ENABLED'
DISABLED = 'DISABLED'
ALL = (ENABLED, DISABLED)
class PackageOperationalStateTypeField(BaseEnumField):
AUTO_TYPE = PackageOperationalStateType()
class PackageUsageStateType(BaseTackerEnum):
IN_USE = 'IN_USE'
NOT_IN_USE = 'NOT_IN_USE'
ALL = (IN_USE, NOT_IN_USE)
class PackageUsageStateTypeField(BaseEnumField):
AUTO_TYPE = PackageUsageStateType()
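# Illustrative only: the enum fields above validate assignments through
# oslo.versionedobjects coercion. The helper below is not called anywhere in
# this module; it simply demonstrates that coerce() accepts members of ALL
# and raises ValueError for anything else.
def _example_package_state_validation():
    field = PackageOperationalStateTypeField()
    # Valid members pass through unchanged.
    assert field.coerce(None, 'operational_state',
                        PackageOperationalStateType.ENABLED) == 'ENABLED'
    # Anything outside ALL is rejected.
    try:
        field.coerce(None, 'operational_state', 'BOGUS')
    except ValueError:
        pass
    else:
        raise AssertionError('expected ValueError for an invalid state')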
class DictOfNullableField(fields.AutoTypedField):
AUTO_TYPE = fields.Dict(fields.FieldType(), nullable=True)
class UUID(fields.UUID):
def coerce(self, obj, attr, value):
uuid.UUID(str(value))
return str(value)
class UUIDField(fields.AutoTypedField):
AUTO_TYPE = UUID()
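# For example (illustrative, not executed at import time): UUID().coerce()
# round-trips the value through uuid.UUID for validation, so
#
#     UUID().coerce(None, 'id', uuid.uuid4())    # returns the canonical string
#     UUID().coerce(None, 'id', 'not-a-uuid')    # raises ValueError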
class VnfInstanceState(BaseTackerEnum):
INSTANTIATED = 'INSTANTIATED'
NOT_INSTANTIATED = 'NOT_INSTANTIATED'
ALL = (INSTANTIATED, NOT_INSTANTIATED)
class VnfInstanceStateField(BaseEnumField):
AUTO_TYPE = VnfInstanceState()
class VnfInstanceTaskState(BaseTackerEnum):
INSTANTIATING = 'INSTANTIATING'
HEALING = 'HEALING'
TERMINATING = 'TERMINATING'
SCALING = 'SCALING'
ERROR = 'ERROR'
ALL = (INSTANTIATING, HEALING, TERMINATING, SCALING, ERROR)
class VnfInstanceTaskStateField(BaseEnumField):
AUTO_TYPE = VnfInstanceTaskState()
class VnfOperationalStateType(BaseTackerEnum):
STARTED = 'STARTED'
STOPPED = 'STOPPED'
ALL = (STARTED, STOPPED)
class VnfOperationalStateTypeField(BaseEnumField):
AUTO_TYPE = VnfOperationalStateType()
class IpAddressType(BaseTackerEnum):
IPV4 = 'IPV4'
IPV6 = 'IPV6'
ALL = (IPV4, IPV6)
class IpAddressTypeField(BaseEnumField):
AUTO_TYPE = IpAddressType()
class VnfInstanceTerminationType(BaseTackerEnum):
FORCEFUL = 'FORCEFUL'
GRACEFUL = 'GRACEFUL'
ALL = (FORCEFUL, GRACEFUL)
class VnfInstanceTerminationTypeField(BaseEnumField):
AUTO_TYPE = VnfInstanceTerminationType()
# SOL003 5.5.4.6 Enumeration: CancelModeType
class VnfInstanceCancelModeType(BaseTackerEnum):
FORCEFUL = 'FORCEFUL'
GRACEFUL = 'GRACEFUL'
ALL = (FORCEFUL, GRACEFUL)
class VnfInstanceCancelTypeField(BaseEnumField):
AUTO_TYPE = VnfInstanceCancelModeType()
class VnfcState(BaseTackerEnum):
STARTED = 'STARTED'
STOPPED = 'STOPPED'
ALL = (STARTED, STOPPED)
class InstanceOperationalState(BaseTackerEnum):
STARTING = 'STARTING'
PROCESSING = 'PROCESSING'
COMPLETED = 'COMPLETED'
FAILED_TEMP = 'FAILED_TEMP'
ROLLING_BACK = 'ROLLING_BACK'
ROLLED_BACK = 'ROLLED_BACK'
ALL = (STARTING, PROCESSING, COMPLETED, FAILED_TEMP,
ROLLING_BACK, ROLLED_BACK)
class InstanceOperationalStateField(BaseEnumField):
AUTO_TYPE = InstanceOperationalState()
class InstanceOperation(BaseTackerEnum):
INSTANTIATE = 'INSTANTIATE'
SCALE = 'SCALE'
TERMINATE = 'TERMINATE'
HEAL = 'HEAL'
MODIFY_INFO = 'MODIFY_INFO'
ALL = (INSTANTIATE, SCALE,
TERMINATE, HEAL, MODIFY_INFO)
class InstanceOperationField(BaseEnumField):
AUTO_TYPE = InstanceOperation()
class LcmOccsOperationState(BaseTackerEnum):
STARTING = 'STARTING'
PROCESSING = 'PROCESSING'
COMPLETED = 'COMPLETED'
FAILED_TEMP = 'FAILED_TEMP'
FAILED = 'FAILED'
ALL = (STARTING, PROCESSING, COMPLETED, FAILED_TEMP, FAILED)
class LcmOccsOperationType(BaseTackerEnum):
INSTANTIATE = 'INSTANTIATE'
TERMINATE = 'TERMINATE'
HEAL = 'HEAL'
SCALE = 'SCALE'
CHANGE_EXT_CONN = 'CHANGE_EXT_CONN'
ALL = (INSTANTIATE, TERMINATE, HEAL, SCALE, CHANGE_EXT_CONN)
class LcmOccsNotificationStatus(BaseTackerEnum):
START = 'START'
RESULT = 'RESULT'
ALL = (START, RESULT)
class ResourceChangeType(BaseTackerEnum):
ADDED = 'ADDED'
REMOVED = 'REMOVED'
MODIFIED = 'MODIFIED'
TEMPORARY = 'TEMPORARY'
ALL = (ADDED, REMOVED, MODIFIED, TEMPORARY)
class LcmOccsNotificationType(BaseTackerEnum):
VNF_OP_OCC_NOTIFICATION = 'VnfLcmOperationOccurrenceNotification'
VNF_ID_CREATION_NOTIFICATION = 'VnfIdentifierCreationNotification'
    ALL = (VNF_OP_OCC_NOTIFICATION, VNF_ID_CREATION_NOTIFICATION)
class VnfStatus(BaseTackerEnum):
ACTIVE = 'ACTIVE'
INACTIVE = 'INACTIVE'
ALL = (ACTIVE, INACTIVE)
# NOTE: this rebinds the InstanceOperation name defined earlier in this
# module; only the MODIFY_INFO constant is defined here.
class InstanceOperation(BaseTackerEnum):
    MODIFY_INFO = 'MODIFY_INFO'
class ErrorPoint(BaseTackerEnum):
INITIAL = 0
NOTIFY_PROCESSING = 1
VNF_CONFIG_START = 2
PRE_VIM_CONTROL = 3
POST_VIM_CONTROL = 4
INTERNAL_PROCESSING = 5
VNF_CONFIG_END = 6
NOTIFY_COMPLETED = 7