import os
from astropy import units as u
from astropy import wcs
from astropy.coordinates import EarthLocation
from astropy.coordinates import FK5
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.time import Time
from collections import namedtuple
from . import PanBase
from .utils import images as img_utils
OffsetError = namedtuple('OffsetError', ['delta_ra', 'delta_dec', 'magnitude'])
class Image(PanBase):
def __init__(self, fits_file, wcs_file=None, location=None):
"""Object to represent a single image from a PANOPTES camera.
Args:
fits_file (str): Name of FITS file to be read (can be .fz)
wcs_file (str, optional): Name of FITS file to use for WCS
"""
super().__init__()
assert os.path.exists(fits_file), self.logger.warning('File does not exist: {}'.format(fits_file))
if fits_file.endswith('.fz'):
fits_file = img_utils.fpack(fits_file, unpack=True)
        assert fits_file.lower().endswith('.fits'), self.logger.warning('File must end with .fits')
self.wcs = None
self._wcs_file = None
self.fits_file = fits_file
if wcs_file is not None:
self.wcs_file = wcs_file
else:
self.wcs_file = fits_file
with fits.open(self.fits_file, 'readonly') as hdu:
self.header = hdu[0].header
assert 'DATE-OBS' in self.header, self.logger.warning('FITS file must contain the DATE-OBS keyword')
assert 'EXPTIME' in self.header, self.logger.warning('FITS file must contain the EXPTIME keyword')
# Location Information
if location is None:
cfg_loc = self.config['location']
location = EarthLocation(lat=cfg_loc['latitude'],
lon=cfg_loc['longitude'],
height=cfg_loc['elevation'],
)
# Time Information
self.starttime = Time(self.header['DATE-OBS'], location=location)
self.exptime = float(self.header['EXPTIME']) * u.second
self.midtime = self.starttime + (self.exptime / 2.0)
self.sidereal = self.midtime.sidereal_time('apparent')
self.FK5_Jnow = FK5(equinox=self.midtime)
# Coordinates from header keywords
self.header_pointing = None
self.header_ra = None
self.header_dec = None
self.header_ha = None
# Coordinates from WCS
self.pointing = None
self.ra = None
self.dec = None
self.ha = None
self.get_header_pointing()
self.get_wcs_pointing()
self._luminance = None
self._pointing = None
self._pointing_error = None
@property
def wcs_file(self):
"""WCS file name
When setting the WCS file name, the WCS information will be read,
setting the `wcs` property.
"""
return self._wcs_file
@wcs_file.setter
def wcs_file(self, filename):
if filename is not None:
try:
w = wcs.WCS(filename)
assert w.is_celestial
self.wcs = w
self._wcs_file = filename
except Exception:
self.logger.debug("Can't get WCS from FITS file (try solve_field)")
@property
def pointing_error(self):
"""Pointing error namedtuple (delta_ra, delta_dec, magnitude)
Returns pointing error information. The first time this is accessed
this will solve the field if not previously solved.
Returns:
namedtuple: Pointing error information
"""
        if self._pointing_error is None:
            assert self.header_pointing is not None
            if self.wcs is None:
                self.solve_field()
            assert self.pointing is not None, self.logger.warning("No WCS, can't get pointing_error")
mag = self.pointing.separation(self.header_pointing)
d_dec = self.pointing.dec - self.header_pointing.dec
d_ra = self.pointing.ra - self.header_pointing.ra
self._pointing_error = OffsetError(d_ra.to(u.arcsec), d_dec.to(u.arcsec), mag.to(u.arcsec))
return self._pointing_error
def get_header_pointing(self):
"""Get the pointing information from the header
The header should contain the `RA-MNT` and `DEC-MNT` keywords, from which
the header pointing coordinates are built.
"""
try:
self.header_pointing = SkyCoord(ra=float(self.header['RA-MNT']) * u.degree,
dec=float(self.header['DEC-MNT']) * u.degree)
self.header_ra = self.header_pointing.ra.to(u.hourangle)
self.header_dec = self.header_pointing.dec.to(u.degree)
# Precess to the current equinox otherwise the RA - LST method will be off.
self.header_ha = self.header_pointing.transform_to(self.FK5_Jnow).ra.to(u.hourangle) - self.sidereal
except Exception as e:
self.logger.warning('Cannot get header pointing information: {}'.format(e))
def get_wcs_pointing(self):
"""Get the pointing information from the WCS
Builds the pointing coordinates from the plate-solved WCS. These will be
compared with the coordinates stored in the header.
"""
if self.wcs is not None:
ra = self.wcs.celestial.wcs.crval[0]
dec = self.wcs.celestial.wcs.crval[1]
self.pointing = SkyCoord(ra=ra * u.degree, dec=dec * u.degree)
self.ra = self.pointing.ra.to(u.hourangle)
self.dec = self.pointing.dec.to(u.degree)
# Precess to the current equinox otherwise the RA - LST method will be off.
self.ha = self.pointing.transform_to(self.FK5_Jnow).ra.to(u.hourangle) - self.sidereal
def solve_field(self, **kwargs):
""" Solve field and populate WCS information
Args:
**kwargs (dict): Options to be passed to `get_solve_field`
"""
solve_info = img_utils.get_solve_field(self.fits_file,
ra=self.header_pointing.ra.value,
dec=self.header_pointing.dec.value,
**kwargs)
self.wcs_file = solve_info['solved_fits_file']
self.get_wcs_pointing()
# Remove some fields
for header in ['COMMENT', 'HISTORY']:
try:
del solve_info[header]
except KeyError:
pass
return solve_info
    def compute_offset(self, ref_image):
        """Compute the pointing offset between this image and a reference image.
        Args:
            ref_image (Image): Image to compare against.
        Returns:
            namedtuple: OffsetError with delta_ra, delta_dec, and magnitude in arcseconds.
        """
        assert isinstance(ref_image, Image), self.logger.warning("Must pass an Image class for reference")
mag = self.pointing.separation(ref_image.pointing)
d_dec = self.pointing.dec - ref_image.pointing.dec
d_ra = self.pointing.ra - ref_image.pointing.ra
return OffsetError(d_ra.to(u.arcsec), d_dec.to(u.arcsec), mag.to(u.arcsec))
##################################################################################################
# Private Methods
##################################################################################################
def __str__(self):
return "{}: {}".format(self.fits_file, self.header_pointing)
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import uuid
import mock
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
from nova import test
from nova.tests.unit.compute.monitors import test_monitors
from nova.tests.unit.objects import test_migration
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
from nova.virt import hardware
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[]),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[])])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = hardware.VirtNUMALimitTopology(
cells=[hardware.VirtNUMATopologyCellLimit(
0, set([1, 2]), 3072, 4, 10240),
hardware.VirtNUMATopologyCellLimit(
1, set([3, 4]), 3072, 4, 10240)])
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1'
},
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1'
},
{
'label': 'label_8086_0443',
'dev_type': 'type-PF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1'
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1'
},
{
'label': 'label_8086_7891',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1'
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443'
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891'
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._numa_topologies = {}
self._instance_types = {}
self.stubs.Set(self.conductor.db,
'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
self._fake_instance_extra_get_by_instance_uuid)
self.stubs.Set(self.conductor.db,
'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": {
"num_instances": "1",
},
"hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
}
return service
def _fake_instance_system_metadata(self, instance_type, prefix=''):
sys_meta = []
for key in flavors.system_metadata_flavor_props.keys():
sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
'value': instance_type[key]})
return sys_meta
def _fake_instance(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
sys_meta = self._fake_instance_system_metadata(flavor)
if stash:
# stash instance types in system metadata.
sys_meta = (sys_meta +
self._fake_instance_system_metadata(flavor, 'new_') +
self._fake_instance_system_metadata(flavor, 'old_'))
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': sys_meta,
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
}
extra = {
'id': 1, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': None,
'instance_uuid': instance['uuid'],
'numa_topology': None,
'pci_requests': None,
}
numa_topology = kwargs.pop('numa_topology', None)
if numa_topology:
extra['numa_topology'] = numa_topology._to_json()
instance.update(kwargs)
instance['extra'] = extra
self._instances[instance_uuid] = instance
self._numa_topologies[instance_uuid] = extra
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_all_by_host_and_node(self, context, host, nodename,
columns_to_join=None):
return [i for i in self._instances.values() if i['host'] == host]
def _fake_instance_extra_get_by_instance_uuid(self, context,
instance_uuid, columns=None):
return self._numa_topologies.get(instance_uuid)
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_instance_update_and_get_original(self, context, instance_uuid,
values, columns_to_join=None):
instance = self._instances[instance_uuid]
instance.update(values)
# the test doesn't care what the original instance values are, it's
# only used in the subsequent notification:
return (instance, instance)
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_context_claim(self):
# instance context manager variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node()
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.updated = False
self.deleted = False
self.update_call_count = 0
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology.to_json() if numa_topology else None
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
jsonutils.loads(self.tracker.compute_node['pci_stats']))
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_create_resource(self):
self.tracker._write_ext_resources = mock.Mock()
self.tracker.conductor_api.compute_node_create = mock.Mock(
return_value=dict(id=1))
values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
self.tracker._create(self.context, values)
expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
'id': 1}
self.tracker.scheduler_client.update_resource_stats.\
assert_called_once_with(self.context,
("fakehost", "fakenode"),
expected)
def test_update_resource(self):
self.tracker._write_ext_resources = mock.Mock()
values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
self.tracker._update(self.context, values)
expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
'id': 1}
self.tracker.scheduler_client.update_resource_stats.\
assert_called_once_with(self.context,
("fakehost", "fakenode"),
expected)
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
jsonutils.loads(self.tracker.compute_node['pci_stats']))
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def setUp(self):
super(TrackerExtraResourcesTestCase, self).setUp()
self.driver = self._driver()
def _driver(self):
return FakeVirtDriver()
def test_set_empty_ext_resources(self):
resources = self.driver.get_available_resource(self.tracker.nodename)
self.assertNotIn('stats', resources)
self.tracker._write_ext_resources(resources)
self.assertIn('stats', resources)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self.driver.get_available_resource(self.tracker.nodename)
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": 12}
self.assertEqual(sorted(expected),
sorted(resources['stats']))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[]),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[])])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
claim_topology = self._claim_topology(claim_mem / 2)
instance_topology = self._instance_topology(claim_mem / 2)
instance = self._fake_instance(
flavor=flavor, task_state=None,
numa_topology=instance_topology)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_audit(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
ephemeral_gb=0, numa_topology=instance_topology)
self.tracker.instance_claim(self.context, instance, self.limits)
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["memory_mb"])
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
# 1st pretend that the compute operation finished and claimed the
# desired resources from the virt layer
driver = self.tracker.driver
driver.memory_mb_used = claim_mem
driver.local_gb_used = claim_disk
self.tracker.update_available_resource(self.context)
# confirm tracker is adding in host_ip
self.assertIsNotNone(self.compute.get('host_ip'))
# confirm that resource usage is derived from instance usages,
# not virt layer:
self.assertEqual(claim_mem_total, self.compute['memory_mb_used'])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute['free_ram_mb'])
self.assertEqualNUMAHostTopology(
claim_topology, objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute['local_gb_used'])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute['free_disk_gb'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0,
numa_topology=instance_topology)
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD.to_json()}
instance = self._fake_instance(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node['local_gb_used'])
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node['vcpus_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_context_claim_with_exception(self, mock_get):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_context_claim(self, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_cpu_stats(self, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
vcpus = 1
instance = self._fake_instance(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
def _fake_migration_create(mig_self):
self._migrations[mig_self.instance_uuid] = mig_self
mig_self.obj_reset_changes()
self.stubs.Set(objects.Migration, 'create',
_fake_migration_create)
self.instance = self._fake_instance()
self.instance_type = self._fake_flavor_create()
def _fake_migration_create(self, values=None):
instance_uuid = str(uuid.uuid1())
mig_dict = test_migration.fake_db_migration()
mig_dict.update({
'id': 1,
'source_compute': 'host1',
'source_node': 'fakenode',
'dest_compute': 'host2',
'dest_node': 'fakenode',
'dest_host': '127.0.0.1',
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'instance_uuid': instance_uuid,
'status': 'pre-migrating',
'updated_at': timeutils.utcnow()
})
if values:
mig_dict.update(values)
migration = objects.Migration(context='fake')
migration.update(mig_dict)
# This hits the stub in setUp()
migration.create()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_audit(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_same_host(self, mock_get):
self.limits['vcpu'] = 3
src_dict = {
'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'vcpus': 1}
dest_dict = dict((k, v + 1) for (k, v) in src_dict.iteritems())
src_type = self._fake_flavor_create(
id=10, name="srcflavor", **src_dict)
dest_type = self._fake_flavor_create(
id=11, name="destflavor", **dest_dict)
# make an instance of src_type:
instance = self._fake_instance(flavor=src_type)
instance['system_metadata'] = self._fake_instance_system_metadata(
dest_type)
self.tracker.instance_claim(self.context, instance, self.limits)
# resize to dest_type:
claim = self.tracker.resize_claim(self.context, instance,
dest_type, self.limits)
self._assert(src_dict['memory_mb'] + dest_dict['memory_mb']
+ 2 * FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb']
+ dest_dict['root_gb'] + dest_dict['ephemeral_gb'],
'local_gb_used')
self._assert(src_dict['vcpus'] + dest_dict['vcpus'], 'vcpus_used')
self.tracker.update_available_resource(self.context)
claim.abort()
# only the original instance should remain, not the migration:
self._assert(src_dict['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used')
self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb'],
'local_gb_used')
self._assert(src_dict['vcpus'], 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, {}, self.limits)
self.tracker.drop_resize_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert_reserve_source(self, mock_get):
# if a revert has started at the API and audit runs on
# the source compute before the instance flips back to source,
# resources should still be held at the source based on the
# migration:
dest = "desthost"
dest_tracker = self._tracker(host=dest)
dest_tracker.update_available_resource(self.context)
self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
values = {'source_compute': self.host, 'dest_compute': dest,
'old_instance_type_id': 1, 'new_instance_type_id': 1,
'status': 'post-migrating',
'instance_uuid': self.instance['uuid']}
self._fake_migration_create(values)
# attach an instance to the destination host tracker:
dest_tracker.instance_claim(self.context, self.instance)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# audit and recheck to confirm migration doesn't get double counted
# on dest:
dest_tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# apply the migration to the source host tracker:
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
# flag the instance and migration as reverting and re-audit:
self.instance['vm_state'] = vm_states.RESIZED
self.instance['task_state'] = task_states.RESIZE_REVERTING
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB + 1, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
def test_dupe_filter(self):
instance = self._fake_instance(host=self.host)
values = {'source_compute': self.host, 'dest_compute': self.host,
'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
self._fake_flavor_create(id=2)
self._fake_migration_create(values)
self._fake_migration_create(values)
self.tracker.update_available_resource(self.context)
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_set_instance_host_and_node(self, mock_get):
instance = self._fake_instance()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
"""Make sure we handle the case where the following are true:
#) Compute node C gets upgraded to code that looks for instance types in
system metadata. AND
#) C already has instances in the process of migrating that do not have
stashed instance types.
bug 1164110
"""
def setUp(self):
super(NoInstanceTypesInSysMetadata, self).setUp()
self.instance = self._fake_instance(stash=False)
def test_get_instance_type_stash_false(self):
with (mock.patch.object(objects.Flavor, 'get_by_id',
return_value=self.instance_type)):
flavor = self.tracker._get_instance_type(self.context,
self.instance, "new_")
self.assertEqual(self.instance_type, flavor)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.flags(compute_monitors=['FakeMontorClass1', 'FakeMonitorClass4'])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
def test_get_host_metrics_one_failed(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class4 = test_monitors.FakeMonitorClass4(self.tracker)
self.tracker.monitors = [class1, class4]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertTrue(len(metrics) > 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
self.flags(compute_monitors=['FakeMontorClass1'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
self.tracker.monitors = [class1]
with mock.patch.object(class1, 'get_metrics',
side_effect=test.TestingException()):
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %s.', class1)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class2 = test_monitors.FakeMonitorClass2(self.tracker)
self.tracker.monitors = [class1, class2]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [{
'timestamp': 1232,
'name': 'key1',
'value': 2600,
'source': 'libvirt'
}, {
'name': 'key2',
'source': 'libvirt',
'timestamp': 123,
'value': 1600
}]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
import os
import re
import sys
import math
import pickle
import warnings
from operator import itemgetter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import gensim
from tqdm import tqdm
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.models import Model, load_model
from keras.utils import np_utils
from keras.layers import (Conv2D, AveragePooling2D, MaxPooling2D, Activation,
                          Flatten, Input, Dense, Dropout, LSTM, Embedding, Reshape)
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from keras.preprocessing.sequence import pad_sequences
##
LOWER_CASE = False
DATA_DIR = 'data/'
Goog_w2v = DATA_DIR+"embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin"
dropout_prob=0.5
N_EPOCHS = 200
PREFIX="cnn_"
VALIDATION_SPLIT=0.001
##
def process_text(text):
text = re.sub(r"\'s", " is ", text)
text = re.sub(r"\'s", " is ", text)
text = re.sub(r"\'ve", " have ", text)
return text
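# Illustrative example (not part of the original pipeline): process_text only expands
# a couple of contractions, e.g. process_text("He's done what we've asked") returns
# "He is  done what we have  asked"; the double spaces are harmless because
# text_to_word_sequence splits on whitespace anyway.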
##
print(os.listdir(DATA_DIR))
train_df = pd.read_csv(DATA_DIR+"train.csv")
test_df = pd.read_csv(DATA_DIR+"test.csv")
model_word_embed = gensim.models.KeyedVectors.load_word2vec_format(Goog_w2v,binary=True)
SEQ_LEN_TR = len(max(train_df['question_text'], key=len).split())
SEQ_LEN_TS = len(max(test_df['question_text'], key=len).split())
SEQ_LEN = max(SEQ_LEN_TR,SEQ_LEN_TS)
print("SEQ_LEN:",SEQ_LEN)
assert SEQ_LEN == 45
##
train_cat_list, train_text_list, train_questions = [], [], []
test_text_list, test_questions = [], []
for i in range(len(train_df)):
quest = train_df.loc[i,'question_text']
train_questions.append(quest)
train_cat_list.append(train_df.loc[i,'target'])
train_text_list.append(text_to_word_sequence(process_text(quest),lower=LOWER_CASE))
for i in range(len(test_df)):
quest = test_df.loc[i,'question_text']
test_questions.append(quest)
test_text_list.append(text_to_word_sequence(process_text(quest),lower=LOWER_CASE))
assert len(train_cat_list) == len(train_text_list)
assert len(train_cat_list) == len(train_questions)
assert len(test_questions) == len(test_text_list)
print(">> train_size:",len(train_cat_list))
print(">> train sample:",train_cat_list[44] , train_text_list[44], train_questions[44])
print(">> test_size:",len(test_questions))
print(">> test sample:", test_text_list[44] , test_questions[44])
##
tokenizer = Tokenizer(num_words=None,char_level=False,lower=False)
tokenizer.fit_on_texts(train_text_list + test_text_list)
sequences_train = tokenizer.texts_to_sequences(train_text_list) # ... train , test ..
sequences_test = tokenizer.texts_to_sequences(test_text_list) # ... train , test ..
data_train = pad_sequences(sequences_train, maxlen=SEQ_LEN,padding='post')
data_test = pad_sequences(sequences_test, maxlen=SEQ_LEN,padding='post')
labels = np.array(train_cat_list)
nb_words = len(tokenizer.word_index)+1
print(">> Number of words:",nb_words)
print(">> data_train:",data_train.shape)
print(">> train sample:",sequences_train[44] , data_train[44] , train_text_list[44] , train_questions[44])
print(">> data_test:",data_test.shape)
print(">> test sample:",sequences_test[44] , data_test[44] , test_text_list[44] , test_questions[44])
########################################
## sample train/validation data
########################################
np.random.seed(17)
perm = np.random.permutation(len(data_train))
idx_train = perm[:int(len(data_train)*(1-VALIDATION_SPLIT))]
idx_val = perm[int(len(data_train)*(1-VALIDATION_SPLIT)):]
data_tr = data_train[idx_train]
data_val = data_train[idx_val]
labels_tr = labels[idx_train]
labels_val = labels[idx_val]
del data_train
##
embedding_matrix = np.zeros((nb_words, 300))
with open('out.log', 'w') as out_log:
    print('>>>>>>>>>>> OUT LOG:', file=out_log)
    for word, i in tokenizer.word_index.items():
        if word in model_word_embed.vocab:
            #print('IN:',word)
            embedding_matrix[i] = model_word_embed.word_vec(word)
        else:
            # log out-of-vocabulary words without reopening the log file for every word
            print('>>> OUT <<<:', word, file=out_log)
print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))
## Null word embeddings: 116,788 (LOWER_CASE = False)
## Null word embeddings: 141,480 (LOWER_CASE = True) e.g. autria, gennifer
EMBEDDING_DIM = len(embedding_matrix[1])
print("EMBEDDING_DIM:",EMBEDDING_DIM)
#### Model
embedding_layer = Embedding(embedding_matrix.shape[0],EMBEDDING_DIM,weights=[embedding_matrix],input_length=SEQ_LEN,trainable=False)
sequence_input = Input(shape=(SEQ_LEN,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
embedded_sequences_rh = Reshape((SEQ_LEN,EMBEDDING_DIM,1))(embedded_sequences)
### -------------------------- MAIN CAT
# 2-gram
conv_1 = Conv2D(500, (2, EMBEDDING_DIM), activation="relu") (embedded_sequences_rh)
max_pool_1 = MaxPooling2D(pool_size=(SEQ_LEN-2, 1 ))(conv_1) # 30
# 3-gram
conv_2 = Conv2D(500, (3, EMBEDDING_DIM), activation="relu") (embedded_sequences_rh)
max_pool_2 = MaxPooling2D(pool_size=(SEQ_LEN-3, 1 ))(conv_2) # 29
# 4-gram
conv_3 = Conv2D(500, (4, EMBEDDING_DIM), activation="relu") (embedded_sequences_rh)
max_pool_3 = MaxPooling2D(pool_size=(SEQ_LEN-4, 1 ))(conv_3) # 28
# 5-gram
conv_4 = Conv2D(500, (5, EMBEDDING_DIM), activation="relu") (embedded_sequences_rh)
max_pool_4 = MaxPooling2D(pool_size=(SEQ_LEN-5, 1))(conv_4) # 27
# concat
merged = concatenate([max_pool_1, max_pool_2, max_pool_3,max_pool_4])
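# Shape check (assuming SEQ_LEN == 45 and EMBEDDING_DIM == 300, with default pooling
# strides): each Conv2D branch with kernel (n, 300) over the (45, 300, 1) input yields
# (45-n+1, 1, 500), and the matching MaxPooling2D collapses the time axis to 1, so every
# branch contributes a (1, 1, 500) tensor; the concatenation is therefore (1, 1, 2000),
# i.e. 2000 features after Flatten (a Kim-style multi-n-gram CNN head).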
#merged = Reshape((1,-1))(merged)
#flatten = Attention_CNN(1)(merged)
flatten = Flatten()(merged)
# full-connect -- MAIN
full_conn = Dense(128, activation= 'tanh')(flatten)
dropout_1 = Dropout(dropout_prob)(full_conn)
full_conn_2 = Dense(64, activation= 'tanh')(dropout_1)
dropout_2 = Dropout(dropout_prob)(full_conn_2)
output = Dense(1, activation= 'sigmoid')(dropout_2)
model = Model(sequence_input,output)
########
model.compile(optimizer= 'adam', loss='binary_crossentropy', metrics= ['accuracy'])
model.summary()
earlystopper = EarlyStopping(patience=20, verbose=1,monitor='val_acc',mode='max')
checkpointer = ModelCheckpoint(PREFIX+'model.h5', verbose=1, save_best_only=True,monitor='val_acc',mode='max')
reduce_lr = ReduceLROnPlateau(factor=0.2, patience=5, min_lr=0.00001, verbose=1,monitor='val_acc',mode='max')
results = model.fit(data_tr,labels_tr,
validation_data=[data_val,labels_val],
batch_size=66, epochs=N_EPOCHS,
callbacks=[earlystopper, checkpointer,reduce_lr])
#learning_curve_df = plot_learn_curve(results,do_plot=False)
#learning_curve_df.to_csv(PREFIX+'learning_curve.csv')
print(">> TEST ...")
model = load_model(PREFIX+'model.h5')
print("> Sub category:")
th_best=0.35
f1_best=0
pred_val_raw = model.predict(data_val)  # predict once, outside the threshold search
for th in np.linspace(0.1, 0.9, 20).tolist():
    pred_val = (np.array(pred_val_raw) > th).astype(int)
    f1 = f1_score(labels_val, pred_val)
    if f1 > f1_best:
        f1_best = f1
        th_best = th
print("f1_best:",f1_best," --- th_best:",th_best)
###
pred = model.predict(data_test)
pred = (np.array(pred) > th_best).astype(int)
submit_df = pd.DataFrame({"qid": test_df["qid"], "prediction": np.squeeze(pred)})
submit_df.to_csv("submission.csv", index=False)
|
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from mock import Mock, MagicMock, ANY
from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType
from cassandra.cluster import Session, ResponseFuture, NoHostAvailable
from cassandra.connection import Connection, ConnectionException
from cassandra.protocol import (ReadTimeoutErrorMessage, WriteTimeoutErrorMessage,
UnavailableErrorMessage, ResultMessage, QueryMessage,
OverloadedErrorMessage, IsBootstrappingErrorMessage,
PreparedQueryNotFound, PrepareMessage,
RESULT_KIND_ROWS, RESULT_KIND_SET_KEYSPACE,
RESULT_KIND_SCHEMA_CHANGE, ProtocolHandler)
from cassandra.policies import RetryPolicy
from cassandra.pool import NoConnectionsAvailable
from cassandra.query import SimpleStatement
class ResponseFutureTests(unittest.TestCase):
def make_basic_session(self):
return Mock(spec=Session, row_factory=lambda *x: list(x))
def make_session(self):
session = self.make_basic_session()
session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2']
session._pools.get.return_value.is_shutdown = False
return session
def make_response_future(self, session):
query = SimpleStatement("SELECT * FROM foo")
message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
return ResponseFuture(session, message, query, 1)
def make_mock_response(self, results):
return Mock(spec=ResultMessage, kind=RESULT_KIND_ROWS, results=results, paging_state=None)
def test_result_message(self):
session = self.make_basic_session()
session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2']
pool = session._pools.get.return_value
pool.is_shutdown = False
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 1)
rf = self.make_response_future(session)
rf.send_request()
rf.session._pools.get.assert_called_once_with('ip1')
pool.borrow_connection.assert_called_once_with(timeout=ANY)
connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message)
rf._set_result(self.make_mock_response([{'col': 'val'}]))
result = rf.result()
self.assertEqual(result, [{'col': 'val'}])
def test_unknown_result_class(self):
session = self.make_session()
pool = session._pools.get.return_value
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 1)
rf = self.make_response_future(session)
rf.send_request()
rf._set_result(object())
self.assertRaises(ConnectionException, rf.result)
def test_set_keyspace_result(self):
session = self.make_session()
rf = self.make_response_future(session)
rf.send_request()
result = Mock(spec=ResultMessage,
kind=RESULT_KIND_SET_KEYSPACE,
results="keyspace1")
rf._set_result(result)
rf._set_keyspace_completed({})
self.assertFalse(rf.result())
def test_schema_change_result(self):
session = self.make_session()
rf = self.make_response_future(session)
rf.send_request()
event_results={'target_type': SchemaTargetType.TABLE, 'change_type': SchemaChangeType.CREATED,
'keyspace': "keyspace1", "table": "table1"}
result = Mock(spec=ResultMessage,
kind=RESULT_KIND_SCHEMA_CHANGE,
results=event_results)
rf._set_result(result)
session.submit.assert_called_once_with(ANY, ANY, rf, **event_results)
def test_other_result_message_kind(self):
session = self.make_session()
rf = self.make_response_future(session)
rf.send_request()
result = [1, 2, 3]
rf._set_result(Mock(spec=ResultMessage, kind=999, results=result))
self.assertListEqual(list(rf.result()), result)
def test_read_timeout_error_message(self):
session = self.make_session()
query = SimpleStatement("SELECT * FROM foo")
query.retry_policy = Mock()
query.retry_policy.on_read_timeout.return_value = (RetryPolicy.RETHROW, None)
message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
rf = ResponseFuture(session, message, query, 1)
rf.send_request()
result = Mock(spec=ReadTimeoutErrorMessage, info={})
rf._set_result(result)
self.assertRaises(Exception, rf.result)
def test_write_timeout_error_message(self):
session = self.make_session()
query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
query.retry_policy = Mock()
query.retry_policy.on_write_timeout.return_value = (RetryPolicy.RETHROW, None)
message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
rf = ResponseFuture(session, message, query, 1)
rf.send_request()
result = Mock(spec=WriteTimeoutErrorMessage, info={})
rf._set_result(result)
self.assertRaises(Exception, rf.result)
def test_unavailable_error_message(self):
session = self.make_session()
query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
query.retry_policy = Mock()
query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None)
message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
rf = ResponseFuture(session, message, query, 1)
rf.send_request()
result = Mock(spec=UnavailableErrorMessage, info={})
rf._set_result(result)
self.assertRaises(Exception, rf.result)
def test_retry_policy_says_ignore(self):
session = self.make_session()
query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
query.retry_policy = Mock()
query.retry_policy.on_unavailable.return_value = (RetryPolicy.IGNORE, None)
message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
rf = ResponseFuture(session, message, query, 1)
rf.send_request()
result = Mock(spec=UnavailableErrorMessage, info={})
rf._set_result(result)
self.assertFalse(rf.result())
def test_retry_policy_says_retry(self):
session = self.make_session()
pool = session._pools.get.return_value
query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
query.retry_policy = Mock()
query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETRY, ConsistencyLevel.ONE)
message = QueryMessage(query=query, consistency_level=ConsistencyLevel.QUORUM)
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 1)
rf = ResponseFuture(session, message, query, 1)
rf.send_request()
rf.session._pools.get.assert_called_once_with('ip1')
pool.borrow_connection.assert_called_once_with(timeout=ANY)
connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message)
result = Mock(spec=UnavailableErrorMessage, info={})
rf._set_result(result)
session.submit.assert_called_once_with(rf._retry_task, True)
self.assertEqual(1, rf._query_retries)
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 2)
# simulate the executor running this
rf._retry_task(True)
# it should try again with the same host since this was
# an UnavailableException
rf.session._pools.get.assert_called_with('ip1')
pool.borrow_connection.assert_called_with(timeout=ANY)
connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message)
def test_retry_with_different_host(self):
session = self.make_session()
pool = session._pools.get.return_value
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 1)
rf = self.make_response_future(session)
rf.message.consistency_level = ConsistencyLevel.QUORUM
rf.send_request()
rf.session._pools.get.assert_called_once_with('ip1')
pool.borrow_connection.assert_called_once_with(timeout=ANY)
connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message)
self.assertEqual(ConsistencyLevel.QUORUM, rf.message.consistency_level)
result = Mock(spec=OverloadedErrorMessage, info={})
rf._set_result(result)
session.submit.assert_called_once_with(rf._retry_task, False)
# query_retries does not get incremented for Overloaded/Bootstrapping errors
self.assertEqual(0, rf._query_retries)
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 2)
# simulate the executor running this
rf._retry_task(False)
# it should try with a different host
rf.session._pools.get.assert_called_with('ip2')
pool.borrow_connection.assert_called_with(timeout=ANY)
connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message)
# the consistency level should be the same
self.assertEqual(ConsistencyLevel.QUORUM, rf.message.consistency_level)
def test_all_retries_fail(self):
session = self.make_session()
pool = session._pools.get.return_value
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 1)
rf = self.make_response_future(session)
rf.send_request()
rf.session._pools.get.assert_called_once_with('ip1')
result = Mock(spec=IsBootstrappingErrorMessage, info={})
rf._set_result(result)
# simulate the executor running this
session.submit.assert_called_once_with(rf._retry_task, False)
rf._retry_task(False)
# it should try with a different host
rf.session._pools.get.assert_called_with('ip2')
result = Mock(spec=IsBootstrappingErrorMessage, info={})
rf._set_result(result)
# simulate the executor running this
session.submit.assert_called_with(rf._retry_task, False)
rf._retry_task(False)
self.assertRaises(NoHostAvailable, rf.result)
def test_all_pools_shutdown(self):
session = self.make_basic_session()
session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2']
session._pools.get.return_value.is_shutdown = True
rf = ResponseFuture(session, Mock(), Mock(), 1)
rf.send_request()
self.assertRaises(NoHostAvailable, rf.result)
def test_first_pool_shutdown(self):
session = self.make_basic_session()
session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2']
# first return a pool with is_shutdown=True, then is_shutdown=False
session._pools.get.side_effect = [Mock(is_shutdown=True), Mock(is_shutdown=False)]
rf = self.make_response_future(session)
rf.send_request()
rf._set_result(self.make_mock_response([{'col': 'val'}]))
result = rf.result()
self.assertEqual(result, [{'col': 'val'}])
def test_timeout_getting_connection_from_pool(self):
session = self.make_basic_session()
session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2']
# the first pool will raise an exception on borrow_connection()
exc = NoConnectionsAvailable()
first_pool = Mock(is_shutdown=False)
first_pool.borrow_connection.side_effect = exc
# the second pool will return a connection
second_pool = Mock(is_shutdown=False)
connection = Mock(spec=Connection)
second_pool.borrow_connection.return_value = (connection, 1)
session._pools.get.side_effect = [first_pool, second_pool]
rf = self.make_response_future(session)
rf.send_request()
rf._set_result(self.make_mock_response([{'col': 'val'}]))
self.assertEqual(rf.result(), [{'col': 'val'}])
# make sure the exception is recorded correctly
self.assertEqual(rf._errors, {'ip1': exc})
def test_callback(self):
session = self.make_session()
rf = self.make_response_future(session)
rf.send_request()
callback = Mock()
expected_result = [{'col': 'val'}]
arg = "positional"
kwargs = {'one': 1, 'two': 2}
rf.add_callback(callback, arg, **kwargs)
rf._set_result(self.make_mock_response(expected_result))
result = rf.result()
self.assertEqual(result, expected_result)
callback.assert_called_once_with(expected_result, arg, **kwargs)
# this should get called immediately now that the result is set
rf.add_callback(self.assertEqual, [{'col': 'val'}])
def test_errback(self):
session = self.make_session()
pool = session._pools.get.return_value
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 1)
query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
query.retry_policy = Mock()
query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None)
message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
rf = ResponseFuture(session, message, query, 1)
rf.send_request()
rf.add_errback(self.assertIsInstance, Exception)
result = Mock(spec=UnavailableErrorMessage, info={})
rf._set_result(result)
self.assertRaises(Exception, rf.result)
# this should get called immediately now that the error is set
rf.add_errback(self.assertIsInstance, Exception)
def test_multiple_callbacks(self):
session = self.make_session()
rf = self.make_response_future(session)
rf.send_request()
callback = Mock()
expected_result = [{'col': 'val'}]
arg = "positional"
kwargs = {'one': 1, 'two': 2}
rf.add_callback(callback, arg, **kwargs)
callback2 = Mock()
arg2 = "another"
kwargs2 = {'three': 3, 'four': 4}
rf.add_callback(callback2, arg2, **kwargs2)
rf._set_result(self.make_mock_response(expected_result))
result = rf.result()
self.assertEqual(result, expected_result)
callback.assert_called_once_with(expected_result, arg, **kwargs)
callback2.assert_called_once_with(expected_result, arg2, **kwargs2)
def test_multiple_errbacks(self):
session = self.make_session()
pool = session._pools.get.return_value
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 1)
query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
query.retry_policy = Mock()
query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None)
message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
rf = ResponseFuture(session, message, query, 1)
rf.send_request()
callback = Mock()
arg = "positional"
kwargs = {'one': 1, 'two': 2}
rf.add_errback(callback, arg, **kwargs)
callback2 = Mock()
arg2 = "another"
kwargs2 = {'three': 3, 'four': 4}
rf.add_errback(callback2, arg2, **kwargs2)
expected_exception = Unavailable("message", 1, 2, 3)
result = Mock(spec=UnavailableErrorMessage, info={'something': 'here'})
result.to_exception.return_value = expected_exception
rf._set_result(result)
self.assertRaises(Exception, rf.result)
callback.assert_called_once_with(expected_exception, arg, **kwargs)
callback2.assert_called_once_with(expected_exception, arg2, **kwargs2)
def test_add_callbacks(self):
session = self.make_session()
query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
query.retry_policy = Mock()
query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None)
message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
# test errback
rf = ResponseFuture(session, message, query, 1)
rf.send_request()
rf.add_callbacks(
callback=self.assertEqual, callback_args=([{'col': 'val'}],),
errback=self.assertIsInstance, errback_args=(Exception,))
result = Mock(spec=UnavailableErrorMessage, info={})
rf._set_result(result)
self.assertRaises(Exception, rf.result)
# test callback
rf = ResponseFuture(session, message, query, 1)
rf.send_request()
callback = Mock()
expected_result = [{'col': 'val'}]
arg = "positional"
kwargs = {'one': 1, 'two': 2}
rf.add_callbacks(
callback=callback, callback_args=(arg,), callback_kwargs=kwargs,
errback=self.assertIsInstance, errback_args=(Exception,))
rf._set_result(self.make_mock_response(expected_result))
self.assertEqual(rf.result(), expected_result)
callback.assert_called_once_with(expected_result, arg, **kwargs)
def test_prepared_query_not_found(self):
session = self.make_session()
pool = session._pools.get.return_value
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 1)
rf = self.make_response_future(session)
rf.send_request()
session.cluster._prepared_statements = MagicMock(dict)
prepared_statement = session.cluster._prepared_statements.__getitem__.return_value
prepared_statement.query_string = "SELECT * FROM foobar"
prepared_statement.keyspace = "FooKeyspace"
rf._connection.keyspace = "FooKeyspace"
result = Mock(spec=PreparedQueryNotFound, info='a' * 16)
rf._set_result(result)
session.submit.assert_called_once()
args, kwargs = session.submit.call_args
self.assertEqual(rf._reprepare, args[-2])
self.assertIsInstance(args[-1], PrepareMessage)
self.assertEqual(args[-1].query, "SELECT * FROM foobar")
def test_prepared_query_not_found_bad_keyspace(self):
session = self.make_session()
pool = session._pools.get.return_value
connection = Mock(spec=Connection)
pool.borrow_connection.return_value = (connection, 1)
rf = self.make_response_future(session)
rf.send_request()
session.cluster._prepared_statements = MagicMock(dict)
prepared_statement = session.cluster._prepared_statements.__getitem__.return_value
prepared_statement.query_string = "SELECT * FROM foobar"
prepared_statement.keyspace = "FooKeyspace"
rf._connection.keyspace = "BarKeyspace"
result = Mock(spec=PreparedQueryNotFound, info='a' * 16)
rf._set_result(result)
self.assertRaises(ValueError, rf.result)
|
|
#! /usr/bin/env python
import argparse
import copy
import json
import logging
import operator
import re
import time
import traceback
# support Python 2 and 3's versions of this module
try:
import html.parser as HTMLParser
except ImportError:
import HTMLParser
import config as _config
import executor
config = _config.Config()
class Flagger(executor.Executor):
operators = {'>': operator.gt, '<': operator.lt, '==': operator.eq,
'>=': operator.ge, '<=': operator.le}
def __init__(self, *args, **kwargs):
self.htmlparser = HTMLParser.HTMLParser()
super(Flagger, self).__init__(*args, **kwargs)
self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG if (self.debug or self.verbose) else logging.ERROR)
logging.basicConfig()
self.now = int(time.time())
def extract_threshold(self, token):
"""
accept tokens of the format:
int
>=int
<=int
==int
>int
<int
        returns (comparator, value) or raises an error if invalid
"""
comparator = re.sub("\d+$", "", token)
value = int(re.sub("\D*", "", token))
if comparator == '': # no comparator specified
comparator = '>='
comparator = self.htmlparser.unescape(comparator)
self.logger.debug("token: {} comparator: {} value: {}".format(token, comparator, value))
assert comparator in self.operators
return (comparator, value)
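    # Illustrative examples (not from the original source): extract_threshold(">=3")
    # returns ('>=', 3), a bare "2" defaults the comparator and returns ('>=', 2),
    # and an unrecognized comparator such as "~2" trips the assertion above.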
def initialize_control(self):
"""
sets up known control configuration based on control channel messages
"""
channel = config.control_channel
if not self.slacker.channel_exists(channel):
self.ds.logger.warning("Flagger control channel does not exist, cannot run. Please create #%s.", channel)
return False
cid = self.slacker.get_channelid(channel)
messages = self.slacker.get_messages_in_time_range(0, cid, self.now)
control = {}
for message in messages:
text = message['text']
tokens = text.split()
if tokens[0:3] != ['flag', 'content', 'rule']:
continue
if len(tokens) < 5:
self.ds.logger.warning("Control message %s has too few tokens", text)
continue
if len(tokens) == 5 and tokens[4] == 'delete':
uuid = tokens[3]
if uuid in control:
del(control[uuid])
self.logger.debug("Message {} deletes UUID {}".format(text, uuid))
continue
try:
tokens = text.split()
uuid = tokens[3]
comparator, threshold = self.extract_threshold(tokens[4])
emoji = tokens[5].replace(":", "")
output_channel_id = re.sub("[<>]", "", tokens[6])
if output_channel_id.find("|") != -1:
cid, cname = output_channel_id.split("|")
output_channel_id = cid
output_channel_name = self.slacker.replace_id(output_channel_id)
control[uuid] = {'threshold': threshold, "comparator": comparator,
'emoji': emoji, 'output': output_channel_name}
except Exception as e:
tb = traceback.format_exc()
m = "Couldn't create flagger rule with text {}: {} {}".format(text, Exception, e)
self.logger.debug(m)
self.logger.debug(tb)
if not self.debug:
self.ds.logger.warning(m)
self.control = control
self.logger.debug("control: {}".format(json.dumps(self.control, indent=4)))
self.emoji = [x['emoji'] for x in self.control.values()]
self.initialize_emoji_aliases()
return True
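    # Control-message format assumed by the parser above (values illustrative):
    #   flag content rule <uuid> >=3 :fire: <#C024BE91L|announcements>
    # tokens[3] is the rule id, tokens[4] the threshold, tokens[5] the emoji and
    # tokens[6] the output channel reference; "flag content rule <uuid> delete"
    # removes a previously defined rule.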
def initialize_emoji_aliases(self):
"""
In some cases, emojiA might be an alias of emojiB
The problem is that if we say that 2xemojiB should be
enough to flag something, then we should accept
2 x emojiB
1 x emojiA, 1 x emojiB
2 x emojiA
This method grabs the emoji list from the Slack and creates the equivalence
structure
"""
self.logger.debug("Starting emoji alias list")
emojis_response = self.slacker.get_emojis()
self.logger.debug("emojis_response keys are {}".format(emojis_response.keys()))
emojis = emojis_response['emoji']
equivalents = {}
for emoji in emojis:
target = emojis[emoji]
target_type, target_value = target.split(":", 1)
if target_type != "alias":
continue
self.logger.debug("Found emoji alias: {} <-> {}".format(emoji, target_value))
if emoji not in equivalents:
equivalents[emoji] = []
if target_value not in equivalents:
equivalents[target_value] = []
equivalents[emoji].append(target_value)
equivalents[target_value].append(emoji)
self.emoji_equivalents = equivalents
self.logger.debug("equivalents: {}".format(json.dumps(self.emoji_equivalents, indent=4)))
if "floppy_disk" in self.emoji_equivalents.keys():
self.logger.debug("floppy_disk: {}".format(self.emoji_equivalents['floppy_disk']))
def message_destination(self, message):
"""
if interesting, returns channel name[s] in which to announce
otherwise, returns []
"""
channels = []
if message.get("reactions") is None:
            return []
reactions = message.get("reactions")
emoji_set = set(self.emoji)
current_reactions = {}
t = message.get("text")
if t.find("SVP") != -1:
def d(p):
pass
else:
def d(p):
pass
d("reactions: {}".format(reactions))
d("emoji_equivalents:\n{}".format(json.dumps(self.emoji_equivalents, indent=4)))
if "floppy_disk" in self.emoji_equivalents.keys():
d("floppy_disk: {}".format(self.emoji_equivalents['floppy_disk']))
for reaction in reactions:
count = reaction['count']
current_emoji = reaction['name']
d("current_emoji: {}".format(current_emoji))
equivalents = copy.copy(self.emoji_equivalents.get(current_emoji, []))
d("equivalents = {}".format(equivalents))
equivalents.append(current_emoji)
d("equivalents = {}".format(equivalents))
current_set = set(equivalents)
i = current_set.intersection(emoji_set)
if not i:
continue
for ce in equivalents:
current_reactions[ce] = current_reactions.get(ce, 0) + count
# if we're here, at least one emoji matches (but count may still not be right)
d("Current reactions: {}".format(current_reactions))
for uuid in self.control:
rule = self.control[uuid]
for ce in current_reactions:
if ce == rule['emoji']:
count = current_reactions[ce]
threshold = rule['threshold']
comparator = rule['comparator']
op = self.operators[comparator]
if op(count, threshold):
channels.append(rule)
return channels
def get_interesting_messages(self):
"""
returns [[message, [listofchannelstoannounce]]
"""
dayago = self.now - 86400
messages = []
for channel in self.slacker.channels_by_name:
cid = self.slacker.get_channelid(channel)
cur_messages = self.slacker.get_messages_in_time_range(dayago, cid, self.now)
for message in cur_messages:
announce = self.message_destination(message)
if announce:
messages.append([message, announce])
return messages
def announce_interesting_messages(self):
messages = self.get_interesting_messages()
slack_name = _config.SLACK_NAME
for message, channels in messages:
ts = message["ts"].replace(".", "")
channel = message["channel"]
author = message["user"]
author_name = self.slacker.users_by_id[author]
text = self.slacker.asciify(message["text"])
text = self.slacker.detokenize(text)
url = "http://{}.slack.com/archives/{}/p{}".format(slack_name, channel, ts)
m = "*@{}* said in *#{}* _'{}'_ ({})".format(author_name, channel, text, url)
for output_channel in channels:
if self.slacker.channel_exists(output_channel["output"]):
md = "Saying {} to {}".format(m, output_channel["output"])
self.logger.debug(md)
if not self.debug and self.destalinator_activated:
self.slackbot.say(output_channel["output"], m)
else:
self.ds.logger.warning("Attempted to announce in {} because of rule :{}:{}{}, but channel does not exist.".format(
output_channel["output"],
output_channel["emoji"],
output_channel["comparator"],
output_channel["threshold"]
))
def flag(self):
if self.initialize_control():
self.announce_interesting_messages()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Flag interesting Slack messages.')
parser.add_argument("--debug", action="store_true", default=False)
parser.add_argument("--verbose", action="store_true", default=False)
args = parser.parse_args()
flagger = Flagger(debug=args.debug, verbose=args.verbose)
flagger.flag()
|
|
#!/usr/bin/env python
import rospy
import os
import logging
import time
import datetime as dt
import threading
import re
import uuid
import pandas as pd
import traceback
from chatbot.cfg import ChatbotConfig
from chatbot.client import Client
from chatbot.db import get_mongodb, MongoDB
from chatbot.polarity import Polarity
from dynamic_reconfigure.server import Server
from r2_perception.msg import Forget, ForgetAll, Assign, State
from hr_msgs.msg import audiodata, SetGesture, Target
from hr_msgs.msg import ChatMessage, TTS, ChatResponse, ChatResponses
from std_msgs.msg import String, Bool
from jinja2 import Template
import dynamic_reconfigure
import dynamic_reconfigure.client
logger = logging.getLogger('hr.chatbot.ai')
report_logger = logging.getLogger('hr.chatbot.ai.report')
HR_CHATBOT_AUTHKEY = os.environ.get('HR_CHATBOT_AUTHKEY', 'AAAAB3NzaC')
HR_CHATBOT_REQUEST_DIR = os.environ.get('HR_CHATBOT_REQUEST_DIR') or \
os.path.expanduser('~/.hr/chatbot/requests')
HR_CHATBOT_RESPONSE_DIR = os.environ.get('HR_CHATBOT_RESPONSE_DIR') or \
os.path.expanduser('~/.hr/chatbot/responses')
ROBOT_NAME = os.environ.get('NAME', 'default')
count = 0
def update_parameter(node, param, *args, **kwargs):
client = dynamic_reconfigure.client.Client(node, *args, **kwargs)
try:
client.update_configuration(param)
except dynamic_reconfigure.DynamicReconfigureParameterException as ex:
logger.error("Updating {} parameter: {}".format(node, ex))
return False
return True
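# Example usage (mirrors the call made from Chatbot.handle_control below; values are
# illustrative only):
#   update_parameter('chatbot', {'delay_response': True, 'delay_time': 5}, timeout=2)
# returns True on success, or False if dynamic_reconfigure rejects the update.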
class Console(object):
def write(self, msg):
logger.info("Console: {}".format(msg.strip()))
class Locker(object):
def __init__(self):
self._lock = threading.RLock()
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
class Chatbot():
def __init__(self):
self.botname = rospy.get_param('botname', 'sophia')
self.client = Client(
HR_CHATBOT_AUTHKEY, self.botname, response_listener=self,
stdout=Console())
self.client.chatbot_url = rospy.get_param(
'chatbot_url', 'http://localhost:8001')
        # chatbot now saves a bit of simple state to handle sentiment analysis
        # after formulating a response it saves it in a buffer if S.A. is active
        # It has a simple state transition - initialized in wait_client
        # after getting the client, if S.A. is active go to wait_emo
        # in the affect_express callback, publish the response and reset to wait_client
self._response_buffer = ''
self._state = 'wait_client'
        # argument must be set to activate sentiment analysis
self._sentiment_active = False
# sentiment dictionary
self.polarity = Polarity()
self._polarity_threshold = 0.2
self.speech = False
self.enable = True
self.mute = False
self.insert_behavior = False
self.enable_face_recognition = False
self.hybrid_mode = False
self._locker = Locker()
try:
self.mongodb = get_mongodb()
except Exception as ex:
self.mongodb = MongoDB()
self.node_name = rospy.get_name()
self.request_dir = os.path.join(HR_CHATBOT_REQUEST_DIR,
dt.datetime.strftime(dt.datetime.utcnow(), '%Y%m%d'))
if not os.path.isdir(self.request_dir):
os.makedirs(self.request_dir)
self.response_dir = os.path.join(HR_CHATBOT_RESPONSE_DIR,
dt.datetime.strftime(dt.datetime.utcnow(), '%Y%m%d'))
if not os.path.isdir(self.response_dir):
os.makedirs(self.response_dir)
self.requests_fname = os.path.join(
self.request_dir, '{}.csv'.format(str(uuid.uuid1())))
self.responses_fname = os.path.join(
self.response_dir, '{}.csv'.format(str(uuid.uuid1())))
self.input_stack = []
self.timer = None
self.delay_response = rospy.get_param('delay_response', False)
self.recover = False
self.delay_time = rospy.get_param('delay_time', 5)
self.run_id = rospy.get_param('/run_id', '')
self.client.set_run_id(self.run_id)
logger.info("Set run_id %s", self.run_id)
rospy.Subscriber('chatbot_speech', ChatMessage, self._request_callback)
rospy.Subscriber('speech_events', String, self._speech_event_callback) # robot starts to speak
rospy.Subscriber('chat_events', String, self._chat_event_callback) # user starts to speak
rospy.Subscriber('audio_sensors', audiodata, self._audio_sensors_callback)
self.tts_ctrl_pub = rospy.Publisher(
'tts_control', String, queue_size=1)
self._responses_publisher = rospy.Publisher(
'chatbot_responses', ChatResponses, queue_size=1)
# receive user's choice
rospy.Subscriber('chatbot_response', ChatResponse, self._response_callback)
# send to tts
self._response_publisher = rospy.Publisher(
'tts', TTS, queue_size=1)
self._hybrid_mode_publisher = rospy.Publisher(
'hybrid_mode', Bool, queue_size=1, latch=True)
# send communication non-verbal blink message to behavior
self._blink_publisher = rospy.Publisher(
'chatbot_blink', String, queue_size=1)
# Perceived emotional content; and emotion to express
# Perceived: based on what chatbot heard, this is how robot should
# feel. Expressed: the emotional content that the chatbot should
# put into what it says.
self._affect_publisher = rospy.Publisher(
'chatbot_affect_perceive', String, queue_size=1)
# Echo chat messages as plain strings.
self._echo_publisher = rospy.Publisher(
'perceived_text', String, queue_size=1)
rospy.Subscriber('chatbot_speech', ChatMessage, self._echo_callback)
rospy.set_param('node_status/chatbot', 'running')
self.btree_publisher = rospy.Publisher(
'/behavior_switch', String, queue_size=1)
self._gesture_publisher = rospy.Publisher(
'/blender_api/set_gesture', SetGesture, queue_size=1)
self._look_at_publisher = rospy.Publisher(
'/blender_api/set_face_target', Target, queue_size=1)
# r2_perception
self._perception_assign_publisher = rospy.Publisher(
'perception/api/assign', Assign, queue_size=1)
self._perception_forget_publisher = rospy.Publisher(
'perception/api/forget', Forget, queue_size=1)
self._perception_forget_all_publisher = rospy.Publisher(
'perception/api/forget_all', ForgetAll, queue_size=1)
self._perception_state_subscriber = rospy.Subscriber(
'perception/state', State, self._perception_state_callback)
self.perception_users = {}
self.face_cache = []
self.main_face = None
self.faces = {} # faceid(session) -> face
self.current_user = None
def _threadsafe(f):
def wrap(self, *args, **kwargs):
self._locker.lock()
try:
return f(self, *args, **kwargs)
finally:
self._locker.unlock()
return wrap
def _perception_state_callback(self, msg):
global count
count += 1
self.face_cache.extend(msg.faces)
if count % 30 == 0:
count = 0
self.perception_users = {}
for face in self.face_cache:
self.perception_users[face.fsdk_id] = face
faces = self.perception_users.values()
self.face_cache = []
if faces:
faces = sorted(faces, key=lambda face: face.position.x*face.position.x+face.position.y*face.position.y+face.position.z*face.position.z)
active_face = None
for face in faces:
if face.is_speaking:
active_face = face
logger.info("%s is speaking" % face.fsdk_id)
if not active_face:
active_face = faces[0] # the closest face
if self.main_face is None:
self.main_face = active_face
logger.warn("Assigned main face ID %s, first name %s" % (self.main_face.fsdk_id, self.main_face.first_name))
elif self.main_face.fsdk_id != active_face.fsdk_id:
logger.warn("Main face ID has been changed from %s to %s" % (self.main_face.fsdk_id, active_face.fsdk_id))
self.main_face = active_face
else:
if self.main_face:
logger.warn("Removed main face ID %s, first name %s" % (self.main_face.fsdk_id, self.main_face.first_name))
self.main_face = None
def assign_name(self, fsdk_id, firstname, lastname=None):
assign = Assign()
assign.fsdk_id = fsdk_id
assign.first_name = str(firstname)
assign.last_name = str(lastname)
assign.formal_name = str(firstname)
logger.info("Assigning name %s to face id %s" % (firstname, fsdk_id))
self._perception_assign_publisher.publish(assign)
logger.info("Assigned name %s to face id %s" % (firstname, fsdk_id))
def forget_name(self, uid):
self._perception_forget_publisher.publish(Forget(uid))
logger.info("Forgot name uid %s" % uid)
def sentiment_active(self, active):
self._sentiment_active = active
def ask(self, chatmessages, query=False):
if chatmessages and len(chatmessages) > 0:
self.client.lang = chatmessages[0].lang
if self.enable_face_recognition and self.main_face: # visual perception
self.client.set_user(self.main_face.fsdk_id)
self.faces[self.main_face.fsdk_id] = self.main_face
for face in self.faces.values():
if face.fsdk_id == self.main_face.fsdk_id and face.uid:
fullname = '{} {}'.format(face.first_name, face.last_name)
self.client.set_context('fullname={}'.format(fullname))
logger.info("Set context fullname %s" % fullname)
                        if face.formal_name:
                            self.client.set_context('firstname={}'.format(face.formal_name))
                            logger.info("Set context firstname %s" % face.formal_name)
                        else:
                            self.client.set_context('firstname={}'.format(face.first_name))
                            logger.info("Set context firstname %s" % face.first_name)
self.client.set_context('lastname={}'.format(face.last_name))
logger.info("Set context lastname %s" % face.last_name)
else:
if self.current_user:
self.client.set_user(self.current_user)
if '_' in self.current_user:
first, last = self.current_user.split('_', 1)
self.client.set_context('firstname={},lastname={},fullname={}'.format(first, last, self.current_user))
logger.info("Set context first name %s" % first)
logger.info("Set context last name %s" % last)
else:
self.client.set_context('name={}'.format(self.current_user))
logger.info("Set context name %s" % self.current_user)
else:
logger.error("No language is specified")
return
request_id = str(uuid.uuid1())
question = ' '.join([msg.utterance for msg in chatmessages])
logger.info("Asking {}".format(question))
#if self.main_face:
# self.client.ask('[start]', query, request_id=request_id)
self.client.ask(question, query, request_id=request_id)
logger.info("Sent request {}".format(request_id))
self.write_request(request_id, chatmessages)
def _speech_event_callback(self, msg):
if msg.data == 'start':
self.speech = True
if msg.data == 'stop':
self.speech = False
def _chat_event_callback(self, msg):
if msg.data.startswith('speechstart'):
if self.delay_response:
self.reset_timer()
def _audio_sensors_callback(self, msg):
if msg.Speech:
self.client.cancel_timer()
@_threadsafe
def _request_callback(self, chat_message):
if not self.enable:
logger.warn("Chatbot is disabled")
return
if 'shut up' in chat_message.utterance.lower():
logger.info("Robot's talking wants to be interruptted")
self.tts_ctrl_pub.publish("shutup")
rospy.sleep(0.5)
self._affect_publisher.publish(String('sad'))
return
if self.speech:
logger.warn("In speech, ignore the question")
return
# Handle chatbot command
cmd, arg, line = self.client.parseline(chat_message.utterance)
func = None
try:
if cmd is not None:
func = getattr(self.client, 'do_' + cmd)
except AttributeError as ex:
pass
if func:
try:
func(arg)
except Exception as ex:
logger.error("Executing command {} error {}".format(func, ex))
return
chat_message.utterance = self.handle_control(chat_message.utterance)
# blink that we heard something, request, probability defined in
# callback
self._blink_publisher.publish('chat_heard')
if self.delay_response:
logger.info("Add input: {}".format(chat_message.utterance))
self.input_stack.append((time.clock(), chat_message))
self._gesture_publisher.publish(SetGesture('nod-2', 0, 1, 1))
self._gesture_publisher.publish(SetGesture('blink-relaxed', 0, 1, 1))
self.reset_timer()
else:
self.ask([chat_message])
def _response_callback(self, msg):
logger.info("Get response msg %s", msg)
text = msg.text
text = re.sub(r"""\[callback.*\]""", '', text)
logger.warn('Send to TTS "%s"', text)
self._response_publisher.publish(TTS(text=text, lang=msg.lang))
if self.client.last_response and self.client.last_response_time:
request_id = self.client.last_response.get('RequestId')
elapse = dt.datetime.utcnow() - self.client.last_response_time
if elapse.total_seconds() > 10: # don't record request id for late coming msg
request_id = ''
try:
self.write_response(request_id, msg)
except Exception as ex:
logger.exception(ex)
else:
logger.warn("No last response")
# send the response back to chat server so it's aware of what's been
# actually said
self.client.feedback(msg.text, msg.label, msg.lang)
def reset_timer(self):
if self.timer is not None:
self.timer.cancel()
logger.info("Canceled timer, {}".format(self.delay_time))
self.timer = None
self.timer = threading.Timer(self.delay_time, self.process_input)
self.timer.start()
logger.info("New timer, {}".format(self.delay_time))
@_threadsafe
def process_input(self):
if not self.input_stack:
return
questions = [i[1].utterance for i in self.input_stack]
question = ' '.join(questions)
logger.info("Joined input: {}".format(question))
self.ask([i[1] for i in self.input_stack])
del self.input_stack[:]
def write_request(self, request_id, chatmessages):
requests = []
columns = ['Datetime', 'RequestId', 'Index', 'Source', 'AudioPath', 'Transcript', 'Confidence']
for i, msg in enumerate(chatmessages):
audio = os.path.basename(msg.audio_path)
request = {
'Datetime': dt.datetime.utcnow(),
'RequestId': request_id,
'Index': i,
'Source': msg.source,
'AudioPath': audio,
'Transcript': msg.utterance,
'RunID': self.run_id,
'Confidence': msg.confidence,
}
requests.append(request)
if requests:
df = pd.DataFrame(requests)
if not os.path.isfile(self.requests_fname):
with open(self.requests_fname, 'w') as f:
f.write(','.join(columns))
f.write('\n')
df.to_csv(self.requests_fname, mode='a', index=False, header=False,
columns=columns)
logger.info("Write request to {}".format(self.requests_fname))
report_logger.warn("Write request", extra={'data': requests[0]}) # Workaround: ES doesn't support array. Log the first request for now
def write_response(self, request_id, msg):
columns = ['Datetime', 'RequestId', 'Answer', 'Lang', 'Category', 'Tier', 'Label']
response = {
'Datetime': dt.datetime.utcnow(),
'RequestId': request_id,
'Answer': msg.text,
'Lang': msg.lang,
'Label': msg.label,
}
if msg.label and '-' in msg.label:
if msg.label.startswith('web'):
_, botid, cat = msg.label.split('-', 2)
else:
botid, cat = msg.label.split('-', 1)
response['Category'] = cat
response['Tier'] = botid
df = pd.DataFrame(response, index=[0])
if not os.path.isfile(self.responses_fname):
with open(self.responses_fname, 'w') as f:
f.write(','.join(columns))
f.write('\n')
df.to_csv(self.responses_fname, mode='a', index=False, header=False,
columns=columns)
logger.warn("Write response to {}".format(self.responses_fname))
report_logger.warn('Chatbot hybrid response', extra={'data': response})
def handle_control(self, response):
t = Template(response)
if hasattr(t.module, 'delay'):
delay = t.module.delay
if not self.delay_response:
self.recover = True
param = {'delay_time': delay}
param['delay_response'] = delay > 0
update_parameter('chatbot', param, timeout=2)
logger.info("Set delay to {}".format(delay))
if hasattr(t.module, 'btree'):
btree = t.module.btree
if btree in ['btree_on', 'on', 'true', True]:
self.btree_publisher.publish('btree_on')
logger.info("Enable btree")
elif btree in ['btree_off', 'off', 'false', False]:
self.btree_publisher.publish('btree_off')
logger.info("Disable btree")
else:
logger.warn("Incorrect btree argument, {}".format(btree))
return t.render()
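    # Illustrative control markup (assuming Jinja2 exports top-level {% set %} variables
    # through Template.module, which handle_control relies on):
    #   handle_control("{% set delay = 2 %}{% set btree = 'on' %}Hi there")
    # would enable delayed responses with a 2 second window, publish 'btree_on', and
    # return the rendered text "Hi there".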
def on_response(self, sid, response):
if response is None:
logger.error("No response")
return
if sid != self.client.session:
logger.error("Session id doesn't match")
return
lang = response.get('Lang')
if self.hybrid_mode:
responses_msg = ChatResponses()
responses = response.get('responses')
all_responses = []
for cat, trs in responses.iteritems():
if cat == '_DEFAULT_': continue
for tr in trs:
tr['cat'] = cat
all_responses.append(tr)
all_responses = sorted(all_responses, key=lambda x: x.get('cweight', 0))
for r in all_responses:
response_msg = ChatResponse()
response_msg.text = str(r.get('text'))
response_msg.lang = str(lang)
botid = r.get('botid')
cat = r.get('cat')
label = str('%s-%s' % (botid, cat))
response_msg.label = label
responses_msg.responses.append(response_msg)
logger.warn("Add response %s", response_msg)
self._responses_publisher.publish(responses_msg)
logger.info("Pulished responses in hybrid mode")
return
tier_response = response['default_response']
if not tier_response:
return
logger.info("Get response {}".format(tier_response))
#for k, v in response.iteritems():
# rospy.set_param('{}/response/{}'.format(self.node_name, k), v)
text = tier_response.get('text')
emotion = tier_response.get('emotion')
orig_text = tier_response.get('orig_text')
if orig_text:
try:
self.handle_control(orig_text)
except Exception as ex:
logger.error(ex)
#elif self.recover:
# param = {
# 'delay_response': False
# }
# update_parameter('chatbot', param, timeout=2)
# self.recover = False
# logger.info("Recovered delay response")
# Add space after punctuation for multi-sentence responses
text = text.replace('?', '? ')
text = text.replace('_', ' ')
if self.insert_behavior:
# no
pattern=r"(\bnot\s|\bno\s|\bdon't\s|\bwon't\s|\bdidn't\s)"
text = re.sub(pattern, '\g<1>|shake3| ', text, flags=re.IGNORECASE)
# yes
pattern=r'(\byes\b|\byeah\b|\byep\b)'
text = re.sub(pattern, '\g<1>|nod|', text, flags=re.IGNORECASE)
# question
# pattern=r'(\?)'
# thinks = ['thinkl', 'thinkr', 'thinklu', 'thinkld', 'thinkru', 'thinkrd']
# random.shuffle(thinks)
# text = re.sub(pattern, '|{}|\g<1>'.format(thinks[0]), text, flags=re.IGNORECASE)
# if sentiment active save state and wait for affect_express to publish response
# otherwise publish and let tts handle it
if self._sentiment_active:
emo = String()
if emotion:
emo.data = emotion
self._affect_publisher.publish(emo)
rospy.loginfo(
'[#][PERCEIVE ACTION][EMOTION] {}'.format(emo.data))
logger.info('Chatbot perceived emo: {}'.format(emo.data))
else:
p = self.polarity.get_polarity(text)
logger.debug('Polarity for "{}" is {}'.format(
text.encode('utf-8'), p))
# change emotion if polarity magnitude exceeds threshold defined in constructor
# otherwise let top level behaviors control
if p > self._polarity_threshold:
emo.data = 'happy'
self._affect_publisher.publish(emo)
rospy.loginfo(
'[#][PERCEIVE ACTION][EMOTION] {}'.format(emo.data))
logger.info(
'Chatbot perceived emo: {}'.format(emo.data))
                    # Currently the response is independent of the message received, so no need to wait.
                    # Leave it for Opencog to handle responses later on.
elif p < 0 and abs(p) > self._polarity_threshold:
emo.data = 'frustrated'
self._affect_publisher.publish(emo)
rospy.loginfo(
'[#][PERCEIVE ACTION][EMOTION] {}'.format(emo.data))
logger.info(
'Chatbot perceived emo: {}'.format(emo.data))
                    # Currently the response is independent of the message received, so no need to wait.
                    # Leave it for Opencog to handle responses later on.
if not self.mute:
self._blink_publisher.publish('chat_saying')
self._response_publisher.publish(TTS(text=text, lang=lang))
if rospy.has_param('{}/context'.format(self.node_name)):
rospy.delete_param('{}/context'.format(self.node_name))
context = self.client.get_context()
logger.warn("Get context %s" % context)
context['sid'] = self.client.session
for k, v in context.iteritems():
rospy.set_param('{}/context/{}'.format(self.node_name, k), v)
logger.info("Set param {}={}".format(k, v))
if self.enable_face_recognition:
# Assign known name to the percepted faces
face_id = self.client.user
if face_id in self.perception_users:
uid = self.perception_users[face_id].uid
context_firstname = context.get('firstname')
context_lastname = context.get('lastname')
firstname = self.perception_users[face_id].first_name
if not uid:
self.assign_name(face_id, context_firstname, context_lastname)
elif uid and firstname != context_firstname:
logger.warn("Update the name of face id %s from %s to %s" % (
face_id, firstname, context_firstname))
self.forget_name(uid)
self.assign_name(face_id, context_firstname, context_lastname)
else:
logger.warn("Failed to update name of face id %s from %s to %s" % (
face_id, firstname, context_firstname))
else:
logger.warn("User %s is out of scene" % face_id)
logger.warn("Perception face %s" % str(self.perception_users.keys()))
# Just repeat the chat message, as a plain string.
def _echo_callback(self, chat_message):
message = String()
message.data = chat_message.utterance
self._echo_publisher.publish(message)
def reconfig(self, config, level):
self.sentiment_active(config.sentiment)
self.client.chatbot_url = config.chatbot_url
self.enable = config.enable
if not self.enable:
self.client.cancel_timer()
if self.hybrid_mode != config.hybrid_mode:
self._hybrid_mode_publisher.publish(self.hybrid_mode)
self.hybrid_mode = config.hybrid_mode
if self.hybrid_mode:
logger.warn("Enabled hybrid mode")
self.delay_response = config.delay_response
self.delay_time = config.delay_time
self.client.ignore_indicator = config.ignore_indicator
if config.set_that:
self.client.do_said(config.set_that)
config.set_that = ''
if config.set_context:
self.client.set_context(config.set_context)
self.enable_face_recognition = config.enable_face_recognition
if not self.enable_face_recognition:
self.client.set_user()
marker = '%s:%s' % (config.type_of_marker, config.marker)
logger.info("Event marker %s", marker)
self.client.set_marker(marker)
self.client.set_context('event=%s' % config.type_of_marker)
self.mute = config.mute
self.insert_behavior = config.insert_behavior
if config.preset_user and config.preset_user != self.current_user:
self.current_user = config.preset_user
config.user = ''
logger.info("Set preset user %s" % self.current_user)
if config.user and config.user != self.current_user:
self.current_user = config.user
config.preset_user = ''
logger.info("Set current user %s" % self.current_user)
if config.reset_session:
self.client.reset_session()
            config.reset_session = False
return config
if __name__ == '__main__':
rospy.init_node('chatbot')
bot = Chatbot()
from rospkg import RosPack
rp = RosPack()
data_dir = os.path.join(rp.get_path('chatbot'), 'scripts/aiml')
sent3_file = os.path.join(data_dir, "senticnet3.props.csv")
bot.polarity.load_sentiment_csv(sent3_file)
Server(ChatbotConfig, bot.reconfig)
rospy.spin()
|
|
# ActivitySim
# See full license in LICENSE.txt.
import logging
import time
import multiprocessing
import ctypes
from collections import OrderedDict
import numpy as np
import pandas as pd
from activitysim.core import inject
from activitysim.core import util
from activitysim.core import config
from activitysim.core import tracing
from activitysim.abm.tables.size_terms import tour_destination_size_terms
logger = logging.getLogger(__name__)
"""
ShadowPriceCalculator and associated utility methods
See docstrings for documentation on:
update_shadow_prices how shadow_price coefficients are calculated
synchronize_choices interprocess communication to compute aggregate modeled_size
check_fit convergence criteria for shadow_pric iteration
Import concepts and variables:
model_selector: str
Identifies a specific location choice model (e.g. 'school', 'workplace')
The various models work similarly, but use different expression files, model settings, etc.
segment: str
Identifies a specific demographic segment of a model (e.g. 'elementary' segment of 'school')
Models can have different size term coefficients (in destinatin_choice_size_terms file) and
different utility coefficients in models's location and location_sample csv expression files
size_table: pandas.DataFrame
"""
"""
Artisanal reverse semaphores to synchronize concurrent access to shared data buffer
we use the first two rows of the final column in numpy-wrapped shared data as 'reverse semaphores'
(they synchronize concurrent access to shared data resource rather than throttling access)
ShadowPriceCalculator.synchronize_choices coordinates access to the global aggregate zone counts
(local_modeled_size summed across all sub-processes) using these two semaphores
(which are really only tuples of indexes of locations in the shared data array.
"""
TALLY_CHECKIN = (0, -1)
TALLY_CHECKOUT = (1, -1)
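# Sketch of the check-in/check-out protocol implied above (a plausible reading of the
# docstring, not necessarily the exact implementation): holding shared_data_lock, each
# sub-process adds its local counts into shared_data and increments
# shared_data[TALLY_CHECKIN]; it then waits until the check-in tally equals the number
# of processes, copies out the global aggregate, and increments shared_data[TALLY_CHECKOUT]
# so the buffer can safely be reset before the next iteration.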
def size_table_name(model_selector):
"""
Returns canonical name of injected destination desired_size table
Parameters
----------
model_selector : str
e.g. school or workplace
Returns
-------
table_name : str
"""
return "%s_destination_size" % model_selector
class ShadowPriceCalculator(object):
def __init__(self, model_settings, num_processes, shared_data=None, shared_data_lock=None):
"""
Presence of shared_data is used as a flag for multiprocessing
If we are multiprocessing, shared_data should be a multiprocessing.RawArray buffer
to aggregate modeled_size across all sub-processes, and shared_data_lock should be
a multiprocessing.Lock object to coordinate access to that buffer.
Optionally load saved shadow_prices from data_dir if config setting use_shadow_pricing
and shadow_setting LOAD_SAVED_SHADOW_PRICES are both True
Parameters
----------
model_settings : dict
        shared_data : numpy array wrapping a multiprocessing.RawArray, or None (if single process)
        shared_data_lock : multiprocessing.Lock, or None (if single process)
"""
self.num_processes = num_processes
self.use_shadow_pricing = bool(config.setting('use_shadow_pricing'))
self.saved_shadow_price_file_path = None # set by read_saved_shadow_prices if loaded
self.model_selector = model_settings['MODEL_SELECTOR']
full_model_run = config.setting('households_sample_size') == 0
if self.use_shadow_pricing and not full_model_run:
logger.warning("deprecated combination of use_shadow_pricing and not full_model_run")
if (self.num_processes > 1) and not config.setting('fail_fast'):
# if we are multiprocessing, then fail_fast should be true or we will wait forever for failed processes
logger.warning("deprecated combination of multiprocessing and not fail_fast")
raise RuntimeError("Shadow pricing requires fail_fast setting in multiprocessing mode")
self.segment_ids = model_settings['SEGMENT_IDS']
# - modeled_size (set by call to set_choices/synchronize_choices)
self.modeled_size = None
if self.use_shadow_pricing:
self.shadow_settings = config.read_model_settings('shadow_pricing.yaml')
for k in self.shadow_settings:
logger.debug("shadow_settings %s: %s" % (k, self.shadow_settings.get(k)))
# - destination_size_table (desired_size)
self.desired_size = inject.get_table(size_table_name(self.model_selector)).to_frame()
self.desired_size = self.desired_size.sort_index()
assert self.desired_size.index.is_monotonic_increasing, \
f"{size_table_name(self.model_selector)} not is_monotonic_increasing"
# - shared_data
if shared_data is not None:
assert shared_data.shape[0] == self.desired_size.shape[0]
assert shared_data.shape[1] == self.desired_size.shape[1] + 1 # tally column
assert shared_data_lock is not None
self.shared_data = shared_data
self.shared_data_lock = shared_data_lock
# - load saved shadow_prices (if available) and set max_iterations accordingly
if self.use_shadow_pricing:
self.shadow_prices = None
self.shadow_price_method = self.shadow_settings['SHADOW_PRICE_METHOD']
assert self.shadow_price_method in ['daysim', 'ctramp']
if self.shadow_settings['LOAD_SAVED_SHADOW_PRICES']:
# read_saved_shadow_prices logs error and returns None if file not found
self.shadow_prices = self.read_saved_shadow_prices(model_settings)
if self.shadow_prices is None:
self.max_iterations = self.shadow_settings.get('MAX_ITERATIONS', 5)
else:
self.max_iterations = self.shadow_settings.get('MAX_ITERATIONS_SAVED', 1)
# initial_shadow_price if we did not load
if self.shadow_prices is None:
# initial value depends on method
initial_shadow_price = 1.0 if self.shadow_price_method == 'ctramp' else 0.0
self.shadow_prices = \
pd.DataFrame(data=initial_shadow_price,
columns=self.desired_size.columns,
index=self.desired_size.index)
else:
self.max_iterations = 1
self.num_fail = pd.DataFrame(index=self.desired_size.columns)
self.max_abs_diff = pd.DataFrame(index=self.desired_size.columns)
self.max_rel_diff = pd.DataFrame(index=self.desired_size.columns)
def read_saved_shadow_prices(self, model_settings):
"""
Read saved shadow_prices from csv file in data_dir (so-called warm start)
returns None if no saved shadow price file name specified or named file not found
Parameters
----------
model_settings : dict
Returns
-------
shadow_prices : pandas.DataFrame or None
"""
shadow_prices = None
# - load saved shadow_prices
saved_shadow_price_file_name = model_settings.get('SAVED_SHADOW_PRICE_TABLE_NAME')
if saved_shadow_price_file_name:
# FIXME - where should we look for this file?
file_path = config.data_file_path(saved_shadow_price_file_name, mandatory=False)
if file_path:
shadow_prices = pd.read_csv(file_path, index_col=0)
self.saved_shadow_price_file_path = file_path # informational
logger.info("loaded saved_shadow_prices from %s" % file_path)
else:
logger.warning("Could not find saved_shadow_prices file %s" % file_path)
return shadow_prices
def synchronize_choices(self, local_modeled_size):
"""
We have to wait until all processes have computed choices and aggregated them by segment
and zone before we can compute global aggregate zone counts (by segment). Since the global
zone counts are in shared data, we have to coordinate access to the data structure across
sub-processes.
Note that all access to self.shared_data has to be protected by acquiring shared_data_lock
ShadowPriceCalculator.synchronize_choices coordinates access to the global aggregate
zone counts (local_modeled_size summed across all sub-processes).
* All processes wait (in case we are iterating) until any stragglers from the previous
iteration have exited the building. (TALLY_CHECKOUT goes to zero)
* Processes then add their local counts into the shared_data and increment TALLY_CHECKIN
* All processes wait until everybody has checked in (TALLY_CHECKIN == num_processes)
* Processes make local copy of shared_data and check out (increment TALLY_CHECKOUT)
* first_in process waits until all processes have checked out, then zeros shared_data
and clears semaphores
Parameters
----------
local_modeled_size : pandas DataFrame
Returns
-------
global_modeled_size_df : pandas DataFrame
local copy of shared global_modeled_size data as dataframe
with same shape and columns as local_modeled_size
"""
# shouldn't be called if we are not multiprocessing
assert self.shared_data is not None
assert self.num_processes > 1
def get_tally(t):
with self.shared_data_lock:
return self.shared_data[t]
def wait(tally, target):
while get_tally(tally) != target:
time.sleep(1)
# - nobody checks in until checkout clears
wait(TALLY_CHECKOUT, 0)
# - add local_modeled_size data, increment TALLY_CHECKIN
with self.shared_data_lock:
first_in = self.shared_data[TALLY_CHECKIN] == 0
# add local data from df to shared data buffer
# final column is used for tallies, hence the negative index
# Ellipsis expands ':' to fill the available dims, so [..., 0:-1] is the whole array except the tally column
self.shared_data[..., 0:-1] += local_modeled_size.values
self.shared_data[TALLY_CHECKIN] += 1
# - wait until everybody else has checked in
wait(TALLY_CHECKIN, self.num_processes)
# - copy shared data, increment TALLY_CHECKOUT
with self.shared_data_lock:
logger.info("copy shared_data")
# numpy array with sum of local_modeled_size.values from all processes
global_modeled_size_array = self.shared_data[..., 0:-1].copy()
self.shared_data[TALLY_CHECKOUT] += 1
# - first in waits until all other processes have checked out, and cleans tub
if first_in:
wait(TALLY_CHECKOUT, self.num_processes)
with self.shared_data_lock:
# zero shared_data, clear TALLY_CHECKIN, and TALLY_CHECKOUT semaphores
self.shared_data[:] = 0
logger.info("first_in clearing shared_data")
# convert summed numpy array data to conform to original dataframe
global_modeled_size_df = \
pd.DataFrame(data=global_modeled_size_array,
index=local_modeled_size.index,
columns=local_modeled_size.columns)
return global_modeled_size_df
def set_choices(self, choices, segment_ids):
"""
aggregate individual location choices to modeled_size by zone and segment
Parameters
----------
choices : pandas.Series
zone id of location choice indexed by person_id
segment_ids : pandas.Series
segment id tag for this individual indexed by person_id
Returns
-------
updates self.modeled_size
"""
modeled_size = pd.DataFrame(index=self.desired_size.index)
for seg_name in self.desired_size:
segment_choices = \
choices[(segment_ids == self.segment_ids[seg_name])]
modeled_size[seg_name] = segment_choices.value_counts()
modeled_size = modeled_size.fillna(0).astype(int)
if self.num_processes == 1:
# - not multiprocessing
self.modeled_size = modeled_size
else:
# - if we are multiprocessing, we have to aggregate across sub-processes
self.modeled_size = self.synchronize_choices(modeled_size)
def check_fit(self, iteration):
"""
Check convergence criteria fit of modeled_size to target desired_size
(For multiprocessing, this is global modeled_size summed across processes,
so each process will independently calculate the same result.)
Parameters
----------
iteration: int
iteration number (informational, for num_fail and max_diff history columns)
Returns
-------
converged: boolean
"""
# fixme
if not self.use_shadow_pricing:
return False
assert self.modeled_size is not None
assert self.desired_size is not None
# - convergence criteria for check_fit
# ignore convergence criteria for zones smaller than size_threshold
size_threshold = self.shadow_settings['SIZE_THRESHOLD']
# zone passes if modeled is within percent_tolerance of desired_size
percent_tolerance = self.shadow_settings['PERCENT_TOLERANCE']
# max percentage of zones allowed to fail
fail_threshold = self.shadow_settings['FAIL_THRESHOLD']
modeled_size = self.modeled_size
desired_size = self.desired_size
abs_diff = (desired_size - modeled_size).abs()
rel_diff = abs_diff / modeled_size
# ignore zones where desired_size < threshold
rel_diff.where(desired_size >= size_threshold, 0, inplace=True)
# ignore zones where rel_diff < percent_tolerance
rel_diff.where(rel_diff > (percent_tolerance / 100.0), 0, inplace=True)
self.num_fail['iter%s' % iteration] = (rel_diff > 0).sum()
self.max_abs_diff['iter%s' % iteration] = abs_diff.max()
self.max_rel_diff['iter%s' % iteration] = rel_diff.max()
total_fails = (rel_diff > 0).values.sum()
# FIXME - should not count zones where desired_size < threshold? (could calc in init)
max_fail = (fail_threshold / 100.0) * util.iprod(desired_size.shape)
converged = (total_fails <= max_fail)
# for c in desired_size:
# print("check_fit %s segment %s" % (self.model_selector, c))
# print(" modeled %s" % (modeled_size[c].sum()))
# print(" desired %s" % (desired_size[c].sum()))
# print(" max abs diff %s" % (abs_diff[c].max()))
# print(" max rel diff %s" % (rel_diff[c].max()))
logger.info("check_fit %s iteration: %s converged: %s max_fail: %s total_fails: %s" %
(self.model_selector, iteration, converged, max_fail, total_fails))
# - convergence stats
if converged or iteration == self.max_iterations:
logger.info("\nshadow_pricing max_abs_diff\n%s" % self.max_abs_diff)
logger.info("\nshadow_pricing max_rel_diff\n%s" % self.max_rel_diff)
logger.info("\nshadow_pricing num_fail\n%s" % self.num_fail)
return converged
def update_shadow_prices(self):
"""
Adjust shadow_prices based on relative values of modeled_size and desired_size.
This is the heart of the shadow pricing algorithm.
The presumption is that shadow_price_adjusted_desired_size (along with other attractors)
is being used in a utility expression in a location choice model. The goal is to get the
aggregate location modeled size (choice aggregated by model_selector segment and zone) to
match desired_size. Since the location choice model may not achieve that goal initially,
we create a 'shadow price' that tweaks the size_term to encourage the aggregate choices to
approach the desired_size targets.
shadow_prices is a table of coefficients (one for each zone and segment) that increases or
decreases the size term according to whether the modeled population is less than or greater
than the desired_size. If too few total choices are made for a particular zone and
segment, then its shadow_price is increased; if too many, then it is decreased.
Since the location choice is being made according to a variety of utilities in the
expression file, whose relative weights are unknown to this algorithm, the choice of
how to adjust the shadow_price is not completely straightforward. CTRAMP and Daysim use
different strategies (see below) and there may not be a single method that works best for
all expression files. This would be a nice project for the mathematically inclined.
Returns
-------
updates self.shadow_prices
"""
assert self.use_shadow_pricing
shadow_price_method = self.shadow_settings['SHADOW_PRICE_METHOD']
# can't update_shadow_prices until after first iteration
# modeled_size should have been set by set_choices at end of previous iteration
assert self.modeled_size is not None
assert self.desired_size is not None
assert self.shadow_prices is not None
if shadow_price_method == 'ctramp':
# - CTRAMP
"""
if ( modeledDestinationLocationsByDestZone > 0 )
shadowPrice *= ( scaledSize / modeledDestinationLocationsByDestZone );
// else
// shadowPrice *= scaledSize;
"""
damping_factor = self.shadow_settings['DAMPING_FACTOR']
assert 0 < damping_factor <= 1
new_scale_factor = self.desired_size / self.modeled_size
damped_scale_factor = 1 + (new_scale_factor - 1) * damping_factor
new_shadow_prices = self.shadow_prices * damped_scale_factor
# following CTRAMP (revised version - with 0 dest zone case lines commented out)
# avoid zero-divide for 0 modeled_size, by leaving shadow_prices unchanged
new_shadow_prices.where(self.modeled_size > 0, self.shadow_prices, inplace=True)
elif shadow_price_method == 'daysim':
# - Daysim
"""
if modeled > desired: # if modeled is too high, increase shadow price
target = min(
modeled,
desired * (1 + percent_tolerance),
desired + absolute_tolerance)
if modeled < desired # modeled is too low, decrease shadow price
target = max(
modeled,
desired * (1 - percentTolerance),
desired - absoluteTolerance)
shadow_price = shadow_price + log(np.maximum(target, 0.01) / np.maximum(modeled, 0.01))
"""
# FIXME should these be the same as PERCENT_TOLERANCE and FAIL_THRESHOLD above?
absolute_tolerance = self.shadow_settings['DAYSIM_ABSOLUTE_TOLERANCE']
percent_tolerance = self.shadow_settings['DAYSIM_PERCENT_TOLERANCE'] / 100.0
assert 0 <= percent_tolerance <= 1
target = np.where(
self.modeled_size > self.desired_size,
np.minimum(self.modeled_size,
np.minimum(self.desired_size * (1 + percent_tolerance),
self.desired_size + absolute_tolerance)),
np.maximum(self.modeled_size,
np.maximum(self.desired_size * (1 - percent_tolerance),
self.desired_size - absolute_tolerance)))
# adjustment = np.log(np.maximum(target, 0.01) / np.maximum(self.modeled_size, 0.01))
adjustment = np.log(np.maximum(target, 0.01) / np.maximum(self.modeled_size, 1))
new_shadow_prices = self.shadow_prices + adjustment
else:
raise RuntimeError("unknown SHADOW_PRICE_METHOD %s" % shadow_price_method)
# print("\nself.desired_size\n%s" % self.desired_size.head())
# print("\nself.modeled_size\n%s" % self.modeled_size.head())
# print("\nprevious shadow_prices\n%s" % self.shadow_prices.head())
# print("\nnew_shadow_prices\n%s" % new_shadow_prices.head())
self.shadow_prices = new_shadow_prices
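# Worked numeric example of the two update rules above (hypothetical numbers):
# - ctramp: desired_size=120, modeled_size=100, DAMPING_FACTOR=1.0 gives a scale
#   factor of 120/100 = 1.2, so the zone/segment shadow price is multiplied by 1.2
#   (the size term grows to attract more choices).
# - daysim: desired_size=120, modeled_size=100, DAYSIM_PERCENT_TOLERANCE=5 and
#   DAYSIM_ABSOLUTE_TOLERANCE=25 give target = max(100, 120*0.95, 120-25) = 114,
#   so adjustment = log(114/100) ~= 0.131 is added to the shadow price.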
def dest_size_terms(self, segment):
assert segment in self.segment_ids
size_term_adjustment = 1
utility_adjustment = 0
if self.use_shadow_pricing:
shadow_price_method = self.shadow_settings['SHADOW_PRICE_METHOD']
if shadow_price_method == 'ctramp':
size_term_adjustment = self.shadow_prices[segment]
elif shadow_price_method == 'daysim':
utility_adjustment = self.shadow_prices[segment]
else:
raise RuntimeError("unknown SHADOW_PRICE_METHOD %s" % shadow_price_method)
size_terms = pd.DataFrame({
'size_term': self.desired_size[segment],
'shadow_price_size_term_adjustment': size_term_adjustment,
'shadow_price_utility_adjustment': utility_adjustment},
index=self.desired_size.index)
assert size_terms.index.is_monotonic_increasing
return size_terms
def write_trace_files(self, iteration):
"""
Write trace files for this iteration
Writes desired_size, modeled_size, and shadow_prices tables
Trace file names are tagged with model_selector and iteration number
(e.g. self.modeled_size => shadow_price_school_modeled_size_1)
Parameters
----------
iteration: int
current iteration to tag trace file
"""
logger.info("write_trace_files iteration %s" % iteration)
if iteration == 1:
# write desired_size only on first iteration, as it doesn't change
tracing.write_csv(self.desired_size,
'shadow_price_%s_desired_size' % self.model_selector,
transpose=False)
tracing.write_csv(self.modeled_size,
'shadow_price_%s_modeled_size_%s' % (self.model_selector, iteration),
transpose=False)
if self.use_shadow_pricing:
tracing.write_csv(self.shadow_prices,
'shadow_price_%s_shadow_prices_%s' % (self.model_selector, iteration),
transpose=False)
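# A hedged usage sketch (not part of the model flow): how a single-process location
# choice loop might drive a ShadowPriceCalculator. `run_location_choice` is a
# hypothetical callable standing in for the real location choice model.
def _shadow_price_loop_sketch(spc, run_location_choice, segment_ids):
    for iteration in range(1, spc.max_iterations + 1):
        choices = run_location_choice(spc)       # pandas.Series of zone ids by person_id
        spc.set_choices(choices, segment_ids)    # aggregate to modeled_size by zone and segment
        spc.write_trace_files(iteration)
        if spc.check_fit(iteration):             # converged against desired_size?
            break
        spc.update_shadow_prices()               # otherwise adjust prices and iterate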
def block_name(model_selector):
"""
return canonical block name for model_selector
Ordinarily and ideally this would just be model_selector, but since mp_tasks saves all
shared data blocks in a common dict to pass to sub-tasks, we want to be able to override the
block naming convention to handle any collisions between model_selector names and skim names.
Until and unless that happens, we just use the model_selector name.
Parameters
----------
model_selector
Returns
-------
block_name : str
canonical block name
"""
return model_selector
def buffers_for_shadow_pricing(shadow_pricing_info):
"""
Allocate shared_data buffers for multiprocess shadow pricing
Allocates one buffer per model_selector.
Buffer datatype and shape specified by shadow_pricing_info
buffers are multiprocessing.Array (RawArray protected by a multiprocessing.Lock wrapper)
We don't actually use the wrapped version as it slows access down and doesn't provide
protection for numpy-wrapped arrays, but it does provide a convenient way to bundle
RawArray and an associated lock. (ShadowPriceCalculator uses the lock to coordinate access to
the numpy-wrapped RawArray.)
Parameters
----------
shadow_pricing_info : dict
Returns
-------
data_buffers : dict {<model_selector> : <shared_data_buffer>}
dict of multiprocessing.Array keyed by model_selector
"""
dtype = shadow_pricing_info['dtype']
block_shapes = shadow_pricing_info['block_shapes']
data_buffers = {}
for block_key, block_shape in block_shapes.items():
# buffer_size must be int, not np.int64
buffer_size = util.iprod(block_shape)
csz = buffer_size * np.dtype(dtype).itemsize
logger.info("allocating shared shadow pricing buffer %s shape %s buffer_size %s bytes %s (%s)" %
(block_key, block_shape, buffer_size, csz, util.GB(csz)))
if np.issubdtype(dtype, np.int64):
typecode = ctypes.c_int64
else:
raise RuntimeError("buffer_for_shadow_pricing unrecognized dtype %s" % dtype)
shared_data_buffer = multiprocessing.Array(typecode, buffer_size)
logger.info("buffer_for_shadow_pricing added block %s" % block_key)
data_buffers[block_key] = shared_data_buffer
return data_buffers
def shadow_price_data_from_buffers(data_buffers, shadow_pricing_info, model_selector):
"""
Parameters
----------
data_buffers : dict of {<model_selector> : <multiprocessing.Array>}
multiprocessing.Array is simply a convenient way to bundle Array and Lock
we extract the lock and wrap the RawArray in a numpy array for convenience in indexing
The shared data buffer has shape (<num_zones>, <num_segments> + 1)
extra column is for reverse semaphores with TALLY_CHECKIN and TALLY_CHECKOUT
shadow_pricing_info : dict
dict of useful info
dtype: sp_dtype,
block_shapes : OrderedDict({<model_selector>: <shape tuple>})
dict mapping model_selector to block shape (including extra column for semaphores)
e.g. {'school': (num_zones, num_segments + 1)}
model_selector : str
location type model_selector (e.g. school or workplace)
Returns
-------
shared_data, shared_data_lock
shared_data : numpy array wrapping multiprocessing.RawArray, or None (if single process)
shared_data_lock : multiprocessing.Lock, or None (if single process)
"""
assert type(data_buffers) == dict
dtype = shadow_pricing_info['dtype']
block_shapes = shadow_pricing_info['block_shapes']
if model_selector not in block_shapes:
raise RuntimeError("Model selector %s not in shadow_pricing_info" % model_selector)
if block_name(model_selector) not in data_buffers:
raise RuntimeError("Block %s not in data_buffers" % block_name(model_selector))
shape = block_shapes[model_selector]
data = data_buffers[block_name(model_selector)]
return np.frombuffer(data.get_obj(), dtype=dtype).reshape(shape), data.get_lock()
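# A hedged round-trip sketch: allocate a buffer with buffers_for_shadow_pricing and
# wrap it back into a numpy view with shadow_price_data_from_buffers. The literal
# shapes and the 'school' key are hypothetical.
def _buffer_round_trip_sketch():
    info = {'dtype': np.int64,
            'block_shapes': OrderedDict([('school', (10, 3 + 1))])}
    buffers = buffers_for_shadow_pricing(info)
    data, lock = shadow_price_data_from_buffers(buffers, info, 'school')
    assert data.shape == (10, 4)   # 10 zones, 3 segments plus the tally column
    return data, lock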
def load_shadow_price_calculator(model_settings):
"""
Initialize ShadowPriceCalculator for model_selector (e.g. school or workplace)
If multiprocessing, get the shared_data buffer used to aggregate global modeled_size
across sub-processes
Parameters
----------
model_settings : dict
Returns
-------
spc : ShadowPriceCalculator
"""
num_processes = inject.get_injectable('num_processes', 1)
model_selector = model_settings['MODEL_SELECTOR']
# - get shared_data from data_buffers (if multiprocessing)
data_buffers = inject.get_injectable('data_buffers', None)
if data_buffers is not None:
logger.info('Using existing data_buffers for shadow_price')
# - shadow_pricing_info
shadow_pricing_info = inject.get_injectable('shadow_pricing_info', None)
assert shadow_pricing_info is not None
# - extract data buffer and reshape as numpy array
data, lock = \
shadow_price_data_from_buffers(data_buffers, shadow_pricing_info, model_selector)
else:
assert num_processes == 1
data = None # ShadowPriceCalculator will allocate its own data
lock = None
# - ShadowPriceCalculator
spc = ShadowPriceCalculator(
model_settings,
num_processes, data, lock)
return spc
@inject.step()
def add_size_tables():
"""
inject tour_destination_size_terms tables for each model_selector (e.g. school, workplace)
Size tables are pandas dataframes with location counts for model_selector by zone and segment
tour_destination_size_terms
if using shadow pricing, we scale size_table counts to sample population
(in which case, they have to be created while single-process)
Scaling is problematic as it breaks household result replicability across sample sizes
It also changes the magnitude of the size terms so if they are used as utilities in
expression files, their importance will diminish relative to other utilities as the sample
size decreases.
Scaling makes most sense for a full sample in conjunction with shadow pricing, where
shadow prices can be adjusted iteratively to bring modelled counts into line with desired
(size table) counts.
"""
use_shadow_pricing = bool(config.setting('use_shadow_pricing'))
shadow_settings = config.read_model_settings('shadow_pricing.yaml')
shadow_pricing_models = shadow_settings.get('shadow_pricing_models')
if shadow_pricing_models is None:
logger.warning('shadow_pricing_models list not found in shadow_pricing settings')
return
# probably ought not scale if not shadow_pricing (breaks partial sample replicability)
# but this allows compatibility with existing CTRAMP behavior...
scale_size_table = shadow_settings.get('SCALE_SIZE_TABLE', False)
# shadow_pricing_models is dict of {<model_selector>: <model_name>}
# since these are scaled to model size, they have to be created while single-process
for model_selector, model_name in shadow_pricing_models.items():
model_settings = config.read_model_settings(model_name)
assert model_selector == model_settings['MODEL_SELECTOR']
assert 'SEGMENT_IDS' in model_settings, f"missing SEGMENT_IDS setting in {model_name} model_settings"
segment_ids = model_settings['SEGMENT_IDS']
chooser_table_name = model_settings['CHOOSER_TABLE_NAME']
chooser_segment_column = model_settings['CHOOSER_SEGMENT_COLUMN_NAME']
choosers_df = inject.get_table(chooser_table_name).to_frame()
if 'CHOOSER_FILTER_COLUMN_NAME' in model_settings:
choosers_df = \
choosers_df[choosers_df[model_settings['CHOOSER_FILTER_COLUMN_NAME']] != 0]
# - raw_desired_size
land_use = inject.get_table('land_use')
size_terms = inject.get_injectable('size_terms')
raw_size = tour_destination_size_terms(land_use, size_terms, model_selector)
assert set(raw_size.columns) == set(segment_ids.keys())
if use_shadow_pricing or scale_size_table:
# - scale size_table counts to sample population
# scaled_size = zone_size * (total_segment_modeled / total_segment_desired)
# segment scale factor (modeled / desired) keyed by segment_name
segment_scale_factors = {}
for c in raw_size:
# total desired destination choices for this segment (summed over zones)
segment_desired_size = raw_size[c].astype(np.float64).sum()
# number of synthetic population choosers in segment
segment_chooser_count = \
(choosers_df[chooser_segment_column] == segment_ids[c]).sum()
segment_scale_factors[c] = \
segment_chooser_count / np.maximum(segment_desired_size, 1)
logger.info("add_size_tables %s segment %s "
"desired %s modeled %s scale_factor %s" %
(chooser_table_name, c,
segment_desired_size,
segment_chooser_count,
segment_scale_factors[c]))
# FIXME - should we be rounding?
scaled_size = (raw_size * segment_scale_factors).round()
else:
scaled_size = raw_size
logger.debug(f"add_size_table {size_table_name(model_selector)} ({scaled_size.shape}) for {model_selector}")
assert scaled_size.index.is_monotonic_increasing, \
f"size table {size_table_name(model_selector)} not is_monotonic_increasing"
inject.add_table(size_table_name(model_selector), scaled_size)
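# A hedged numeric sketch of the scaling above: with a toy raw_size column summing to
# 1000 desired slots and 100 sample choosers in that segment, the scale factor is
# 100 / 1000 = 0.1, so every zone's size term is scaled down to a tenth. The values
# below are hypothetical.
def _size_scaling_sketch():
    raw = pd.Series([600.0, 300.0, 100.0])   # desired size by zone
    segment_chooser_count = 100              # choosers in this segment
    scale_factor = segment_chooser_count / np.maximum(raw.sum(), 1)
    return (raw * scale_factor).round()      # -> 60.0, 30.0, 10.0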
def get_shadow_pricing_info():
"""
return dict with info about dtype and shapes of desired and modeled size tables
block shape is (num_zones, num_segments + 1)
Returns
-------
shadow_pricing_info: dict
dtype: <sp_dtype>,
block_shapes: dict {<model_selector>: <block_shape>}
"""
land_use = inject.get_table('land_use')
size_terms = inject.get_injectable('size_terms')
shadow_settings = config.read_model_settings('shadow_pricing.yaml')
# shadow_pricing_models is dict of {<model_selector>: <model_name>}
shadow_pricing_models = shadow_settings.get('shadow_pricing_models', {})
blocks = OrderedDict()
for model_selector in shadow_pricing_models:
sp_rows = len(land_use)
sp_cols = len(size_terms[size_terms.model_selector == model_selector])
# extra tally column for TALLY_CHECKIN and TALLY_CHECKOUT semaphores
blocks[block_name(model_selector)] = (sp_rows, sp_cols + 1)
sp_dtype = np.int64
shadow_pricing_info = {
'dtype': sp_dtype,
'block_shapes': blocks,
}
for k in shadow_pricing_info:
logger.debug("shadow_pricing_info %s: %s" % (k, shadow_pricing_info.get(k)))
return shadow_pricing_info
@inject.injectable(cache=True)
def shadow_pricing_info():
# when multiprocessing with shared data, mp_tasks has to call this module's
# get_shadow_pricing_info() and buffers_for_shadow_pricing()
logger.debug("loading shadow_pricing_info injectable")
return get_shadow_pricing_info()
|
|
from cStringIO import StringIO
import contextlib
import logging
from teuthology import misc as teuthology
from teuthology import contextutil
from ..orchestra import run
from ..exceptions import UnsupportedPackageTypeError
log = logging.getLogger(__name__)
HADOOP_2x_URL = "http://apache.osuosl.org/hadoop/common/hadoop-2.5.2/hadoop-2.5.2.tar.gz"
def dict_to_hadoop_conf(items):
out = "<configuration>\n"
for key, value in items.iteritems():
out += " <property>\n"
out += " <name>" + key + "</name>\n"
out += " <value>" + value + "</value>\n"
out += " </property>\n"
out += "</configuration>\n"
return out
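# For example (hypothetical key and value), dict_to_hadoop_conf({'dfs.replication': '1'})
# produces:
# <configuration>
#  <property>
#   <name>dfs.replication</name>
#   <value>1</value>
#  </property>
# </configuration>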
def get_slaves_data(ctx):
tempdir = teuthology.get_testdir(ctx)
path = "{tdir}/hadoop/etc/hadoop/slaves".format(tdir=tempdir)
nodes = ctx.cluster.only(teuthology.is_type('hadoop.slave'))
hosts = [s.ssh.get_transport().getpeername()[0] for s in nodes.remotes]
data = '\n'.join(hosts)
return path, data
def get_masters_data(ctx):
tempdir = teuthology.get_testdir(ctx)
path = "{tdir}/hadoop/etc/hadoop/masters".format(tdir=tempdir)
nodes = ctx.cluster.only(teuthology.is_type('hadoop.master'))
hosts = [s.ssh.get_transport().getpeername()[0] for s in nodes.remotes]
data = '\n'.join(hosts)
return path, data
def get_core_site_data(ctx, config):
tempdir = teuthology.get_testdir(ctx)
path = "{tdir}/hadoop/etc/hadoop/core-site.xml".format(tdir=tempdir)
nodes = ctx.cluster.only(teuthology.is_type('hadoop.master'))
host = [s.ssh.get_transport().getpeername()[0] for s in nodes.remotes][0]
conf = {}
if config.get('hdfs', False):
conf.update({
'fs.defaultFS': 'hdfs://{namenode}:9000',
'hadoop.tmp.dir': '{tdir}/hadoop_tmp',
})
else:
conf.update({
'fs.default.name': 'ceph://{namenode}:6789/',
'fs.defaultFS': 'ceph://{namenode}:6789/',
'ceph.conf.file': '/etc/ceph/ceph.conf',
'ceph.mon.address': '{namenode}:6789',
'ceph.auth.id': 'admin',
#'ceph.data.pools': 'cephfs_data',
'fs.AbstractFileSystem.ceph.impl': 'org.apache.hadoop.fs.ceph.CephFs',
'fs.ceph.impl': 'org.apache.hadoop.fs.ceph.CephFileSystem',
})
data_tmpl = dict_to_hadoop_conf(conf)
return path, data_tmpl.format(tdir=tempdir, namenode=host)
def get_mapred_site_data(ctx):
data_tmpl = """
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>{namenode}:9001</value>
</property>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
"""
tempdir = teuthology.get_testdir(ctx)
path = "{tdir}/hadoop/etc/hadoop/mapred-site.xml".format(tdir=tempdir)
nodes = ctx.cluster.only(teuthology.is_type('hadoop.master'))
hosts = [s.ssh.get_transport().getpeername()[0] for s in nodes.remotes]
assert len(hosts) == 1
host = hosts[0]
return path, data_tmpl.format(namenode=host)
def get_yarn_site_data(ctx):
conf = {}
conf.update({
'yarn.resourcemanager.resourcetracker.address': '{namenode}:8025',
'yarn.resourcemanager.scheduler.address': '{namenode}:8030',
'yarn.resourcemanager.address': '{namenode}:8050',
'yarn.resourcemanager.admin.address': '{namenode}:8041',
'yarn.resourcemanager.hostname': '{namenode}',
'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
'yarn.nodemanager.sleep-delay-before-sigkill.ms': '10000',
})
data_tmpl = dict_to_hadoop_conf(conf)
tempdir = teuthology.get_testdir(ctx)
path = "{tdir}/hadoop/etc/hadoop/yarn-site.xml".format(tdir=tempdir)
nodes = ctx.cluster.only(teuthology.is_type('hadoop.master'))
hosts = [s.ssh.get_transport().getpeername()[0] for s in nodes.remotes]
assert len(hosts) == 1
host = hosts[0]
return path, data_tmpl.format(namenode=host)
def get_hdfs_site_data(ctx):
data = """
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
"""
tempdir = teuthology.get_testdir(ctx)
path = "{tdir}/hadoop/etc/hadoop/hdfs-site.xml".format(tdir=tempdir)
return path, data
def configure(ctx, config, hadoops):
tempdir = teuthology.get_testdir(ctx)
log.info("Writing Hadoop slaves file...")
for remote in hadoops.remotes:
path, data = get_slaves_data(ctx)
teuthology.write_file(remote, path, StringIO(data))
log.info("Writing Hadoop masters file...")
for remote in hadoops.remotes:
path, data = get_masters_data(ctx)
teuthology.write_file(remote, path, StringIO(data))
log.info("Writing Hadoop core-site.xml file...")
for remote in hadoops.remotes:
path, data = get_core_site_data(ctx, config)
teuthology.write_file(remote, path, StringIO(data))
log.info("Writing Hadoop yarn-site.xml file...")
for remote in hadoops.remotes:
path, data = get_yarn_site_data(ctx)
teuthology.write_file(remote, path, StringIO(data))
log.info("Writing Hadoop hdfs-site.xml file...")
for remote in hadoops.remotes:
path, data = get_hdfs_site_data(ctx)
teuthology.write_file(remote, path, StringIO(data))
log.info("Writing Hadoop mapred-site.xml file...")
for remote in hadoops.remotes:
path, data = get_mapred_site_data(ctx)
teuthology.write_file(remote, path, StringIO(data))
log.info("Setting JAVA_HOME in hadoop-env.sh...")
for remote in hadoops.remotes:
path = "{tdir}/hadoop/etc/hadoop/hadoop-env.sh".format(tdir=tempdir)
if remote.os.package_type == 'rpm':
data = "JAVA_HOME=/usr/lib/jvm/java\n"
elif remote.os.package_type == 'deb':
data = "JAVA_HOME=/usr/lib/jvm/default-java\n"
else:
raise UnsupportedPackageTypeError(remote)
teuthology.prepend_lines_to_file(remote, path, data)
if config.get('hdfs', False):
log.info("Formatting HDFS...")
testdir = teuthology.get_testdir(ctx)
hadoop_dir = "{tdir}/hadoop/".format(tdir=testdir)
masters = ctx.cluster.only(teuthology.is_type('hadoop.master'))
assert len(masters.remotes) == 1
master = masters.remotes.keys()[0]
master.run(
args = [
hadoop_dir + "bin/hadoop",
"namenode",
"-format"
],
wait = True,
)
@contextlib.contextmanager
def install_hadoop(ctx, config):
testdir = teuthology.get_testdir(ctx)
log.info("Downloading Hadoop...")
hadoop_tarball = "{tdir}/hadoop.tar.gz".format(tdir=testdir)
hadoops = ctx.cluster.only(teuthology.is_type('hadoop'))
run.wait(
hadoops.run(
args = [
'wget',
'-nv',
'-O',
hadoop_tarball,
HADOOP_2x_URL
],
wait = False,
)
)
log.info("Create directory for Hadoop install...")
hadoop_dir = "{tdir}/hadoop".format(tdir=testdir)
run.wait(
hadoops.run(
args = [
'mkdir',
hadoop_dir
],
wait = False,
)
)
log.info("Unpacking Hadoop...")
run.wait(
hadoops.run(
args = [
'tar',
'xzf',
hadoop_tarball,
'--strip-components=1',
'-C',
hadoop_dir
],
wait = False,
)
)
log.info("Removing Hadoop download...")
run.wait(
hadoops.run(
args = [
'rm',
hadoop_tarball
],
wait = False,
)
)
log.info("Create Hadoop temporary directory...")
hadoop_tmp_dir = "{tdir}/hadoop_tmp".format(tdir=testdir)
run.wait(
hadoops.run(
args = [
'mkdir',
hadoop_tmp_dir
],
wait = False,
)
)
if not config.get('hdfs', False):
log.info("Fetching cephfs-hadoop...")
sha1, url = teuthology.get_ceph_binary_url(
package = "hadoop",
format = "jar",
dist = "precise",
arch = "x86_64",
flavor = "basic",
branch = "master")
run.wait(
hadoops.run(
args = [
'wget',
'-nv',
'-O',
"{tdir}/cephfs-hadoop.jar".format(tdir=testdir), # FIXME
url + "/cephfs-hadoop-0.80.6.jar", # FIXME
],
wait = False,
)
)
run.wait(
hadoops.run(
args = [
'mv',
"{tdir}/cephfs-hadoop.jar".format(tdir=testdir),
"{tdir}/hadoop/share/hadoop/common/".format(tdir=testdir),
],
wait = False,
)
)
# Copy JNI native bits. Need to do this explicitly because the
# handling is dependent on the os-type.
for remote in hadoops.remotes:
libcephfs_jni_path = None
if remote.os.package_type == 'rpm':
libcephfs_jni_path = "/usr/lib64/libcephfs_jni.so.1.0.0"
elif remote.os.package_type == 'deb':
libcephfs_jni_path = "/usr/lib/jni/libcephfs_jni.so"
else:
raise UnsupportedPackageTypeError(remote)
libcephfs_jni_fname = "libcephfs_jni.so"
remote.run(
args = [
'cp',
libcephfs_jni_path,
"{tdir}/hadoop/lib/native/{fname}".format(tdir=testdir,
fname=libcephfs_jni_fname),
])
run.wait(
hadoops.run(
args = [
'cp',
"/usr/share/java/libcephfs.jar",
"{tdir}/hadoop/share/hadoop/common/".format(tdir=testdir),
],
wait = False,
)
)
configure(ctx, config, hadoops)
try:
yield
finally:
run.wait(
hadoops.run(
args = [
'rm',
'-rf',
hadoop_dir,
hadoop_tmp_dir
],
wait = False,
)
)
@contextlib.contextmanager
def start_hadoop(ctx, config):
testdir = teuthology.get_testdir(ctx)
hadoop_dir = "{tdir}/hadoop/".format(tdir=testdir)
masters = ctx.cluster.only(teuthology.is_type('hadoop.master'))
assert len(masters.remotes) == 1
master = masters.remotes.keys()[0]
log.info("Stopping Hadoop daemons")
master.run(
args = [
hadoop_dir + "sbin/stop-yarn.sh"
],
wait = True,
)
master.run(
args = [
hadoop_dir + "sbin/stop-dfs.sh"
],
wait = True,
)
if config.get('hdfs', False):
log.info("Starting HDFS...")
master.run(
args = [
hadoop_dir + "sbin/start-dfs.sh"
],
wait = True,
)
log.info("Starting YARN...")
master.run(
args = [
hadoop_dir + "sbin/start-yarn.sh"
],
wait = True,
)
try:
yield
finally:
log.info("Stopping Hadoop daemons")
master.run(
args = [
hadoop_dir + "sbin/stop-yarn.sh"
],
wait = True,
)
master.run(
args = [
hadoop_dir + "sbin/stop-dfs.sh"
],
wait = True,
)
run.wait(
ctx.cluster.run(
args = [
'sudo',
'skill',
'-9',
'java'
],
wait = False
)
)
@contextlib.contextmanager
def task(ctx, config):
if config is None:
config = {}
assert isinstance(config, dict), "task hadoop config must be dictionary"
overrides = ctx.config.get('overrides', {})
teuthology.deep_merge(config, overrides.get('hadoop', {}))
tasks = [
lambda: install_hadoop(ctx=ctx, config=config),
lambda: start_hadoop(ctx=ctx, config=config),
]
with contextutil.nested(*tasks):
yield
|
|
"""
A buffered iterator for big arrays.
This module solves the problem of iterating over a big file-based array
without having to read it into memory. The `Arrayterator` class wraps
an array object, and when iterated it will return sub-arrays with at most
a user-specified number of elements.
"""
from __future__ import division, absolute_import, print_function
from operator import mul
from functools import reduce
from numpy.compat import long
__all__ = ['Arrayterator']
class Arrayterator(object):
"""
Buffered iterator for big arrays.
`Arrayterator` creates a buffered iterator for reading big arrays in small
contiguous blocks. The class is useful for objects stored in the
file system. It allows iteration over the object *without* reading
everything in memory; instead, small blocks are read and iterated over.
`Arrayterator` can be used with any object that supports multidimensional
slices. This includes NumPy arrays, but also variables from
Scientific.IO.NetCDF or pynetcdf for example.
Parameters
----------
var : array_like
The object to iterate over.
buf_size : int, optional
The buffer size. If `buf_size` is supplied, the maximum amount of
data that will be read into memory is `buf_size` elements.
Default is None, which will read as many elements as possible
into memory.
Attributes
----------
var
buf_size
start
stop
step
shape
flat
See Also
--------
ndenumerate : Multidimensional array iterator.
flatiter : Flat array iterator.
memmap : Create a memory-map to an array stored in a binary file on disk.
Notes
-----
The algorithm works by first finding a "running dimension", along which
the blocks will be extracted. Given an array of dimensions
``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
first dimension will be used. If, on the other hand,
``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
Blocks are extracted along this dimension, and when the last block is
returned the process continues from the next dimension, until all
elements have been read.
Examples
--------
>>> import numpy as np
>>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
>>> a_itor = np.lib.arrayterator.Arrayterator(a, 2)
>>> a_itor.shape
(3, 4, 5, 6)
Now we can iterate over ``a_itor``, and it will return arrays of size
two. Since `buf_size` was smaller than any dimension, the first
dimension will be iterated over first:
>>> for subarr in a_itor:
... if not subarr.all():
... print(subarr, subarr.shape)
...
[[[[0 1]]]] (1, 1, 1, 2)
"""
def __init__(self, var, buf_size=None):
self.var = var
self.buf_size = buf_size
self.start = [0 for dim in var.shape]
self.stop = [dim for dim in var.shape]
self.step = [1 for dim in var.shape]
def __getattr__(self, attr):
return getattr(self.var, attr)
def __getitem__(self, index):
"""
Return a new arrayterator.
"""
# Fix index, handling ellipsis and incomplete slices.
if not isinstance(index, tuple):
index = (index,)
fixed = []
length, dims = len(index), len(self.shape)
for slice_ in index:
if slice_ is Ellipsis:
fixed.extend([slice(None)] * (dims-length+1))
length = len(fixed)
elif isinstance(slice_, (int, long)):
fixed.append(slice(slice_, slice_+1, 1))
else:
fixed.append(slice_)
index = tuple(fixed)
if len(index) < dims:
index += (slice(None),) * (dims-len(index))
# Return a new arrayterator object.
out = self.__class__(self.var, self.buf_size)
for i, (start, stop, step, slice_) in enumerate(
zip(self.start, self.stop, self.step, index)):
out.start[i] = start + (slice_.start or 0)
out.step[i] = step * (slice_.step or 1)
out.stop[i] = start + (slice_.stop or stop-start)
out.stop[i] = min(stop, out.stop[i])
return out
def __array__(self):
"""
Return corresponding data.
"""
slice_ = tuple(slice(*t) for t in zip(
self.start, self.stop, self.step))
return self.var[slice_]
@property
def flat(self):
"""
A 1-D flat iterator for Arrayterator objects.
This iterator returns elements of the array to be iterated over in
`Arrayterator` one by one. It is similar to `flatiter`.
See Also
--------
`Arrayterator`
flatiter
Examples
--------
>>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
>>> a_itor = np.lib.arrayterator.Arrayterator(a, 2)
>>> for subarr in a_itor.flat:
... if not subarr:
... print(subarr, type(subarr))
...
0 <type 'numpy.int32'>
"""
for block in self:
for value in block.flat:
yield value
@property
def shape(self):
"""
The shape of the array to be iterated over.
For an example, see `Arrayterator`.
"""
return tuple(((stop-start-1)//step+1) for start, stop, step in
zip(self.start, self.stop, self.step))
def __iter__(self):
# Skip arrays with degenerate dimensions
if [dim for dim in self.shape if dim <= 0]:
return
start = self.start[:]
stop = self.stop[:]
step = self.step[:]
ndims = len(self.var.shape)
while True:
count = self.buf_size or reduce(mul, self.shape)
# iterate over each dimension, looking for the
# running dimension (i.e., the dimension along which
# the blocks will be built)
rundim = 0
for i in range(ndims-1, -1, -1):
# if count is zero we ran out of elements to read
# along higher dimensions, so we read only a single position
if count == 0:
stop[i] = start[i]+1
elif count <= self.shape[i]:
# limit along this dimension
stop[i] = start[i] + count*step[i]
rundim = i
else:
# read everything along this dimension
stop[i] = self.stop[i]
stop[i] = min(self.stop[i], stop[i])
count = count//self.shape[i]
# yield a block
slice_ = tuple(slice(*t) for t in zip(start, stop, step))
yield self.var[slice_]
# Update start position, taking care of overflow to
# other dimensions
start[rundim] = stop[rundim] # start where we stopped
for i in range(ndims-1, 0, -1):
if start[i] >= self.stop[i]:
start[i] = self.start[i]
start[i-1] += self.step[i-1]
if start[0] >= self.stop[0]:
return
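# A hedged worked example of the running-dimension logic above: for a (3, 4, 5, 6)
# array and buf_size=60, the last two dimensions are read whole (60 // 6 // 5 == 2),
# dimension 1 becomes the running dimension with a span of 2, and dimension 0 is read
# one index at a time, so every block has shape (1, 2, 5, 6) == 60 elements
# (6 blocks in total). The helper name is illustrative only.
def _arrayterator_block_shape_demo():
    import numpy as np
    a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
    return [block.shape for block in Arrayterator(a, 60)]  # [(1, 2, 5, 6)] * 6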
|
|
#!/usr/bin/python
import sys
import pygame
from math import ceil
from core import *
from walkers import *
WALKERS = {
'A*': AStarWalker,
'Dijkstra': DijkstraWalker,
'BFS': BFSWalker
}
BRUSHES = ['Wall', 'Weight-1', 'Weight-2', 'Weight-3']
class Ring(object):
"""
A very simple implementation of a "ring", i.e. a cyclic
linked list with one "active" item and the ability to
switch it to next or previous item.
"""
def __init__(self, collection):
"""Build a ring upon given collection"""
self._idx = 0
self._items = collection
def get(self):
"""get active item"""
return self._items[self._idx]
def set_active(self, val):
self._idx = self._items.index(val)
def items(self):
for i in self._items:
yield i
def next(self):
self._idx += 1
if self._idx >= len(self._items):
self._idx = -len(self._items)
def prev(self):
self._idx -= 1
if self._idx < -len(self._items):
self._idx = len(self._items) - 1
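# A hedged usage sketch of Ring (hypothetical items): next() cycles forward and wraps
# from the last item back to the first; prev() walks the other way.
def _ring_demo():
    r = Ring(['A*', 'Dijkstra', 'BFS'])
    r.next()
    r.next()
    r.next()        # wraps around past 'BFS'
    return r.get()  # -> 'A*'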
class Menu(object):
"""
Simple pygame configuration menu.
"""
def __init__(self, surf, menu_dict):
"""
Make a Menu() occupying the whole surface "surf".
"menu_dict" is a dictionary describing configuration
menu options and their possible values.
Dictionary format:
{
"Option-1": ["value1", ..., "valueN"],
...,
"Option-N": [ ... ]
}
"""
self._surf = surf
self._font = pygame.font.SysFont(DEFAULT_FONT, MENU_FONT_SIZE,
bold=True)
self._mdict = {}
for k, v in menu_dict.iteritems():
self._mdict[k] = Ring(v)
# Menu options should be sorted alphabetically
moptions = self._mdict.keys()
moptions.sort()
self._menu = Ring(moptions)
# self._active denotes whether user switched
# focus to the menu
self._active = False
# Ensure that we have enough space on the surface
# to display the longest possible variant of the menu
self._check_size()
def selected_items(self):
"""Get currently selected values of all options."""
return {opt: self._mdict[opt].get() for opt in self._menu.items()}
def select(self, option, value):
"""Select given value of the option (make it active)"""
self._mdict[option].set_active(value)
def draw(self):
"""Display the menu"""
self._surf.fill(pygame.Color(MENU_BG_COLOR))
pygame.draw.rect(self._surf, pygame.Color(MENU_FG_COLOR),
(0, 0, self._surf.get_width(),
self._surf.get_height()), 1)
buttons = []
buttons_width = 0
for opt in self._menu.items():
button = {}
mval = self._mdict[opt].get()
button['text'] = "%s: %s" % (opt, mval)
button['fg'] = MENU_FG_COLOR
button['bg'] = MENU_BG_COLOR
# If focus is on the given option,
# draw it with different colors.
if self._is_selected(opt):
button['fg'] = MENU_SEL_FG_COLOR
button['bg'] = MENU_SEL_BG_COLOR
buttons_width += self._font.size(button['text'])[0]
buttons.append(button)
# fspace - a width of space not occupied by buttons text
fspace = (self._surf.get_width() - buttons_width) / len(buttons)
offset = 1
for i in xrange(0, len(buttons)):
button = buttons[i]
is_last = (i == len(buttons) - 1)
img = self._font.render(button['text'], True,
pygame.Color(button['fg']),
pygame.Color(button['bg']))
if is_last:
fspace = self._surf.get_width() - (offset + img.get_width())
width = img.get_width() + fspace
# Prepare a rectangle for button
pygame.draw.rect(self._surf, pygame.Color(button['bg']),
(offset, 1, width, self._surf.get_height() - 2))
# Center text in the rectangle
self._surf.blit(img, (offset + fspace / 2,
(MENU_HEIGHT - MENU_FONT_SIZE) / 2))
offset += width
# Draw a line to visually separate one button from another
pygame.draw.line(self._surf,
pygame.Color(MENU_FG_COLOR), (offset - 1, 0),
(offset - 1, self._surf.get_height()), 1)
def kbd_event(self, event):
"""Handle keyboard events"""
# When user presses ESC, spdemo sets
# focus to the menu. Further pressing
# of ESC switches focus back.
if event.key == pygame.K_ESCAPE:
self._active = not self._active
if not self._active:
return
if event.key == pygame.K_RIGHT:
self._menu.next()
elif event.key == pygame.K_LEFT:
self._menu.prev()
elif event.key == pygame.K_UP:
self._mdict[self._menu.get()].next()
elif event.key == pygame.K_DOWN:
self._mdict[self._menu.get()].prev()
def is_active(self):
return self._active
def _is_selected(self, mkey):
if not self._active:
return False
return (mkey == self._menu.get())
def _check_size(self):
total_width = 0
max_height = 0
for mkey in self._menu.items():
maxval = max(self._mdict[mkey].items(), key=len)
w, h = self._font.size("%s: %s" % (mkey, maxval))
total_width += w + 4
if h > max_height:
max_height = h
if (total_width > self._surf.get_width()
or max_height > self._surf.get_height()):
raise ValueError(("Surface is too small to fit the menu. "
"Min size: %dx%d" %
(ceil(float(total_width)/DEFAULT_SQ_SIZE),
ceil(float(max_height)/DEFAULT_SQ_SIZE))))
class Point(object):
"""
A source or destination point of the SPdemoGrid.
"""
def __init__(self, row, col):
self.row = row
self.col = col
def __eq__(self, other):
return (self.row == other.row and
self.col == other.col)
class SPDemoGrid(object):
"""
SPDemoGrid() does all the visualization and grid
related event handling.
"""
def __init__(self, rows, cols, surface):
"""
Initialise the grid of size "rows" x "cols"
on the given surface.
"""
self._rows = rows
self._cols = cols
if (surface.get_width() < (cols * DEFAULT_SQ_SIZE) or
surface.get_height() < (rows * DEFAULT_SQ_SIZE)):
raise ValueError("Surface is too small")
self._surf = surface
# Source and destination points
self._srcp = Point(0, 0)
self._dstp = Point(self._rows - 1, self._cols - 1)
# The underneath graph
self._graph = GridGraph(self._rows, self._cols)
self._font = pygame.font.SysFont(DEFAULT_FONT, MENU_FONT_SIZE)
# A name of current shortest path algorithm
# (or "walker") the grid uses
self._walker_class = DEFAULT_WALKER
# current brush (either wall or one of predefined weights)
self._brush = DEFAULT_BRUSH
# denotes whether diagonal movements are possible
self._use_diags = DEFAULT_USE_DIAGS
self._spoint = None
self._walker = None
self._grid_changed = True
# a list of points forming shortest path
self._path = None
# denotes whether visualization is started
self._started = False
# if True, user can draw walls or set weights
# on the grid
self._brush_enabled = False
def set_walker(self, wname):
assert wname in WALKERS.keys()
self._walker_class = wname
def set_brush(self, bname):
assert bname in BRUSHES
self._brush = bname
def set_diagonals(self, dval):
assert dval in ['On', 'Off']
self._use_diags = (dval == 'On')
def draw(self):
if self._started:
if not self._walker.finished():
self._walker.step()
self._grid_changed = True
else:
self._path = self._walker.get_path()
self._draw_grid()
self._draw_path()
self._draw_points()
def kbd_event(self, event):
"""
Handle grid related keyboard events
"""
if event.key == pygame.K_SPACE:
# Pause/Resume the visualization
self._started = not self._started
if self._started and self._walker is None:
# Setup the walker if it hasn't been set up yet
src_cell = self._graph.get_cell(self._srcp.row, self._srcp.col)
dst_cell = self._graph.get_cell(self._dstp.row, self._dstp.col)
wclass = self._walker_class
self._walker = WALKERS[wclass](self._graph, src_cell,
dst_cell, self._use_diags)
elif event.key == pygame.K_c:
# Just clean everything from the grid
self.clear()
def mouse_event(self, event):
"""
Handle grid related mouse event
"""
if self._started or self._path is not None:
# Ignore events if visualization is in progress
# or has been finished.
return
if event.type == pygame.MOUSEBUTTONDOWN:
# Select source or destination point to move
self._spoint = self._point_on_mouse(event.pos)
if self._spoint is None:
# Or enter to the drawing mode
self._brush_enabled = True
self._do_brush(event.pos, click=True)
elif event.type == pygame.MOUSEBUTTONUP:
self._spoint = None
self._brush_enabled = False
elif event.type == pygame.MOUSEMOTION:
if self._spoint is not None:
# Move the source/destination point to
# another place on the grid
row, col = self._pos_to_rowcol(event.pos)
cell = self._graph.get_cell(row, col)
if self._spoint is not None:
self._move_spoint_to_cell(cell)
elif self._brush_enabled:
# Or just draw walls/set weights to cells
self._do_brush(event.pos)
def clear(self, clear_walls=True):
"""
Clear the grid.
if "clear_walls" is False, everything will be
cleaned up except walls and weights.
"""
self._spoint = None
self._walker = None
self._path = None
self._started = False
self._brush_enabled = False
for cell in self._graph.cells():
cell.parent = None
if clear_walls:
cell.status = CellStatus.NotVisited
cell.weight = DEFAULT_CELL_WEIGHT
elif cell.status != CellStatus.Blocked:
cell.status = CellStatus.NotVisited
self._grid_changed = True
def _draw_grid(self):
if not self._grid_changed:
return
self._surf.fill(pygame.Color(GRID_BG_COLOR))
for c in self._graph.cells():
self._draw_square(c)
self._grid_changed = False
def _draw_points(self):
def draw_point(point, color):
x, y = self._get_square_xy(point.row, point.col)
radius = DEFAULT_SQ_SIZE / 2
pygame.draw.circle(self._surf, pygame.Color(color),
(x + radius, y + radius),
radius - 2)
draw_point(self._srcp, SOURCE_POINT_COLOR)
draw_point(self._dstp, DESTINATION_POINT_COLOR)
def _draw_path(self):
if self._path is None:
return
if len(self._path) == 1:
# Shortest path does not exist, no luck...
font = pygame.font.SysFont(DEFAULT_FONT, REPORT_FONT_SIZE,
bold=True)
img = font.render('Path not found', True,
pygame.Color(REPORT_FAIL_FONT_COLOR),
pygame.Color(REPORT_BG_COLOR))
# center the text
self._surf.blit(img, ((self._surf.get_width() - img.get_width())/2,
(self._surf.get_height() - img.get_height())/2))
return
# Draw a line connecting source and destination points
# through the points included to "shortest path" array.
pointlist = []
total_weight = 0
for c in self._path:
left = c.col * DEFAULT_SQ_SIZE
top = c.row * DEFAULT_SQ_SIZE
total_weight += c.weight
self._surf.fill(pygame.Color(PATH_CELL_COLOR),
(left, top, DEFAULT_SQ_SIZE - 1,
DEFAULT_SQ_SIZE - 1))
pointlist.append((left + DEFAULT_SQ_SIZE / 2,
top + DEFAULT_SQ_SIZE / 2))
pygame.draw.lines(self._surf, pygame.Color(PATH_LINE_COLOR),
False, pointlist, 3)
# and write down some numbers
font = pygame.font.SysFont(DEFAULT_FONT, REPORT_FONT_SIZE,
bold=True)
msg = ("Shortest path length: %s, weight %s"
% (len(self._path), total_weight))
img = font.render(msg, True, pygame.Color(REPORT_SUCCESS_FONT_COLOR),
# unfortunately the font looks very ugly if it doesn't
# have background :(
pygame.Color(REPORT_BG_COLOR))
self._surf.blit(img, ((self._surf.get_width() - img.get_width())/2,
(self._surf.get_height() - img.get_height())/2))
self._path = None
def _do_brush(self, pos, click=False):
if self._point_on_mouse(pos) is not None:
return
rc = self._pos_to_rowcol(pos)
cell = self._graph.get_cell(rc[0], rc[1])
if self._brush == 'Wall':
if cell.status != CellStatus.Blocked:
cell.status = CellStatus.Blocked
elif click:
cell.status = CellStatus.NotVisited
cell.weight = DEFAULT_CELL_WEIGHT
else:
weight = int(self._brush[-1])
if cell.weight != weight:
cell.weight = weight
elif click:
cell.weight = DEFAULT_CELL_WEIGHT
cell.status = CellStatus.NotVisited
self._grid_changed = True
def _draw_square(self, cell):
color = self._cell_to_color(cell)
rect = pygame.Rect((cell.col * DEFAULT_SQ_SIZE,
cell.row * DEFAULT_SQ_SIZE,
DEFAULT_SQ_SIZE, DEFAULT_SQ_SIZE))
pygame.draw.rect(self._surf, pygame.Color(GRID_FG_COLOR), rect, 1)
self._surf.fill(pygame.Color(color), (rect.left, rect.top,
rect.width - 1, rect.height - 1))
if cell.weight != DEFAULT_CELL_WEIGHT:
img = self._font.render(str(cell.weight), True,
pygame.Color(CELL_WEIGHT_COLOR),
pygame.Color(color))
self._surf.blit(img, (rect.left +
(rect.width - img.get_width())/2,
rect.top + (rect.height - img.get_height())/2))
def _move_spoint_to_cell(self, cell):
assert self._spoint is not None
if (cell.status == CellStatus.Blocked or
Point(cell.row, cell.col) in (self._srcp, self._dstp)):
return
self._spoint.row = cell.row
self._spoint.col = cell.col
self._grid_changed = True
def _point_on_mouse(self, pos):
rc = self._pos_to_rowcol(pos)
point = Point(rc[0], rc[1])
if point == self._srcp:
return self._srcp
elif point == self._dstp:
return self._dstp
else:
return None
def _pos_to_rowcol(self, pos):
def divide_coord(coord, lim):
return min(coord / DEFAULT_SQ_SIZE, lim - 1)
return (divide_coord(pos[1], self._graph.get_rows()),
divide_coord(pos[0], self._graph.get_cols()))
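# Worked example (assuming, hypothetically, DEFAULT_SQ_SIZE == 20): a mouse position
# of (45, 130) maps to row 130 / 20 = 6 and col 45 / 20 = 2, each clamped to the
# last row/column of the grid.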
def _get_square_xy(self, row, col):
return (col * DEFAULT_SQ_SIZE, row * DEFAULT_SQ_SIZE)
def _cell_to_color(self, cell):
status = cell.status
if status == CellStatus.Discovered:
return DISCOVERED_CELL_COLOR
elif status == CellStatus.Visited:
return VISITED_CELL_COLOR
elif status == CellStatus.Blocked:
return BLOCKED_CELL_COLOR
else:
if cell.weight != DEFAULT_CELL_WEIGHT:
return WEIGHTED_CELL_COLOR
return NOTVISITED_CELL_COLOR
class SPDemo(object):
def __init__(self, rows, cols):
if any([i <= 0 for i in (rows, cols)]):
raise ValueError("rows and cols must be positive")
pygame.init()
self._width = cols * DEFAULT_SQ_SIZE
self._height = rows * DEFAULT_SQ_SIZE
sysinfo = pygame.display.Info()
orig_rows = rows
orig_cols = cols
if self._width > sysinfo.current_w:
self._width = sysinfo.current_w
cols = self._width / DEFAULT_SQ_SIZE
if (self._height + MENU_HEIGHT) > sysinfo.current_h:
self._height = sysinfo.current_h - MENU_HEIGHT
rows = self._height / DEFAULT_SQ_SIZE
if orig_rows != rows or orig_cols != cols:
sys.stderr.write(("[WARNING] %sx%s doesn't fit your screen, "
"reducing to %sx%s\n" %
(orig_rows, orig_cols, rows, cols)))
self._surf = pygame.display.set_mode((self._width,
self._height + MENU_HEIGHT))
menu_surf = self._surf.subsurface((0, self._height,
self._width, MENU_HEIGHT))
menu_dict = {"Algorithm": WALKERS.keys(),
"Brush": BRUSHES,
"Diagonals": ["Off", "On"]}
self._menu = Menu(menu_surf, menu_dict)
self._menu.select('Algorithm', DEFAULT_WALKER)
self._menu.select('Brush', DEFAULT_BRUSH)
self._menu.select('Diagonals', 'On')
grid_surf = self._surf.subsurface((0, 0, self._width, self._height))
self._grid = SPDemoGrid(rows, cols, grid_surf)
def run(self):
clock = pygame.time.Clock()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
elif event.type == pygame.KEYDOWN:
if self._menu.is_active() or event.key == pygame.K_ESCAPE:
if event.key == pygame.K_ESCAPE:
self._grid.clear(clear_walls=False)
self._menu.kbd_event(event)
if not self._menu.is_active():
cfg = self._menu.selected_items()
self._grid.set_walker(cfg['Algorithm'])
self._grid.set_brush(cfg['Brush'])
self._grid.set_diagonals(cfg['Diagonals'])
else:
self._grid.kbd_event(event)
elif (not self._menu.is_active() and
event.type in (pygame.MOUSEBUTTONDOWN,
pygame.MOUSEBUTTONUP,
pygame.MOUSEMOTION)):
self._grid.mouse_event(event)
self._grid.draw()
self._menu.draw()
clock.tick(DEFAULT_FPS)
pygame.display.flip()
def usage():
sys.stderr.write("USAGE: %s: ROWSxCOLUMNS\n" % sys.argv[0])
sys.exit(1)
def show_help():
# just too lazy to draw it in pygame...
print "============================="
print "Help"
print "============================="
print "Keys:"
print " Space - start, resume/pause the visualization"
print " c - clean everything from the grid"
print " Esc - enter the menu mode, clean everything"
print " from the grid except walls and weights"
print " Up/Down - (in menu mode) switch the value of selected option"
print " Left/Right - (in menu mode) switch current menu option"
print ""
print "Mouse:"
print (" You can move source (%s) and destination (%s) points within the grid"
% (SOURCE_POINT_COLOR, DESTINATION_POINT_COLOR))
print " using the mouse. You can also draw walls and set weights to any"
print " non-busy cell on the grid. (note: default weight of \"white\""
print " cells is %d)" % DEFAULT_CELL_WEIGHT
print ""
print "Menu:"
print " -> Algorithm: select shortest path finding algorithm"
print " -> Brush: switch between drawing walls and setting cell weights"
print " -> Diagonals: enable/disable diagonal moves"
def main():
if len(sys.argv) != 2:
usage()
try:
rows, cols = [int(i) for i in sys.argv[1].split('x')]
except ValueError:
usage()
show_help()
try:
spd = SPDemo(rows, cols)
spd.run()
except ValueError as err:
sys.stderr.write("Error: " + str(err) + "\n")
if __name__ == '__main__':
main()
|
|
"""Various high level TF models."""
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
import tensorflow as tf
from skflow.ops import mean_squared_error_regressor, softmax_classifier, dnn
def linear_regression(X, y):
"""Creates linear regression TensorFlow subgraph.
Args:
X: tensor or placeholder for input features.
y: tensor or placeholder for target.
Returns:
Predictions and loss tensors.
"""
with tf.variable_scope('linear_regression'):
tf.histogram_summary('linear_regression.X', X)
tf.histogram_summary('linear_regression.y', y)
y_shape = y.get_shape()
if len(y_shape) == 1:
output_shape = 1
else:
output_shape = y_shape[1]
weights = tf.get_variable('weights', [X.get_shape()[1], output_shape])
bias = tf.get_variable('bias', [output_shape])
tf.histogram_summary('linear_regression.weights', weights)
tf.histogram_summary('linear_regression.bias', bias)
return mean_squared_error_regressor(X, y, weights, bias)
def logistic_regression(X, y, class_weight=None):
"""Creates logistic regression TensorFlow subgraph.
Args:
X: tensor or placeholder for input features,
shape should be [batch_size, n_features].
y: tensor or placeholder for target,
shape should be [batch_size, n_classes].
class_weight: tensor, [n_classes], where for each class
it has weight of the class. If not provided
will check if graph contains tensor `class_weight:0`.
If that is not provided either all ones are used.
Returns:
Predictions and loss tensors.
"""
with tf.variable_scope('logistic_regression'):
tf.histogram_summary('logistic_regression.X', X)
tf.histogram_summary('logistic_regression.y', y)
weights = tf.get_variable('weights', [X.get_shape()[1],
y.get_shape()[-1]])
bias = tf.get_variable('bias', [y.get_shape()[-1]])
tf.histogram_summary('logistic_regression.weights', weights)
tf.histogram_summary('logistic_regression.bias', bias)
# If no class weight provided, try to retrieve one from pre-defined
# tensor name in the graph.
if not class_weight:
try:
class_weight = tf.get_default_graph().get_tensor_by_name('class_weight:0')
except KeyError:
pass
return softmax_classifier(X, y, weights, bias,
class_weight=class_weight)
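# Note (illustrative): because of the fallback lookup above, a per-class
# weight tensor can also be supplied without passing it explicitly, by
# creating a named placeholder in the same graph before the model is built
# (`n_classes` is a name used here only for illustration):
#
#   class_weight = tf.placeholder(tf.float32, [n_classes], name='class_weight')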
def get_dnn_model(hidden_units, target_predictor_fn):
"""Returns a function that creates a DNN TensorFlow subgraph with given
params.
Args:
hidden_units: List of values of hidden units for layers.
target_predictor_fn: Function that will predict target from input
features. This can be logistic regression,
linear regression or any other model,
that takes X, y and returns predictions and loss tensors.
Returns:
A function that creates the subgraph.
"""
def dnn_estimator(X, y):
"""DNN estimator with target predictor function on top."""
layers = dnn(X, hidden_units)
return target_predictor_fn(layers, y)
return dnn_estimator
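# Usage sketch (illustrative), assuming placeholders X of shape
# [batch_size, n_features] and y of shape [batch_size, n_classes] already
# exist in the graph:
#
#   model_fn = get_dnn_model([128, 64], logistic_regression)
#   predictions, loss = model_fn(X, y)
#
# get_dnn_model builds the hidden layers with skflow.ops.dnn and stacks the
# chosen predictor (here logistic_regression) on top.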
## This will be in Tensorflow 0.7.
## TODO(ilblackdragon): Clean this up when it's released
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = tf.pack(input_seq)
# Reverse along dimension 0
s_reversed = tf.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = tf.unpack(s_reversed)
return result
def bidirectional_rnn(cell_fw, cell_bw, inputs,
initial_state_fw=None, initial_state_bw=None,
dtype=None, sequence_length=None, scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
[batch_size x cell.state_size].
initial_state_bw: (optional) Same as for initial_state_fw.
dtype: (optional) The data type for the initial state. Required if either
of the initial states are not provided.
sequence_length: (optional) An int64 vector (tensor) of size [batch_size],
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to "BiRNN"
Returns:
A set of output `Tensors` where:
outputs is a length T list of outputs (one for each input), which
are depth-concatenated forward and backward outputs
Raises:
TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell_fw, tf.nn.rnn_cell.RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, tf.nn.rnn_cell.RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
name = scope or "BiRNN"
# Forward direction
with tf.variable_scope(name + "_FW"):
output_fw, _ = tf.nn.rnn(cell_fw, inputs, initial_state_fw, dtype,
sequence_length)
# Backward direction
with tf.variable_scope(name + "_BW"):
tmp, _ = tf.nn.rnn(cell_bw, _reverse_seq(inputs, sequence_length),
initial_state_bw, dtype, sequence_length)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
outputs = [tf.concat(1, [fw, bw])
for fw, bw in zip(output_fw, output_bw)]
return outputs
# End of Tensorflow 0.7
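# Usage sketch for bidirectional_rnn (illustrative), with `inputs` a list of
# per-timestep tensors, each of shape [batch_size, input_size]:
#
#   cell_fw = tf.nn.rnn_cell.GRUCell(64)
#   cell_bw = tf.nn.rnn_cell.GRUCell(64)
#   outputs = bidirectional_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32)
#
# Each element of `outputs` then has depth 64 + 64 = 128 (forward and
# backward outputs depth-concatenated).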
def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn,
bidirectional, target_predictor_fn,
sequence_length, initial_state):
"""Returns a function that creates a RNN TensorFlow subgraph with given
params.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument X for input and returns transformed X.
bidirectional: boolean, Whether this is a bidirectional rnn.
target_predictor_fn: Function that will predict target from input
features. This can be logistic regression,
linear regression or any other model,
that takes X, y and returns predictions and loss tensors.
sequence_length: If sequence_length is provided, dynamic calculation is performed.
This saves computational time when unrolling past max sequence length.
Required for bidirectional RNNs.
initial_state: An initial state for the RNN. This must be a tensor of appropriate type
and shape [batch_size x cell.state_size].
Returns:
A function that creates the subgraph.
"""
def rnn_estimator(X, y):
"""RNN estimator with target predictor function on top."""
X = input_op_fn(X)
if cell_type == 'rnn':
cell_fn = tf.nn.rnn_cell.BasicRNNCell
elif cell_type == 'gru':
cell_fn = tf.nn.rnn_cell.GRUCell
elif cell_type == 'lstm':
cell_fn = tf.nn.rnn_cell.BasicLSTMCell
else:
raise ValueError("cell_type {} is not supported. ".format(cell_type))
if bidirectional:
# forward direction cell
rnn_fw_cell = tf.nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
# backward direction cell
rnn_bw_cell = tf.nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
encoding = bidirectional_rnn(rnn_fw_cell, rnn_bw_cell, X,
dtype=tf.float32,
sequence_length=sequence_length,
initial_state_fw=initial_state,
initial_state_bw=initial_state)
else:
cell = tf.nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
_, encoding = tf.nn.rnn(cell, X, dtype=tf.float32,
sequence_length=sequence_length,
initial_state=initial_state)
return target_predictor_fn(encoding[-1], y)
return rnn_estimator
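# Usage sketch (illustrative). `input_op_fn` must turn the raw input tensor
# into the list of per-timestep tensors the cells expect; the splitting
# below and MAX_DOC_LEN are assumptions for illustration only:
#
#   def words_input_op(X):
#       return tf.split(1, MAX_DOC_LEN, tf.to_float(X))
#
#   model_fn = get_rnn_model(rnn_size=32, cell_type='gru', num_layers=1,
#                            input_op_fn=words_input_op, bidirectional=False,
#                            target_predictor_fn=logistic_regression,
#                            sequence_length=None, initial_state=None)
#   predictions, loss = model_fn(X, y)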
|
|
#------------------------------------------------------------------------------
# Copyright 2014 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
import os, sys, traceback, math, decimal
import arcpy
from arcpy import env
input_centers = arcpy.GetParameterAsText(0)
numberOfRings = int(arcpy.GetParameterAsText(1))
ringInterval = float(arcpy.GetParameterAsText(2))
outputRings = arcpy.GetParameterAsText(3)
distanceUnits = arcpy.GetParameterAsText(4)
numberOfRadials = int(arcpy.GetParameterAsText(5))
bearingUnits = arcpy.GetParameterAsText(6)
outputRadials = arcpy.GetParameterAsText(7)
buildRadials = True
delete_me = []
try:
currentOverwriteOutput = env.overwriteOutput
env.overwriteOutput = True
installInfo = arcpy.GetInstallInfo("desktop")
installDirectory = installInfo["InstallDir"]
GCS_WGS_1984 = os.path.join(installDirectory,r"Coordinate Systems", r"Geographic Coordinate Systems", r"World",r"WGS 1984.prj")
# Get SR of Input Centers
inputCentersSR = arcpy.Describe(input_centers).spatialReference
result = arcpy.GetCount_management(input_centers)
arcpy.AddMessage("Using " + str(result) + " centers ...")
# create temp table
tempTable = os.path.join(env.scratchWorkspace,"tempTable")
delete_me.append(tempTable)
arcpy.CreateTable_management(os.path.dirname(tempTable),os.path.basename(tempTable))
arcpy.AddField_management(tempTable,"POINT_X","DOUBLE")
arcpy.AddField_management(tempTable,"POINT_Y","DOUBLE")
arcpy.AddField_management(tempTable,"Range","DOUBLE")
arcpy.AddField_management(tempTable,"RingID","LONG")
arcpy.AddField_management(tempTable,"RingRadius","DOUBLE")
# if zero radials, don't build them
if numberOfRadials < 1: buildRadials = False
# Add XY
arcpy.AddXY_management(input_centers)
# build ring values
arcpy.AddMessage("Building ring table ...")
getRows = arcpy.SearchCursor(input_centers)
addRows = arcpy.InsertCursor(tempTable)
y = 1
for getRow in getRows:
pointX = getRow.POINT_X
pointY = getRow.POINT_Y
x = 1
while x <= numberOfRings:
addRow = addRows.newRow()
addRow.POINT_X = pointX
addRow.POINT_Y = pointY
rd = float(x) * ringInterval
# print "row: " + str(x) + " " + str(pointX) + " " + str(pointY) + " " + str(rd)
arcpy.AddMessage("row: " + str(x) + " " + str(pointX) + " " + str(pointY) + " " + str(rd))
addRow.Range = rd
addRow.RingRadius = rd * 2.0
addRow.RingID = y
addRows.insertRow(addRow)
x += 1
y += 1
del addRow
del addRows
del getRow
del getRows
results = arcpy.GetCount_management(tempTable)
# build ellipses
arcpy.AddMessage("Constructing " + str(results) + " ring features ...")
arcpy.TableToEllipse_management(tempTable,outputRings,"POINT_X","POINT_Y","RingRadius","RingRadius",distanceUnits,"#","#","#",inputCentersSR)
# Join fields
tempTableOIDFieldName = arcpy.Describe(tempTable).OIDFieldName
ringOIDFieldName = arcpy.Describe(outputRings).OIDFieldName
arcpy.JoinField_management(outputRings,ringOIDFieldName,tempTable,tempTableOIDFieldName,["Range","RingID"])
# Delete junk field
arcpy.DeleteField_management(outputRings,"RingRadius")
# create radials temp table
if buildRadials == True:
arcpy.AddMessage("Using " + str(numberOfRadials) + " radials ...")
tempRadialTable = os.path.join(env.scratchWorkspace,"tempRadialTable")
delete_me.append(tempRadialTable)
arcpy.CreateTable_management(os.path.dirname(tempRadialTable),os.path.basename(tempRadialTable))
arcpy.AddField_management(tempRadialTable,"POINT_X","DOUBLE")
arcpy.AddField_management(tempRadialTable,"POINT_Y","DOUBLE")
arcpy.AddField_management(tempRadialTable,"Azimuth","DOUBLE")
arcpy.AddField_management(tempRadialTable,"Range","DOUBLE")
arcpy.AddField_management(tempRadialTable,"RingID","LONG")
maxRadialRange = ringInterval * numberOfRings
radialInterval = 360.0/numberOfRadials
radialList = []
r = 1
while r <= numberOfRadials:
radialList.append(r * radialInterval)
r += 1
arcpy.AddMessage("Building radial table ...")
getRows = arcpy.SearchCursor(input_centers)
addRows = arcpy.InsertCursor(tempRadialTable)
y = 1
for getRow in getRows:
pointX = getRow.POINT_X
pointY = getRow.POINT_Y
for radialAzimuth in radialList:
addRow = addRows.newRow()
addRow.POINT_X = pointX
addRow.POINT_Y = pointY
addRow.Range = maxRadialRange
addRow.Azimuth = radialAzimuth
addRow.RingID = y
addRows.insertRow(addRow)
y += 1
del addRow
del addRows
del getRow
del getRows
results = arcpy.GetCount_management(tempRadialTable)
# build ellipses
arcpy.AddMessage("Constructing " + str(results) + " radial features ...")
arcpy.BearingDistanceToLine_management(tempRadialTable,outputRadials,"POINT_X","POINT_Y","Range",distanceUnits,"Azimuth",bearingUnits,"RHUMB_LINE","RingID",inputCentersSR)
# Join fields
tempRadialTableOIDFieldName = arcpy.Describe(tempRadialTable).OIDFieldName
radialOIDFieldName = arcpy.Describe(outputRadials).OIDFieldName
#arcpy.JoinField_management(outputRadials,radialOIDFieldName,tempRadialTable,tempRadialTableOIDFieldName,["Azimuth","Range","RingID"])
else:
arcpy.AddMessage("Zero radials to build ...")
# set output
arcpy.SetParameter(3,outputRings)
if buildRadials == True:
arcpy.SetParameter(7,outputRadials)
# cleanup
arcpy.AddMessage("Removing scratch datasets:")
for ds in delete_me:
arcpy.AddMessage(str(ds))
arcpy.Delete_management(ds)
env.overwriteOutput = currentOverwriteOutput
except arcpy.ExecuteError:
error = True
# Get the tool error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
#print msgs #UPDATE
print(msgs)
except:
# Get the traceback object
error = True
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# Print Python error messages for use in Python / Python Window
#print pymsg + "\n" #UPDATE
print(pymsg + "\n")
#print msgs #UPDATE
print(msgs)
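# Note (illustrative): each ring row above gets Range = x * ringInterval and
# RingRadius = 2 * Range; the doubling reflects that the ellipse tool is fed
# full major/minor axis lengths rather than radii. For example,
# numberOfRings = 3 with ringInterval = 1000 (meters) produces Range values
# of 1000, 2000 and 3000 for each input center.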
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
try:
import cPickle as pickle
except ImportError:
import pickle
import random
try:
from django.utils.encoding import smart_bytes
except ImportError:
from django.utils.encoding import smart_str as smart_bytes
from django.utils.datastructures import SortedDict
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from redis import Redis
from redis.exceptions import ConnectionError, ResponseError
from redis.connection import (Connection,
DefaultParser,
UnixDomainSocketConnection)
from ..util import CacheKey, load_class, integer_types
from ..exceptions import ConnectionInterrupted
from ..pool import get_or_create_connection_pool
class DefaultClient(object):
def __init__(self, server, params, backend):
self._pickle_version = -1
self._backend = backend
self._server = server
self._params = params
if not self._server:
raise ImproperlyConfigured("Missing connections string")
if not isinstance(self._server, (list, tuple, set)):
self._server = self._server.split(",")
self._clients = [None for x in range(len(self._server))]
self._options = params.get('OPTIONS', {})
self.setup_pickle_version()
def __contains__(self, key):
return self.has_key(key)
def get_client(self, write=True):
if write or len(self._server) == 1:
index = 0
else:
index = random.randint(1, len(self._server)-1)
if self._clients[index] is None:
self._clients[index] = self.connect(index)
return self._clients[index]
@property
def parser_class(self):
cls = self._options.get('PARSER_CLASS', None)
if cls is None:
return DefaultParser
return load_class(cls)
def parse_connection_string(self, constring):
"""
Method that parse a connection string.
"""
try:
host, port, db = constring.split(":")
port = int(port) if host != "unix" else port
db = int(db)
return host, port, db
except (ValueError, TypeError):
raise ImproperlyConfigured("Incorrect format '%s'" % (constring))
def _connect(self, host, port, db):
"""
Creates a redis connection with connection pool.
"""
kwargs = {
"db": db,
"parser_class": self.parser_class,
"password": self._options.get('PASSWORD', None),
}
if host == "unix":
kwargs.update({'path': port, 'connection_class': UnixDomainSocketConnection})
else:
kwargs.update({'host': host, 'port': port, 'connection_class': Connection})
if 'SOCKET_TIMEOUT' in self._options:
kwargs.update({'socket_timeout': int(self._options['SOCKET_TIMEOUT'])})
connection_pool = get_or_create_connection_pool(**kwargs)
connection = Redis(connection_pool=connection_pool)
return connection
def connect(self, index=0):
host, port, db = self.parse_connection_string(self._server[index])
connection = self._connect(host, port, db)
return connection
def setup_pickle_version(self):
if "PICKLE_VERSION" in self._options:
try:
self._pickle_version = int(self._options['PICKLE_VERSION'])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def set(self, key, value, timeout=None, version=None, client=None, nx=False):
"""
Persist a value to the cache, and set an optional expiration time.
Also supports optional nx parameter. If set to True - will use redis setnx instead of set.
"""
if not client:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
value = self.pickle(value)
if timeout is None:
timeout = self._backend.default_timeout
try:
if nx:
res = client.setnx(key, value)
if res and timeout > 0:
return client.expire(key, int(timeout))
return res
else:
if timeout > 0:
return client.setex(key, value, int(timeout))
return client.set(key, value)
except ConnectionError:
raise ConnectionInterrupted(connection=client)
def incr_version(self, key, delta=1, version=None, client=None):
"""
Adds delta to the cache version for the supplied key. Returns the
new version.
"""
if client is None:
client = self.get_client(write=True)
if version is None:
version = self._backend.version
old_key = self.make_key(key, version)
value = self.get(old_key, version=version, client=client)
try:
ttl = client.ttl(old_key)
except ConnectionError:
raise ConnectionInterrupted(connection=client)
if value is None:
raise ValueError("Key '%s' not found" % key)
if isinstance(key, CacheKey):
new_key = self.make_key(key.original_key(), version=version + delta)
else:
new_key = self.make_key(key, version=version + delta)
self.set(new_key, value, timeout=ttl, client=client)
self.delete(old_key, client=client)
return version + delta
def add(self, key, value, timeout=None, version=None, client=None):
"""
Add a value to the cache, failing if the key already exists.
Returns ``True`` if the object was added, ``False`` if not.
"""
return self.set(key, value, timeout, client=client, nx=True)
def get(self, key, default=None, version=None, client=None):
"""
Retrieve a value from the cache.
Returns unpickled value if key is found, the default if not.
"""
if client is None:
client = self.get_client(write=False)
key = self.make_key(key, version=version)
try:
value = client.get(key)
except ConnectionError:
raise ConnectionInterrupted(connection=client)
if value is None:
return default
return self.unpickle(value)
def delete(self, key, version=None, client=None):
"""
Remove a key from the cache.
"""
if client is None:
client = self.get_client(write=True)
try:
client.delete(self.make_key(key, version=version))
except ConnectionError:
raise ConnectionInterrupted(connection=client)
def delete_pattern(self, pattern, version=None, client=None):
"""
Remove all keys matching pattern.
"""
if client is None:
client = self.get_client(write=True)
pattern = self.make_key(pattern, version=version)
try:
keys = client.keys(pattern)
if keys:
client.delete(*keys)
except ConnectionError:
raise ConnectionInterrupted(connection=client)
def delete_many(self, keys, version=None, client=None):
"""
Remove multiple keys at once.
"""
if client is None:
client = self.get_client(write=True)
if not keys:
return
keys = map(lambda key: self.make_key(key, version=version), keys)
try:
client.delete(*keys)
except ConnectionError:
raise ConnectionInterrupted(connection=client)
def clear(self, client=None):
"""
Flush all cache keys.
"""
if client is None:
client = self.get_client(write=True)
client.flushdb()
def unpickle(self, value):
"""
Unpickles the given value.
"""
try:
value = int(value)
except (ValueError, TypeError):
value = smart_bytes(value)
value = pickle.loads(value)
return value
def pickle(self, value):
"""
Pickle the given value.
"""
if isinstance(value, bool) or not isinstance(value, integer_types):
return pickle.dumps(value, self._pickle_version)
return value
def get_many(self, keys, version=None, client=None):
"""
Retrieve many keys.
"""
if client is None:
client = self.get_client(write=False)
if not keys:
return {}
recovered_data = SortedDict()
new_keys = list(map(lambda key: self.make_key(key, version=version), keys))
map_keys = dict(zip(new_keys, keys))
try:
results = client.mget(*new_keys)
except ConnectionError:
raise ConnectionInterrupted(connection=client)
for key, value in zip(new_keys, results):
if value is None:
continue
recovered_data[map_keys[key]] = self.unpickle(value)
return recovered_data
def set_many(self, data, timeout=None, version=None, client=None):
"""
Set a bunch of values in the cache at once from a dict of key/value
pairs. This is much more efficient than calling set() multiple times.
If timeout is given, that timeout will be used for the key; otherwise
the default cache timeout will be used.
"""
if client is None:
client = self.get_client(write=True)
try:
pipeline = client.pipeline()
for key, value in data.items():
self.set(key, value, timeout, version=version, client=pipeline)
pipeline.execute()
except ConnectionError:
raise ConnectionInterrupted(connection=client)
def _incr(self, key, delta=1, version=None, client=None):
if client is None:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
try:
if not client.exists(key):
raise ValueError("Key '%s' not found" % key)
try:
value = client.incr(key, delta)
except ResponseError:
# if cached value or total value is greater than 64 bit signed
# integer.
# elif int is pickled. so redis sees the data as string.
# In this situations redis will throw ResponseError
# try to keep TTL of key
timeout = client.ttl(key)
value = self.get(key, version=version, client=client) + delta
self.set(key, value, version=version, timeout=timeout,
client=client)
except ConnectionError:
raise ConnectionInterrupted(connection=client)
return value
def incr(self, key, delta=1, version=None, client=None):
"""
        Add delta to the value stored in the cache. If the key does not
        exist, raise a ValueError exception.
"""
return self._incr(key=key, delta=delta, version=version, client=client)
def decr(self, key, delta=1, version=None, client=None):
"""
        Subtract delta from the value stored in the cache. If the key does
        not exist, raise a ValueError exception.
"""
return self._incr(key=key, delta=delta * -1, version=version,
client=client)
def has_key(self, key, version=None, client=None):
"""
Test if key exists.
"""
if client is None:
client = self.get_client(write=False)
key = self.make_key(key, version=version)
try:
return client.exists(key)
except ConnectionError:
raise ConnectionInterrupted(connection=client)
def keys(self, search, client=None):
if client is None:
client = self.get_client(write=False)
pattern = self.make_key(search)
try:
encoding_map = map(lambda x: x.decode('utf-8'), client.keys(pattern))
return list(map(lambda x: x.split(":", 2)[2], encoding_map))
except ConnectionError:
raise ConnectionInterrupted(connection=client)
def make_key(self, key, version=None):
if not isinstance(key, CacheKey):
key = CacheKey(self._backend.make_key(key, version))
return key
    def close(self, **kwargs):
        if getattr(settings, "DJANGO_REDIS_CLOSE_CONNECTION", False):
            # Disconnect every client created so far and reset the slots.
            for client in self._clients:
                if client is None:
                    continue
                for c in client.connection_pool._available_connections:
                    c.disconnect()
            self._clients = [None for _ in range(len(self._server))]
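# Note (illustrative): server strings take the form "host:port:db", or
# "unix:<socket path>:db" for unix domain sockets, and several servers can
# be given as a list/tuple or a comma separated string. Writes always go to
# the first server; reads are spread across the remaining ones. OPTIONS keys
# consumed above include PARSER_CLASS, PASSWORD, SOCKET_TIMEOUT and
# PICKLE_VERSION.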
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from urllib import urlencode
import json
from django.conf import settings
from django.contrib.auth.models import User
from django.db import transaction
from django.db.models import Q
from django.shortcuts import get_object_or_404, Http404, render_to_response
from django.template import RequestContext
from django.utils import timezone
from django.views.decorators.csrf import ensure_csrf_cookie
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from apps.core.exceptions import Forbidden
from apps.core.decorators import (accepts, api_view, login_required,
owner_required)
from apps.core.models import Layer, LayerImage, LayerTag, UserFavoriteLayer
from apps.home.forms import LayerForm
from apps.home.filters import LayerFilter
from apps.workers.sqs_manager import SQSManager
from apps.workers.process import JOB_COPY_IMAGE, JOB_VALIDATE
RESULTS_PER_PAGE = 10
MAX_RESULTS_PER_PAGE = 100
@ensure_csrf_cookie
def home_page(request):
context = RequestContext(request)
return render_to_response('home/home.html', context)
@api_view
@accepts('GET')
def not_found(request):
raise Http404()
@api_view
@accepts('GET', 'PUT', 'DELETE')
def layer_detail(request, username, layer_id):
layer = _get_layer_or_404(request, id=layer_id, user__username=username)
if request.method == 'GET':
return layer.to_json()
elif request.method == 'PUT':
return _save_layer(request, layer, username=username)
elif request.method == 'DELETE':
return _delete_layer(request, layer, username=username)
@api_view
@accepts('GET')
def layer_meta(request, username, layer_id):
layer = _get_layer_or_404(request, id=layer_id, user__username=username)
try:
meta = layer.layer_metas.order_by('-created_at')[0]
return meta.to_json()
except IndexError:
raise Http404()
@api_view
@login_required
@accepts('POST')
def layer_dismiss(request):
user_id = request.user.id
layer_id = request.POST.get('layer_id')
layer = get_object_or_404(Layer, id=layer_id, user_id=user_id)
layer.dismissed = True
layer.save()
return 'OK'
@api_view
@login_required
@accepts('POST')
def layer_retry(request):
user_id = request.user.id
layer_id = request.POST.get('layer_id')
layer = get_object_or_404(Layer, id=layer_id, user_id=user_id)
if layer.retry_possible():
_retry_layer(layer)
return 'OK'
else:
raise Forbidden(errors={
'layer_id': ['Layer needs to have failed to retry.']
})
def _retry_layer(layer):
# Attempts to roll the layer back to an error-free state,
# and retry jobs starting at the step where errors could have possibly
# occurred. For images that were copied, this means starting at the copy
# job. For images that were uploaded, this means starting at the
# validation job.
sqs_manager = SQSManager()
layer.reset()
for image in layer.layer_images.all():
if image.is_copy_image():
job_type = JOB_COPY_IMAGE
data = {'image_id': image.id}
sqs_manager.add_message(job_type, data)
else:
job_type = JOB_VALIDATE
data = {'image_id': image.id}
sqs_manager.add_message(job_type, data)
def _get_layer_or_404(request, **kwargs):
try:
crit = Q(**kwargs)
return _get_layer_models(request, crit)['layers'][0]
except IndexError:
raise Http404()
@api_view
@login_required
@accepts('POST')
def create_layer(request, username):
layer = Layer()
layer.user = request.user
return _save_layer(request, layer, username=username)
@transaction.atomic
@owner_required
def _save_layer(request, layer, username=None):
"""
Create or update a layer model with data from POST or PUT form fields.
"""
body = json.loads(request.body)
form = LayerForm(body, instance=layer)
if not form.is_valid():
raise Forbidden(errors=form.errors)
if Layer.objects.filter(user__username=request.user.username,
name=form.cleaned_data['name']).count():
raise Forbidden(errors={
'name': ['Layer with name already exists for user.']
})
try:
layer = form.save()
except Exception as ex:
# TODO: Log exception
raise Forbidden(errors={
'all': ex.message
})
# Update tags.
LayerTag.objects.filter(layer=layer).delete()
LayerTag.objects.bulk_create([
LayerTag(layer=layer, name=tag)
for tag in form.cleaned_data['tags']
])
# Update images.
LayerImage.objects.filter(layer=layer).delete()
LayerImage.objects.bulk_create([
LayerImage(layer=layer,
s3_uuid=image['s3_uuid'],
file_extension=image['file_extension'],
file_name=image['file_name'],
bucket_name=settings.AWS_BUCKET_NAME,
source_s3_bucket_key=image['source_s3_bucket_key'])
for image in form.cleaned_data['images']
])
# Create jobs to copy images into S3 bucket
if layer.has_copy_images():
sqs_manager = SQSManager()
for image in LayerImage.objects.filter(layer=layer):
if image.is_copy_image():
job_type = JOB_COPY_IMAGE
data = {'image_id': image.id}
sqs_manager.add_message(job_type, data)
return layer.to_json()
@owner_required
def _delete_layer(request, layer, username=None):
layer.deleted_at = timezone.now()
layer.save()
return 'OK'
@api_view
@accepts('GET')
def user_layers(request, username):
get_object_or_404(User, username=username)
crit = Q(user__username=username, status_completed__isnull=False)
return _get_layers(request, crit)
@api_view
@login_required
@accepts('GET')
def my_layers(request):
crit = Q(user=request.user)
if request.GET.get('pending') == 'true':
crit &= Q(dismissed=False)
else:
crit &= Q(status_completed__isnull=False)
return _get_layers(request, crit)
@api_view
@login_required
@accepts('GET')
def my_favorites(request):
ids = UserFavoriteLayer.objects.filter(user__id=request.user.id) \
.values_list('layer_id', flat=True)
return _get_layers(request, Q(id__in=ids))
@api_view
@login_required
@accepts('POST', 'DELETE')
def create_or_destroy_favorite(request, layer_id):
"""
Create or destroy "favorited" layer for currently authenticated user.
"""
kwargs = {
'user_id': request.user.id,
'layer_id': layer_id,
}
if request.method == 'POST':
# Ensure user can only favorite owned/public layers.
_get_layer_or_404(request, id=layer_id)
model, created = UserFavoriteLayer.objects.get_or_create(**kwargs)
model.save()
elif request.method == 'DELETE':
model = get_object_or_404(UserFavoriteLayer, **kwargs)
model.delete()
return 'OK'
@api_view
@accepts('GET')
def all_layers(request):
crit = Q(status_completed__isnull=False)
return _get_layers(request, crit)
def _get_layer_models(request, crit=None):
"""
Return list of filtered layer models.
"""
qs = Layer.objects.select_related('user') \
.prefetch_related('layer_images', 'layer_tags',
'favorites')
qs = qs.filter(deleted_at__isnull=True)
if not request.user.is_staff:
is_visible = Q(user__id=request.user.id) | Q(is_public=True)
qs = qs.filter(is_visible)
if crit:
qs = qs.filter(crit)
filtered_layers = LayerFilter(request.GET, queryset=qs)
page = request.GET.get('page')
page_size = request.GET.get('page_size')
results_per_page = RESULTS_PER_PAGE
if page_size:
try:
page_size = int(page_size)
if page_size == 0:
num_layers = filtered_layers.count()
if num_layers > 0:
results_per_page = min(num_layers, MAX_RESULTS_PER_PAGE)
else:
results_per_page = min(page_size, MAX_RESULTS_PER_PAGE)
except:
pass
paginator = Paginator(filtered_layers, results_per_page)
try:
layers = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
page = 1
layers = paginator.page(page)
except EmptyPage:
# If page is out of range, deliver last page of results.
page = paginator.num_pages
layers = paginator.page(page)
page = int(page)
prev_url = (page - 1) if page > 1 else None
next_url = (page + 1) if page < paginator.num_pages else None
return {
'layers': layers,
'pages': paginator.num_pages,
'current_page': page,
'next_url': next_url,
'prev_url': prev_url
}
def _get_layers(request, crit=None):
"""
Return list of JSON serializable layer models.
"""
results = _get_layer_models(request, crit)
models = [m.to_json() for m in results['layers']]
prev_url = None
next_url = None
get = request.GET.copy()
if results['prev_url'] is not None:
get['page'] = results['prev_url']
prev_url = request.path + '?' + urlencode(get)
if results['next_url'] is not None:
get['page'] = results['next_url']
next_url = request.path + '?' + urlencode(get)
return {
'layers': models,
'pages': results['pages'],
'current_page': results['current_page'],
'prev_url': prev_url,
'next_url': next_url
}
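# Note (illustrative): the list endpoints above accept optional ``page`` and
# ``page_size`` query parameters. ``page_size=0`` requests every result in a
# single page (capped at MAX_RESULTS_PER_PAGE); any other value is capped at
# MAX_RESULTS_PER_PAGE, and the default is RESULTS_PER_PAGE. Out-of-range
# pages fall back to the last page, non-integer pages to the first.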
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import pytest
from monty.serialization import loadfn
from pymatgen.analysis.bond_dissociation import BondDissociationEnergies
module_dir = os.path.dirname(os.path.abspath(__file__))
class BondDissociationTest(unittest.TestCase):
def setUp(self):
pytest.importorskip("openbabel", reason="OpenBabel not installed")
self.PC_65_principle = loadfn(os.path.join(module_dir, "PC_65_principle.json"))
self.PC_65_principle["initial_molecule"] = self.PC_65_principle["initial_molecule"].as_dict()
self.PC_65_principle["final_molecule"] = self.PC_65_principle["final_molecule"].as_dict()
self.PC_65_fragments = loadfn(os.path.join(module_dir, "PC_65_fragments.json"))
for entry in self.PC_65_fragments:
entry["initial_molecule"] = entry["initial_molecule"].as_dict()
entry["final_molecule"] = entry["final_molecule"].as_dict()
self.PC_correct = [
[
-0.1537378967699965,
[(0, 6)],
"O",
"C",
"[O][C@H](CO[C]=O)C",
"no_change",
0,
3,
-381.557913621934,
],
[
-0.13698599276000323,
[(0, 3)],
"O",
"C",
"O=C(OC[CH]C)[O]",
"no_change",
0,
3,
-381.574665525944,
],
[
-0.13862754671799848,
[(1, 4)],
"O",
"C",
"O([C@H]([CH2])C)C(=O)[O]",
"no_change",
0,
3,
-381.573023971986,
],
[
-0.15215890127200282,
[(1, 6)],
"O",
"C",
"O([C@H](C[O])C)[C]=O",
"no_change",
0,
3,
-381.559492617432,
],
[
-0.7159314516110271,
[(2, 6)],
"O",
"C",
"O1[C@H](CO[C]1)C",
"no_change",
-1,
2,
-306.454993516193,
"[O]",
"no_change",
1,
2,
-74.5407265509,
],
[
-0.2463273115880611,
[(2, 6)],
"O",
"C",
"O1[C@H](CO[C]1)C",
"no_change",
0,
1,
-306.402705691416,
"[O]",
"no_change",
0,
3,
-75.0626185157,
],
[
-0.3083568154030445,
[(2, 6)],
"O",
"C",
"O1[C@H](CO[C]1)C",
"no_change",
1,
2,
-306.159209742101,
"[O]",
"no_change",
-1,
2,
-75.2440849612,
],
[
-0.2757153725892181,
[(3, 5)],
"C",
"C",
"O1[CH]COC1=O",
"no_change",
-1,
1,
-341.848790340731,
"[CH3]",
"no_change",
1,
1,
-39.5871458053838,
],
[
-0.23669554002367477,
[(3, 5)],
"C",
"C",
"O1[CH]COC1=O",
"no_change",
1,
1,
-341.536118459517,
"[CH3]",
"no_change",
-1,
1,
-39.9388375191633,
],
[
-0.15603034591947562,
[(3, 5)],
"C",
"C",
"O1[CH]COC1=O",
"no_change",
0,
2,
-341.725750601598,
"[CH3]",
"no_change",
0,
2,
-39.8298705711865,
],
[
-0.1455391987270218,
[(3, 4)],
"C",
"C",
"O([CH]C)C(=O)O[CH2]",
"no_change",
0,
3,
-381.566112319977,
],
[
-0.18308384202697425,
[(3, 7)],
"C",
"H",
"O1[C](COC1=O)C",
"no_change",
1,
1,
-380.872362641477,
"[H]",
"no_change",
-1,
1,
-0.6562050352,
],
[
-0.1619771586430261,
[(3, 7)],
"C",
"H",
"O1[C](COC1=O)C",
"no_change",
0,
2,
-381.046845508561,
"[H]",
"no_change",
0,
2,
-0.5028288515,
],
[
-0.201648081019016,
[(4, 9)],
"C",
"H",
"O1[C@H]([CH]OC1=O)C",
"no_change",
1,
1,
-380.853798402485,
"[H]",
"no_change",
-1,
1,
-0.6562050352,
],
[
-0.1664265655520012,
[(4, 9)],
"C",
"H",
"O1[C@H]([CH]OC1=O)C",
"no_change",
0,
2,
-381.042396101652,
"[H]",
"no_change",
0,
2,
-0.5028288515,
],
[
-0.17386520505198177,
[(5, 12)],
"C",
"H",
"O1[C@H](COC1=O)[CH2]",
"no_change",
0,
2,
-381.034957462152,
"[H]",
"no_change",
0,
2,
-0.5028288515,
],
[
-0.34285821379000936,
[(5, 12)],
"C",
"H",
"O1[C@H](COC1=O)[CH2]",
"no_change",
1,
3,
-380.712588269714,
"[H]",
"no_change",
-1,
1,
-0.6562050352,
],
[
-0.18298781245698592,
[(5, 12)],
"C",
"H",
"O1[C](COC1=O)C",
"bond_change",
1,
1,
-380.872458671047,
"[H]",
"no_change",
-1,
1,
-0.6562050352,
],
]
self.neg_EC_40_principle = loadfn(os.path.join(module_dir, "neg_EC_40_principle.json"))
self.neg_EC_40_principle["initial_molecule"] = self.neg_EC_40_principle["initial_molecule"].as_dict()
self.neg_EC_40_principle["final_molecule"] = self.neg_EC_40_principle["final_molecule"].as_dict()
self.neg_EC_40_fragments = loadfn(os.path.join(module_dir, "neg_EC_40_fragments.json"))
for entry in self.neg_EC_40_fragments:
entry["initial_molecule"] = entry["initial_molecule"].as_dict()
entry["final_molecule"] = entry["final_molecule"].as_dict()
self.EC_correct = [
[
0.02488474745905478,
[(0, 5)],
"O",
"C",
"O1CCO[C]1[O]",
"more_bonds",
-1,
2,
-342.440795051501,
],
[
0.06645176460301627,
[(0, 3)],
"O",
"C",
"O=C(OC[CH2])[O]",
"no_change",
-1,
2,
-342.482362068645,
],
[
-0.08663102172198478,
[(2, 5)],
"O",
"C",
"O1CCO[C]1",
"no_change",
0,
1,
-267.08645842702,
"[O]",
"no_change",
-1,
2,
-75.2428208553,
],
[
-0.21497449222397336,
[(2, 5)],
"O",
"C",
"O1CCO[C]1",
"no_change",
-1,
2,
-267.138323931018,
"[O]",
"no_change",
0,
3,
-75.0626118808,
],
[
-0.0652242017809499,
[(3, 6)],
"C",
"H",
"O1[CH]COC1=O",
"no_change",
-1,
1,
-341.847857507061,
"[H]",
"no_change",
0,
2,
-0.5028285952,
],
[
-0.03541898787199216,
[(3, 6)],
"C",
"H",
"O1[CH]COC1=O",
"no_change",
0,
2,
-341.72560514147,
"[H]",
"no_change",
-1,
1,
-0.6548861747,
],
[
-0.05485312948695764,
[(3, 4)],
"C",
"C",
"O([CH2])C(=O)O[CH2]",
"no_change",
-1,
2,
-342.361057174555,
],
]
self.neg_TFSI_principle = loadfn(os.path.join(module_dir, "neg_TFSI_principle.json"))
self.neg_TFSI_principle["initial_molecule"] = self.neg_TFSI_principle["initial_molecule"].as_dict()
self.neg_TFSI_principle["final_molecule"] = self.neg_TFSI_principle["final_molecule"].as_dict()
self.neg_TFSI_fragments = loadfn(os.path.join(module_dir, "neg_TFSI_fragments.json"))
for entry in self.neg_TFSI_fragments:
entry["initial_molecule"] = entry["initial_molecule"].as_dict()
entry["final_molecule"] = entry["final_molecule"].as_dict()
self.TFSI_correct = [
[
-0.15474507240992352,
[(0, 2)],
"S",
"O",
"S(=O)(=O)(C(F)(F)F)[N][S@](=O)C(F)(F)F",
"no_change",
-1,
1,
-1752.01611801942,
"[O]",
"no_change",
0,
3,
-75.0626185157,
],
[
-0.15103778016987235,
[(0, 2)],
"S",
"O",
"S(=O)(=O)(C(F)(F)F)[N][S@](=O)C(F)(F)F",
"no_change",
0,
2,
-1751.83835886616,
"[O]",
"no_change",
-1,
2,
-75.2440849612,
],
[
-0.13498512745195512,
[(0, 14)],
"S",
"N",
"[S]([O])([O])C(F)(F)F",
"no_change",
0,
2,
-886.155841072364,
"S(=O)(=O)(C(F)(F)F)[N]",
"no_change",
-1,
2,
-940.942655407714,
],
[
-0.18234084293294472,
[(0, 14)],
"S",
"N",
"[S]([O])([O])C(F)(F)F",
"no_change",
-1,
1,
-886.286067516302,
"[S@]1(O[N]1)([O])C(F)(F)F",
"more_bonds",
0,
1,
-940.765073248295,
],
[
-0.17810498602898406,
[(0, 6)],
"S",
"C",
"S(=O)(=O)(C(F)(F)F)[N][S@@](=O)[O]",
"no_change",
0,
1,
-1489.42685775311,
"F[C](F)F",
"no_change",
-1,
1,
-337.628518868391,
],
[
-0.10131920738194822,
[(0, 6)],
"S",
"C",
"S(=O)(=O)(C(F)(F)F)[N][S]([O])[O]",
"no_change",
-1,
2,
-1489.56757113509,
"F[C](F)F",
"no_change",
0,
2,
-337.564591265058,
],
[
-0.19376265759979105,
[(6, 10)],
"C",
"F",
"S(=O)(=O)(C(F)(F)F)[N]S(=O)(=O)[C](F)F",
"no_change",
-1,
2,
-1727.31068781023,
"[F]",
"no_change",
0,
2,
-99.7290311397,
],
[
-0.16665302734986653,
[(6, 10)],
"C",
"F",
"[S@]1([O])(OC(S(=O)(=O)[N]1)(F)F)C(F)(F)F",
"more_bonds",
0,
1,
-1727.21369677138,
"[F]",
"no_change",
-1,
1,
-99.8531318088,
],
]
def test_tfsi_neg_no_pcm(self):
BDE = BondDissociationEnergies(self.neg_TFSI_principle, self.neg_TFSI_fragments)
self.assertEqual(len(BDE.filtered_entries), 16)
self.assertEqual(BDE.bond_dissociation_energies, self.TFSI_correct)
def test_pc_neutral_pcm_65(self):
BDE = BondDissociationEnergies(self.PC_65_principle, self.PC_65_fragments)
self.assertEqual(len(BDE.filtered_entries), 36)
self.assertEqual(BDE.bond_dissociation_energies, self.PC_correct)
def test_ec_neg_pcm_40(self):
BDE = BondDissociationEnergies(self.neg_EC_40_principle, self.neg_EC_40_fragments)
self.assertEqual(len(BDE.filtered_entries), 18)
self.assertEqual(BDE.bond_dissociation_energies, self.EC_correct)
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# ChemPy - A chemistry toolkit for Python
#
# Copyright (c) 2010 by Joshua W. Allen ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
Each atom in a molecular configuration has three spatial dimensions in which it
can move. Thus, a molecular configuration consisting of :math:`N` atoms has
:math:`3N` degrees of freedom. We can distinguish between those modes that
involve movement of atoms relative to the molecular center of mass (called
*internal* modes) and those that do not (called *external* modes). Of the
external degrees of freedom, three involve translation of the entire molecular
configuration, while either three (for a nonlinear molecule) or two (for a
linear molecule) involve rotation of the entire molecular configuration
around the center of mass. The remaining :math:`3N-6` (nonlinear) or
:math:`3N-5` (linear) degrees of freedom are the internal modes, and can be
divided into those that involve vibrational motions (symmetric and asymmetric
stretches, bends, etc.) and those that involve torsional rotation around single
bonds between nonterminal heavy atoms.
The mathematical description of these degrees of freedom falls under the purview
of quantum chemistry, and involves the solution of the time-independent
Schrodinger equation:
.. math:: \\hat{H} \\psi = E \\psi
where :math:`\\hat{H}` is the Hamiltonian, :math:`\\psi` is the wavefunction,
and :math:`E` is the energy. The exact form of the Hamiltonian varies depending
on the degree of freedom you are modeling. Since this is a quantum system, the
energy can only take on discrete values. Once the allowed energy levels are
known, the partition function :math:`Q(\\beta)` can be computed using the
summation
.. math:: Q(\\beta) = \\sum_i g_i e^{-\\beta E_i}
where :math:`g_i` is the degeneracy of energy level :math:`i` (i.e. the number
of energy states at that energy level) and
:math:`\\beta \\equiv (k_\\mathrm{B} T)^{-1}`.
The partition function is an immensely useful quantity, as all sorts of
thermodynamic parameters can be evaluated using the partition function:
.. math:: A = - k_\\mathrm{B} T \\ln Q
.. math:: U = - \\frac{\\partial \\ln Q}{\\partial \\beta}
.. math:: S = \\frac{\\partial}{\\partial T} \\left( k_\\mathrm{B} T \\ln Q \\right)
.. math:: C_\\mathrm{v} = \\frac{1}{k_\\mathrm{B} T} \\frac{\\partial^2 \\ln Q}{\\partial \\beta^2}
Above, :math:`A`, :math:`U`, :math:`S`, and :math:`C_\\mathrm{v}` are the
Helmholtz free energy, internal energy, entropy, and constant-volume heat
capacity, respectively.
The partition function for a molecular configuration is the product of the
partition functions for each invidual degree of freedom:
.. math:: Q = Q_\\mathrm{trans} Q_\\mathrm{rot} Q_\\mathrm{vib} Q_\\mathrm{tors} Q_\\mathrm{elec}
This means that the contributions to each thermodynamic quantity from each
molecular degree of freedom are additive.
This module contains models for various molecular degrees of freedom. All such
models derive from the :class:`Mode` base class. A list of molecular degrees of
freedom can be stored in a :class:`StatesModel` object.
"""
################################################################################
import math
import cython
import numpy
import constants
from exception import InvalidStatesModelError
################################################################################
class Mode:
def getPartitionFunctions(self, Tlist):
return numpy.array([self.getPartitionFunction(T) for T in Tlist], numpy.float64)
def getHeatCapacities(self, Tlist):
return numpy.array([self.getHeatCapacity(T) for T in Tlist], numpy.float64)
def getEnthalpies(self, Tlist):
return numpy.array([self.getEnthalpy(T) for T in Tlist], numpy.float64)
def getEntropies(self, Tlist):
return numpy.array([self.getEntropy(T) for T in Tlist], numpy.float64)
################################################################################
class Translation(Mode):
"""
A representation of translational motion in three dimensions for an ideal
gas. The `mass` attribute is the molar mass of the molecule in kg/mol. The
quantities that depend on volume/pressure (partition function and entropy)
are evaluated at a standard pressure of 1 bar.
"""
def __init__(self, mass=0.0):
self.mass = mass
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
object.
"""
return 'Translation(mass=%g)' % (self.mass)
def getPartitionFunction(self, T):
"""
Return the value of the partition function at the specified temperatures
`Tlist` in K. The formula is
.. math:: q_\\mathrm{trans}(T) = \\left( \\frac{2 \\pi m k_\\mathrm{B} T}{h^2} \\right)^{3/2} \\frac{k_\\mathrm{B} T}{P}
        where :math:`T` is temperature, :math:`P` is the standard pressure
        of 1 bar, :math:`m` is mass, :math:`k_\\mathrm{B}` is the Boltzmann
        constant, and :math:`h` is the Planck constant.
"""
cython.declare(qt=cython.double)
qt = ((2 * constants.pi * self.mass / constants.Na) / (constants.h * constants.h))**1.5 / 1e5
return qt * (constants.kB * T)**2.5
def getHeatCapacity(self, T):
"""
Return the contribution to the heat capacity due to translation in
J/mol*K at the specified temperatures `Tlist` in K. The formula is
.. math:: \\frac{C_\\mathrm{v}^\\mathrm{trans}(T)}{R} = \\frac{3}{2}
where :math:`T` is temperature and :math:`R` is the gas law constant.
"""
return 1.5 * constants.R
def getEnthalpy(self, T):
"""
Return the contribution to the enthalpy due to translation in J/mol
at the specified temperatures `Tlist` in K. The formula is
.. math:: \\frac{H^\\mathrm{trans}(T)}{RT} = \\frac{3}{2}
where :math:`T` is temperature and :math:`R` is the gas law constant.
"""
return 1.5 * constants.R * T
def getEntropy(self, T):
"""
Return the contribution to the entropy due to translation in J/mol*K
at the specified temperatures `Tlist` in K. The formula is
.. math:: \\frac{S^\\mathrm{trans}(T)}{R} = \\ln q_\\mathrm{trans}(T) + \\frac{3}{2} + 1
where :math:`T` is temperature, :math:`q_\\mathrm{trans}` is the
partition function, and :math:`R` is the gas law constant.
"""
return (numpy.log(self.getPartitionFunction(T)) + 1.5 + 1.0) * constants.R
def getDensityOfStates(self, Elist):
"""
        Return the density of states at the specified energies `Elist` in J/mol
above the ground state. The formula is
.. math:: \\rho(E) = \\left( \\frac{2 \\pi m}{h^2} \\right)^{3/2} \\frac{E^{3/2}}{\\Gamma(5/2)} \\frac{1}{P}
        where :math:`E` is energy, :math:`m` is mass, :math:`h` is the Planck
        constant, and :math:`P` is the standard pressure of 1 bar.
"""
cython.declare(rho=numpy.ndarray, qt=cython.double)
rho = numpy.zeros_like(Elist)
qt = ((2 * constants.pi * self.mass / constants.Na / constants.Na) / (constants.h * constants.h))**(1.5) / 1e5
rho = qt * Elist**1.5 / (numpy.sqrt(math.pi) * 0.25) / constants.Na
return rho
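# Usage sketch (illustrative): for N2 (molar mass 0.028 kg/mol) at 298 K,
#
#   trans = Translation(mass=0.028)
#   q = trans.getPartitionFunction(298.0)
#   S = trans.getEntropy(298.0)   # ~150 J/(mol*K), the Sackur-Tetrode value
#
# with the pressure fixed at the standard 1 bar assumed by this class.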
################################################################################
class RigidRotor(Mode):
"""
A rigid rotor approximation of (external) rotational modes. The `linear`
attribute is :data:`True` if the associated molecule is linear, and
:data:`False` if nonlinear. For a linear molecule, `inertia` stores a
list with one moment of inertia in kg*m^2. For a nonlinear molecule,
    `inertia` stores a list of the three moments of inertia, even if two or
three are equal, in kg*m^2. The symmetry number of the rotation is stored
in the `symmetry` attribute.
"""
def __init__(self, linear=False, inertia=None, symmetry=1):
self.linear = linear
self.inertia = inertia or []
self.symmetry = symmetry
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
object.
"""
inertia = ', '.join(['%g' % i for i in self.inertia])
return 'RigidRotor(linear=%s, inertia=[%s], symmetry=%s)' % (self.linear, inertia, self.symmetry)
def getPartitionFunction(self, T):
"""
Return the value of the partition function at the specified temperatures
`Tlist` in K. The formula is
.. math:: q_\\mathrm{rot}(T) = \\frac{8 \\pi^2 I k_\\mathrm{B} T}{\\sigma h^2}
for linear rotors and
.. math:: q_\\mathrm{rot}(T) = \\frac{\\sqrt{\\pi}}{\\sigma} \\left( \\frac{8 \\pi^2 k_\\mathrm{B} T}{h^2} \\right)^{3/2} \\sqrt{I_\\mathrm{A} I_\\mathrm{B} I_\\mathrm{C}}
for nonlinear rotors. Above, :math:`T` is temperature, :math:`\\sigma`
is the symmetry number, :math:`I` is the moment of inertia,
:math:`k_\\mathrm{B}` is the Boltzmann constant, and :math:`h` is the
Planck constant.
"""
cython.declare(theta=cython.double, inertia=cython.double)
if self.linear:
theta = constants.h * constants.h / (8 * constants.pi * constants.pi * self.inertia[0] * constants.kB)
return T / theta / self.symmetry
else:
theta = 1.0
for inertia in self.inertia:
theta *= constants.h * constants.h / (8 * constants.pi * constants.pi * inertia * constants.kB)
return numpy.sqrt(constants.pi * T**len(self.inertia) / theta) / self.symmetry
def getHeatCapacity(self, T):
"""
Return the contribution to the heat capacity due to rigid rotation
in J/mol*K at the specified temperatures `Tlist` in K. The formula is
.. math:: \\frac{C_\\mathrm{v}^\\mathrm{rot}(T)}{R} = 1
if linear and
.. math:: \\frac{C_\\mathrm{v}^\\mathrm{rot}(T)}{R} = \\frac{3}{2}
if nonlinear, where :math:`T` is temperature and :math:`R` is the gas
law constant.
"""
if self.linear:
return constants.R
else:
return 1.5 * constants.R
def getEnthalpy(self, T):
"""
Return the contribution to the enthalpy due to rigid rotation in J/mol
at the specified temperatures `Tlist` in K. The formula is
.. math:: \\frac{H^\\mathrm{rot}(T)}{RT} = 1
for linear rotors and
.. math:: \\frac{H^\\mathrm{rot}(T)}{RT} = \\frac{3}{2}
for nonlinear rotors, where :math:`T` is temperature and :math:`R` is
the gas law constant.
"""
if self.linear:
return constants.R * T
else:
return 1.5 * constants.R * T
def getEntropy(self, T):
"""
Return the contribution to the entropy due to rigid rotation in J/mol*K
at the specified temperatures `Tlist` in K. The formula is
.. math:: \\frac{S^\\mathrm{rot}(T)}{R} = \\ln Q^\\mathrm{rot} + 1
for linear rotors and
.. math:: \\frac{S^\\mathrm{rot}(T)}{R} = \\ln Q^\\mathrm{rot} + \\frac{3}{2}
for nonlinear rotors, where :math:`Q^\\mathrm{rot}` is the partition
function for a rigid rotor and :math:`R` is the gas law constant.
"""
if self.linear:
return (numpy.log(self.getPartitionFunction(T)) + 1.0) * constants.R
else:
return (numpy.log(self.getPartitionFunction(T)) + 1.5) * constants.R
def getDensityOfStates(self, Elist):
"""
        Return the density of states at the specified energies `Elist` in J/mol
above the ground state in mol/J. The formula is
.. math:: \\rho(E) = \\frac{8 \\pi^2 I}{\\sigma h^2}
for linear rotors and
.. math:: \\rho(E) = \\frac{\\sqrt{\\pi}}{\\sigma} \\left( \\frac{8 \\pi^2}{h^2} \\right)^{3/2} \\sqrt{I_\\mathrm{A} I_\\mathrm{B} I_\\mathrm{C}} \\frac{E^{1/2}}{\\frac{1}{2}!}
for nonlinear rotors. Above, :math:`E` is energy, :math:`\\sigma`
is the symmetry number, :math:`I` is the moment of inertia,
:math:`k_\\mathrm{B}` is the Boltzmann constant, and :math:`h` is the
Planck constant.
"""
cython.declare(theta=cython.double, inertia=cython.double)
if self.linear:
theta = constants.h * constants.h / (8 * constants.pi * constants.pi * self.inertia[0]) * constants.Na
return numpy.ones_like(Elist) / theta / self.symmetry
else:
theta = 1.0
for inertia in self.inertia:
theta *= constants.h * constants.h / (8 * constants.pi * constants.pi * inertia) * constants.Na
return 2.0 * numpy.sqrt(Elist / theta) / self.symmetry
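# Usage sketch (illustrative): for a linear rotor such as CO
# (I ~ 1.45e-46 kg*m^2, symmetry number 1) at 298 K,
#
#   rot = RigidRotor(linear=True, inertia=[1.45e-46], symmetry=1)
#   q = rot.getPartitionFunction(298.0)   # roughly 1e2, i.e. T / theta_rot
#
# where theta_rot = h^2 / (8 pi^2 I kB) is about 2.8 K for CO.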
################################################################################
class HinderedRotor(Mode):
"""
A one-dimensional hindered rotor using one of two potential functions:
    the cosine potential function
.. math:: V(\\phi) = \\frac{1}{2} V_0 \\left[1 - \\cos \\left( \\sigma \\phi \\right) \\right]
where :math:`V_0` is the height of the potential barrier and
:math:`\\sigma` is the number of minima or maxima in one revolution of
angle :math:`\\phi`, equivalent to the symmetry number of that rotor;
or a Fourier series
.. math:: V(\\phi) = A + \\sum_{k=1}^C \\left( a_k \\cos k \\phi + b_k \\sin k \\phi \\right)
For the cosine potential, the hindered rotor is described by the `barrier`
height in J/mol. For the Fourier series potential, the potential is instead
defined by a :math:`C \\times 2` array `fourier` containing the Fourier
coefficients. Both forms require the reduced moment of `inertia` of the
rotor in kg*m^2 and the `symmetry` number.
If both sets of parameters are available, the Fourier series will be used,
as it is more accurate. However, it is also significantly more
computationally demanding.
"""
def __init__(self, inertia=0.0, barrier=0.0, symmetry=1, fourier=None):
self.inertia = inertia
self.barrier = barrier
self.symmetry = symmetry
self.fourier = fourier
self.energies = None
if self.fourier is not None: self.energies = self.__solveSchrodingerEquation()
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
object.
"""
return 'HinderedRotor(inertia=%g, barrier=%g, symmetry=%g, fourier=%s)' % (self.inertia, self.barrier, self.symmetry, self.fourier)
def getPotential(self, phi):
"""
Return the values of the hindered rotor potential :math:`V(\\phi)`
in J/mol at the angles `phi` in radians.
"""
cython.declare(V=numpy.ndarray, k=cython.int)
V = numpy.zeros_like(phi)
if self.fourier is not None:
for k in range(self.fourier.shape[1]):
V += self.fourier[0,k] * numpy.cos((k+1) * phi) + self.fourier[1,k] * numpy.sin((k+1) * phi)
V -= numpy.sum(self.fourier[0,:])
else:
V = 0.5 * self.barrier * (1 - numpy.cos(self.symmetry * phi))
return V
def __solveSchrodingerEquation(self):
"""
Solves the one-dimensional time-independent Schrodinger equation
.. math:: -\\frac{\\hbar}{2I} \\frac{d^2 \\psi}{d \\phi^2} + V(\\phi) \\psi(\\phi) = E \\psi(\\phi)
where :math:`I` is the reduced moment of inertia for the rotor and
:math:`V(\\phi)` is the rotation potential function, to determine the
energy levels of a one-dimensional hindered rotor with a Fourier series
potential. The solution method utilizes an orthonormal basis set
expansion of the form
        .. math:: \\psi (\\phi) = \\sum_{m=-M}^M c_m \\frac{e^{im\\phi}}{\\sqrt{2\\pi}}
which converts the Schrodinger equation into a standard eigenvalue
problem. For the purposes of this function it is sufficient to set
:math:`M = 200`, which corresponds to 401 basis functions. Returns the
energy eigenvalues of the Hamiltonian matrix in J/mol.
"""
cython.declare(M=cython.int, m=cython.int, row=cython.int, n=cython.int)
cython.declare(H=numpy.ndarray, fourier=numpy.ndarray, A=cython.double, E=numpy.ndarray)
        # The number of terms to use is 2*M + 1, ranging from -M to M inclusive
M = 200
# Populate Hamiltonian matrix
H = numpy.zeros((2*M+1,2*M+1), numpy.complex64)
fourier = self.fourier / constants.Na / 2.0
A = numpy.sum(self.fourier[0,:]) / constants.Na
row = 0
for m in range(-M, M+1):
H[row,row] = A + constants.h * constants.h * m * m / (8 * math.pi * math.pi * self.inertia)
for n in range(fourier.shape[1]):
if row-n-1 > -1: H[row,row-n-1] = complex(fourier[0,n], - fourier[1,n])
if row+n+1 < 2*M+1: H[row,row+n+1] = complex(fourier[0,n], fourier[1,n])
row += 1
# The overlap matrix is the identity matrix, i.e. this is a standard
# eigenvalue problem
# Find the eigenvalues and eigenvectors of the Hamiltonian matrix
E, V = numpy.linalg.eigh(H)
# Return the eigenvalues
return (E - numpy.min(E)) * constants.Na
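    # A minimal usage sketch (illustrative values, not part of the original
    # module): `fourier` is a 2 x C array whose first row holds the cosine
    # coefficients a_k and whose second row holds the sine coefficients b_k,
    # both in J/mol. A pure cosine barrier V(phi) = (V0/2)*(1 - cos(3*phi))
    # with V0 = 12 kJ/mol corresponds to a_3 = -V0/2 and all other
    # coefficients zero:
    #
    #     fourier = numpy.zeros((2, 5))
    #     fourier[0, 2] = -6000.0   # a_3 = -V0/2 in J/mol
    #     rotor = HinderedRotor(inertia=1.0e-46, symmetry=3, fourier=fourier)
    #     print(rotor.energies[:5])   # lowest energy levels in J/mol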
def getPartitionFunction(self, T):
"""
        Return the value of the partition function at the specified temperature
        `T` in K. For the cosine potential, the formula makes use of the
        Pitzer-Gwinn approximation:
.. math:: q_\\mathrm{hind}(T) = \\frac{q_\\mathrm{vib}^\\mathrm{quant}(T)}{q_\\mathrm{vib}^\\mathrm{class}(T)} q_\\mathrm{hind}^\\mathrm{class}(T)
Substituting in for the right-hand side partition functions gives
.. math:: q_\\mathrm{hind}(T) = \\frac{h \\nu}{k_\\mathrm{B} T} \\frac{1}{1 - \\exp \\left(- h \\nu / k_\\mathrm{B} T \\right)} \\left( \\frac{2 \\pi I k_\\mathrm{B} T}{h^2} \\right)^{1/2} \\frac{2 \\pi}{\\sigma} \\exp \\left( -\\frac{V_0}{2 k_\\mathrm{B} T} \\right) I_0 \\left( \\frac{V_0}{2 k_\\mathrm{B} T} \\right)
where
.. math:: \\nu = \\frac{\\sigma}{2 \\pi} \\sqrt{\\frac{V_0}{2 I}}
:math:`T` is temperature, :math:`V_0` is the barrier height,
:math:`I` is the moment of inertia, :math:`\\sigma` is the symmetry
number, :math:`k_\\mathrm{B}` is the Boltzmann constant, and :math:`h`
is the Planck constant. :math:`I_0(x)` is the modified Bessel function
of order zero for argument :math:`x`.
For the Fourier series potential, we solve the corresponding 1D
Schrodinger equation to obtain the energy levels of the rotor and
utilize the expression
.. math:: q_\\mathrm{hind}(T) = \\frac{1}{\\sigma} \\sum_i e^{-\\beta E_i}
to obtain the partition function.
"""
if self.fourier is not None:
# Fourier series data found, so use it
# This means solving the 1D Schrodinger equation - slow!
cython.declare(Q=cython.double, E=numpy.ndarray, e_kT=numpy.ndarray, i=cython.int)
e_kT = numpy.exp(-self.energies / constants.R / T)
Q = numpy.sum(e_kT)
            return Q / self.symmetry
        else:
            # No Fourier data, so use the cosine potential formula
            cython.declare(frequency=cython.double, x=cython.double, z=cython.double)
frequency = self.getFrequency() * constants.c * 100
x = constants.h * frequency / (constants.kB * T)
z = 0.5 * self.barrier / (constants.R * T)
return x / (1 - numpy.exp(-x)) * numpy.sqrt(2 * math.pi * self.inertia * constants.kB * T / constants.h / constants.h) * (2 * math.pi / self.symmetry) * numpy.exp(-z) * besseli0(z)
def getHeatCapacity(self, T):
"""
Return the contribution to the heat capacity due to hindered rotation
        in J/mol*K at the specified temperature `T` in K.
For the cosine potential, the formula is
.. math:: \\frac{C_\\mathrm{v}^\\mathrm{hind}(T)}{R} = \\frac{C_\\mathrm{v}^\\mathrm{vib}(T)}{R} -\\frac{1}{2} + \\zeta^2 - \\left[ \\zeta \\frac{I_1(\\zeta)}{I_0(\\zeta)} \\right]^2 - \\zeta \\frac{I_1(\\zeta)}{I_0(\\zeta)}
where :math:`\\zeta \\equiv V_0 / 2 k_\\mathrm{B} T`,
:math:`T` is temperature, :math:`V_0` is the barrier height,
:math:`k_\\mathrm{B}` is the Boltzmann constant, and :math:`R` is the
gas law constant.
For the Fourier series potential, we solve the corresponding 1D
Schrodinger equation to obtain the energy levels of the rotor and
utilize the expression
.. math:: \\frac{C_\\mathrm{v}^\\mathrm{hind}(T)}{R} = \\beta^2 \\frac{\\left( \\sum_i E_i^2 e^{-\\beta E_i} \\right) \\left( \\sum_i e^{-\\beta E_i} \\right) - \\left( \\sum_i E_i e^{-\\beta E_i} \\right)^2}{\\left( \\sum_i e^{-\\beta E_i} \\right)^2}
to obtain the heat capacity.
"""
if self.fourier is not None:
cython.declare(Cv=cython.double, E=numpy.ndarray, e_kT=numpy.ndarray, i=cython.int)
E = self.energies
e_kT = numpy.exp(-E / constants.R / T)
Cv = (numpy.sum(E*E*e_kT) * numpy.sum(e_kT) - numpy.sum(E*e_kT)**2) / (constants.R*T*T * numpy.sum(e_kT)**2)
return Cv
else:
cython.declare(frequency=cython.double, x=cython.double, z=cython.double)
cython.declare(exp_x=cython.double, one_minus_exp_x=cython.double, BB=cython.double)
frequency = self.getFrequency() * constants.c * 100
x = constants.h * frequency / (constants.kB * T)
z = 0.5 * self.barrier / (constants.R * T)
exp_x = numpy.exp(x)
one_minus_exp_x = 1.0 - exp_x
BB = besseli1(z) / besseli0(z)
return (x * x * exp_x / one_minus_exp_x / one_minus_exp_x - 0.5 + z * (z - BB - z * BB * BB)) * constants.R
def getEnthalpy(self, T):
"""
        Return the contribution to the enthalpy due to hindered rotation
        in J/mol at the specified temperature `T` in K. For the cosine
potential, this is calculated numerically from the partition function.
For the Fourier series potential, we solve the corresponding 1D
Schrodinger equation to obtain the energy levels of the rotor and
utilize the expression
.. math:: H^\\mathrm{hind}(T) - H_0 = \\frac{\\sum_i E_i e^{-\\beta E_i}}{\\sum_i e^{-\\beta E_i}}
to obtain the enthalpy.
"""
if self.fourier is not None:
cython.declare(H=cython.double, E=numpy.ndarray, e_kT=numpy.ndarray, i=cython.int)
E = self.energies
e_kT = numpy.exp(-E / constants.R / T)
H = numpy.sum(E*e_kT) / numpy.sum(e_kT)
return H
else:
Tlow = T * 0.999
Thigh = T * 1.001
return (T *
(numpy.log(self.getPartitionFunction(Thigh)) -
numpy.log(self.getPartitionFunction(Tlow))) /
(Thigh - Tlow)) * constants.R * T
def getEntropy(self, T):
"""
        Return the contribution to the entropy due to hindered rotation
        in J/mol*K at the specified temperature `T` in K. For the cosine
potential, this is calculated numerically from the partition function.
For the Fourier series potential, we solve the corresponding 1D
Schrodinger equation to obtain the energy levels of the rotor and
utilize the expression
.. math:: S^\\mathrm{hind}(T) = R \\left( \\ln q_\\mathrm{hind}(T) + \\frac{\\sum_i E_i e^{-\\beta E_i}}{RT \\sum_i e^{-\\beta E_i}} \\right)
to obtain the entropy.
"""
if self.fourier is not None:
cython.declare(S=cython.double, E=numpy.ndarray, e_kT=numpy.ndarray, i=cython.int)
E = self.energies
S = constants.R * numpy.log(self.getPartitionFunction(T))
e_kT = numpy.exp(-E / constants.R / T)
S += numpy.sum(E*e_kT) / (T * numpy.sum(e_kT))
return S
else:
Tlow = T * 0.999
Thigh = T * 1.001
return (numpy.log(self.getPartitionFunction(Thigh)) +
T * (numpy.log(self.getPartitionFunction(Thigh)) -
numpy.log(self.getPartitionFunction(Tlow))) /
(Thigh - Tlow)) * constants.R
def getDensityOfStates(self, Elist):
"""
        Return the density of states at the specified energies `Elist` in J/mol
above the ground state. For the cosine potential, the formula is
.. math:: \\rho(E) = \\frac{2 q_\\mathrm{1f}}{\\pi^{3/2} V_0^{1/2}} \\mathcal{K}(E / V_0) \\hspace{20pt} E < V_0
and
.. math:: \\rho(E) = \\frac{2 q_\\mathrm{1f}}{\\pi^{3/2} E^{1/2}} \\mathcal{K}(V_0 / E) \\hspace{20pt} E > V_0
where
.. math:: q_\\mathrm{1f} = \\frac{\\pi^{1/2}}{\\sigma} \\left( \\frac{8 \\pi^2 I}{h^2} \\right)^{1/2}
:math:`E` is energy, :math:`V_0` is barrier height, and
:math:`\\mathcal{K}(x)` is the complete elliptic integral of the first
kind. There is currently no functionality for using the Fourier series
potential.
"""
cython.declare(rho=numpy.ndarray, q1f=cython.double, pre=cython.double, V0=cython.double, i=cython.int)
rho = numpy.zeros_like(Elist)
q1f = math.sqrt(8 * math.pi * math.pi * math.pi * self.inertia / constants.h / constants.h / constants.Na) / self.symmetry
V0 = self.barrier
pre = 2.0 * q1f / math.sqrt(math.pi * math.pi * math.pi * V0)
# The following is only valid in the classical limit
# Note that cellipk(1) = infinity, so we must skip that value
for i in range(len(Elist)):
if Elist[i] / V0 < 1:
rho[i] = pre * cellipk(Elist[i] / V0)
elif Elist[i] / V0 > 1:
rho[i] = pre * math.sqrt(V0 / Elist[i]) * cellipk(V0 / Elist[i])
return rho
def getFrequency(self):
"""
Return the frequency of vibration corresponding to the limit of
harmonic oscillation. The formula is
.. math:: \\nu = \\frac{\\sigma}{2 \\pi} \\sqrt{\\frac{V_0}{2 I}}
where :math:`\\sigma` is the symmetry number, :math:`V_0` the barrier
height, and :math:`I` the reduced moment of inertia of the rotor. The
units of the returned frequency are cm^-1.
"""
V0 = self.barrier
if self.fourier is not None:
V0 = -numpy.sum(self.fourier[:,0])
return self.symmetry / 2.0 / math.pi * math.sqrt(V0 / constants.Na / 2 / self.inertia) / (constants.c * 100)
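# A brief usage sketch for the cosine-potential form (illustrative values
# only): a threefold rotor with a 12 kJ/mol barrier and a reduced moment of
# inertia of 1.0e-46 kg*m^2, evaluated at 300 K.
#
#     rotor = HinderedRotor(inertia=1.0e-46, barrier=12000.0, symmetry=3)
#     print(rotor.getFrequency())               # harmonic-limit frequency in cm^-1
#     print(rotor.getPartitionFunction(300.0))  # dimensionless
#     print(rotor.getHeatCapacity(300.0))       # J/mol*K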
def besseli0(x):
"""
Return the value of the zeroth-order modified Bessel function at `x`.
"""
import scipy.special
return scipy.special.i0(x)
def besseli1(x):
"""
Return the value of the first-order modified Bessel function at `x`.
"""
import scipy.special
return scipy.special.i1(x)
def cellipk(x):
"""
Return the value of the complete elliptic integral of the first kind at `x`.
"""
import scipy.special
return scipy.special.ellipk(x)
################################################################################
class HarmonicOscillator(Mode):
"""
A representation of a set of vibrational modes as one-dimensional quantum
harmonic oscillator. The oscillators are defined by their `frequencies` in
cm^-1.
"""
def __init__(self, frequencies=None):
self.frequencies = frequencies or []
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
object.
"""
frequencies = ', '.join(['%g' % freq for freq in self.frequencies])
return 'HarmonicOscillator(frequencies=[%s])' % (frequencies)
def getPartitionFunction(self, T):
"""
        Return the value of the partition function at the specified temperature
        `T` in K. The formula is
.. math:: q_\\mathrm{vib}(T) = \\prod_i \\frac{1}{1 - e^{-\\xi_i}}
where :math:`\\xi_i \\equiv h \\nu_i / k_\\mathrm{B} T`,
:math:`T` is temperature, :math:`\\nu_i` is the frequency of vibration
:math:`i`, :math:`k_\\mathrm{B}` is the Boltzmann constant, :math:`h`
is the Planck constant, and :math:`R` is the gas law constant. Note
that we have chosen our zero of energy to be at the zero-point energy
of the molecule, *not* the bottom of the potential well.
"""
cython.declare(Q=cython.double, freq=cython.double)
Q = 1.0
for freq in self.frequencies:
Q = Q / (1 - numpy.exp(-freq / (0.695039 * T))) # kB = 0.695039 cm^-1/K
return Q
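    # A quick sanity check (a sketch, not part of the original module): for a
    # single mode at 1000 cm^-1 and T = 298.15 K, xi = 1000 / (0.695039 * 298.15)
    # is about 4.83, so q_vib = 1 / (1 - exp(-4.83)) is about 1.008, which is
    # what the line below returns.
    #
    #     print(HarmonicOscillator(frequencies=[1000.0]).getPartitionFunction(298.15))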
def getHeatCapacity(self, T):
"""
Return the contribution to the heat capacity due to vibration
        in J/mol*K at the specified temperature `T` in K. The formula is
.. math:: \\frac{C_\\mathrm{v}^\\mathrm{vib}(T)}{R} = \\sum_i \\xi_i^2 \\frac{e^{\\xi_i}}{\\left( 1 - e^{\\xi_i} \\right)^2}
where :math:`\\xi_i \\equiv h \\nu_i / k_\\mathrm{B} T`,
:math:`T` is temperature, :math:`\\nu_i` is the frequency of vibration
:math:`i`, :math:`k_\\mathrm{B}` is the Boltzmann constant, :math:`h`
is the Planck constant, and :math:`R` is the gas law constant.
"""
cython.declare(Cv=cython.double, freq=cython.double)
cython.declare(x=cython.double, exp_x=cython.double, one_minus_exp_x=cython.double)
Cv = 0.0
for freq in self.frequencies:
x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K
exp_x = numpy.exp(x)
one_minus_exp_x = 1.0 - exp_x
Cv = Cv + x * x * exp_x / one_minus_exp_x / one_minus_exp_x
return Cv * constants.R
def getEnthalpy(self, T):
"""
Return the contribution to the enthalpy due to vibration in J/mol at
        the specified temperature `T` in K. The formula is
.. math:: \\frac{H^\\mathrm{vib}(T)}{RT} = \\sum_i \\frac{\\xi_i}{e^{\\xi_i} - 1}
where :math:`\\xi_i \\equiv h \\nu_i / k_\\mathrm{B} T`,
:math:`T` is temperature, :math:`\\nu_i` is the frequency of vibration
:math:`i`, :math:`k_\\mathrm{B}` is the Boltzmann constant, :math:`h`
is the Planck constant, and :math:`R` is the gas law constant.
"""
cython.declare(H=cython.double, freq=cython.double)
cython.declare(x=cython.double, exp_x=cython.double)
H = 0.0
for freq in self.frequencies:
x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K
exp_x = numpy.exp(x)
H = H + x / (exp_x - 1)
return H * constants.R * T
def getEntropy(self, T):
"""
Return the contribution to the entropy due to vibration in J/mol*K at
        the specified temperature `T` in K. The formula is
.. math:: \\frac{S^\\mathrm{vib}(T)}{R} = \\sum_i \\left[ - \\ln \\left(1 - e^{-\\xi_i} \\right) + \\frac{\\xi_i}{e^{\\xi_i} - 1} \\right]
where :math:`\\xi_i \\equiv h \\nu_i / k_\\mathrm{B} T`,
:math:`T` is temperature, :math:`\\nu_i` is the frequency of vibration
:math:`i`, :math:`k_\\mathrm{B}` is the Boltzmann constant, :math:`h`
is the Planck constant, and :math:`R` is the gas law constant.
"""
cython.declare(S=cython.double, freq=cython.double)
cython.declare(x=cython.double, exp_x=cython.double)
S = numpy.log(self.getPartitionFunction(T))
for freq in self.frequencies:
x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K
exp_x = numpy.exp(x)
S = S + x / (exp_x - 1)
return S * constants.R
def getDensityOfStates(self, Elist, rho0=None):
"""
Return the density of states at the specified energies `Elist` in J/mol
above the ground state. The Beyer-Swinehart method is used to
efficiently convolve the vibrational density of states into the
density of states of other modes. To be accurate, this requires a small
(:math:`1-10 \\ \\mathrm{cm^{-1}}` or so) energy spacing.
"""
cython.declare(rho=numpy.ndarray, freq=cython.double)
cython.declare(dE=cython.double, nE=cython.int, dn=cython.int, n=cython.int)
if rho0 is not None:
rho = rho0
else:
rho = numpy.zeros_like(Elist)
dE = Elist[1] - Elist[0]
nE = len(Elist)
for freq in self.frequencies:
dn = int(freq * constants.h * constants.c * 100 * constants.Na / dE)
for n in range(dn+1, nE):
rho[n] = rho[n] + rho[n-dn]
return rho
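# A minimal sketch of driving the Beyer-Swinehart routine on its own
# (illustrative values, not part of the original module): seeding the grid
# with a single ground-state count yields the number of vibrational states in
# each energy bin; dividing by the grid spacing gives an approximate density
# of states in mol/J.
#
#     Elist = numpy.arange(0.0, 200000.0, 10.0)   # J/mol, slightly below 1 cm^-1 bins
#     rho0 = numpy.zeros_like(Elist)
#     rho0[0] = 1.0
#     vib = HarmonicOscillator(frequencies=[500.0, 1000.0, 1500.0])
#     counts = vib.getDensityOfStates(Elist, rho0)
#     rho = counts / (Elist[1] - Elist[0])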
################################################################################
class StatesModel:
"""
A set of molecular degrees of freedom data for a given molecule, comprising
the results of a quantum chemistry calculation.
=================== =================== ====================================
Attribute Type Description
=================== =================== ====================================
`modes` ``list`` A list of the degrees of freedom
`spinMultiplicity` ``int`` The spin multiplicity of the molecule
=================== =================== ====================================
"""
def __init__(self, modes=None, spinMultiplicity=1):
self.modes = modes or []
self.spinMultiplicity = spinMultiplicity
def getHeatCapacity(self, T):
"""
Return the constant-pressure heat capacity in J/mol*K at the specified
        temperature `T` in K.
"""
cython.declare(Cp=cython.double)
Cp = constants.R
for mode in self.modes:
Cp += mode.getHeatCapacity(T)
return Cp
def getEnthalpy(self, T):
"""
        Return the enthalpy in J/mol at the specified temperature `T` in K.
"""
cython.declare(H=cython.double)
H = constants.R * T
for mode in self.modes:
H += mode.getEnthalpy(T)
return H
def getEntropy(self, T):
"""
        Return the entropy in J/mol*K at the specified temperature `T` in K.
"""
cython.declare(S=cython.double)
S = 0.0
for mode in self.modes:
S += mode.getEntropy(T)
return S
def getPartitionFunction(self, T):
"""
        Return the value of the partition function at the specified temperature
        `T` in K. An active K-rotor is automatically included if there are
no external rotational modes.
"""
cython.declare(Q=cython.double, Trot=cython.double)
Q = 1.0
# Active K-rotor
rotors = [mode for mode in self.modes if isinstance(mode, RigidRotor)]
if len(rotors) == 0:
Trot = 1.0 / constants.R / 3.141592654
Q *= numpy.sqrt(T / Trot)
# Other modes
for mode in self.modes:
Q *= mode.getPartitionFunction(T)
return Q * self.spinMultiplicity
def getDensityOfStates(self, Elist):
"""
Return the value of the density of states in mol/J at the specified
energies `Elist` in J/mol above the ground state. An active K-rotor is
automatically included if there are no external rotational modes.
"""
cython.declare(rho=numpy.ndarray, i=cython.int, E=cython.double)
rho = numpy.zeros_like(Elist)
# Active K-rotor
rotors = [mode for mode in self.modes if isinstance(mode, RigidRotor)]
if len(rotors) == 0:
rho0 = numpy.zeros_like(Elist)
for i, E in enumerate(Elist):
if E > 0: rho0[i] = 1.0 / math.sqrt(1.0 * E)
rho = convolve(rho, rho0, Elist)
# Other non-vibrational modes
for mode in self.modes:
if not isinstance(mode, HarmonicOscillator):
rho = convolve(rho, mode.getDensityOfStates(Elist), Elist)
# Vibrational modes
for mode in self.modes:
if isinstance(mode, HarmonicOscillator):
rho = mode.getDensityOfStates(Elist, rho)
return rho * self.spinMultiplicity
def getSumOfStates(self, Elist):
"""
Return the value of the sum of states at the specified energies `Elist`
in J/mol above the ground state. The sum of states is computed via
numerical integration of the density of states.
"""
cython.declare(densStates=numpy.ndarray, sumStates=numpy.ndarray, i=cython.int, dE=cython.double)
densStates = self.getDensityOfStates(Elist)
sumStates = numpy.zeros_like(densStates)
dE = Elist[1] - Elist[0]
for i in range(len(densStates)):
sumStates[i] = numpy.sum(densStates[0:i]) * dE
return sumStates
def getPartitionFunctions(self, Tlist):
return numpy.array([self.getPartitionFunction(T) for T in Tlist], numpy.float64)
def getHeatCapacities(self, Tlist):
return numpy.array([self.getHeatCapacity(T) for T in Tlist], numpy.float64)
def getEnthalpies(self, Tlist):
return numpy.array([self.getEnthalpy(T) for T in Tlist], numpy.float64)
def getEntropies(self, Tlist):
return numpy.array([self.getEntropy(T) for T in Tlist], numpy.float64)
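    # A short usage sketch (illustrative values only): a species described by
    # three vibrational modes, evaluated over a small set of temperatures.
    #
    #     vib = HarmonicOscillator(frequencies=[500.0, 1000.0, 1500.0])
    #     states = StatesModel(modes=[vib], spinMultiplicity=1)
    #     Tlist = numpy.array([300.0, 500.0, 1000.0])
    #     print(states.getHeatCapacities(Tlist))   # J/mol*K
    #     print(states.getEnthalpies(Tlist))       # J/mol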
def __phi(self, beta, E):
beta = float(beta)
cython.declare(T=numpy.ndarray, Q=cython.double)
Q = self.getPartitionFunction(1.0 / (constants.R * beta))
return math.log(Q) + beta * float(E)
def getDensityOfStatesILT(self, Elist, order=1):
"""
Return the value of the density of states in mol/J at the specified
energies `Elist` in J/mol above the ground state, calculated by
numerical inverse Laplace transform of the partition function using
the method of steepest descents. This method is generally slower than
direct density of states calculation, but is guaranteed to correspond
with the partition function. The optional `order` attribute controls
the order of the steepest descents approximation applied (1 = first,
2 = second); the first-order approximation is slightly less accurate,
smoother, and faster to calculate than the second-order approximation.
This method is adapted from the discussion in Forst [Forst2003]_.
.. [Forst2003] W. Forst.
*Unimolecular Reactions: A Concise Introduction.*
Cambridge University Press (2003).
`isbn:978-0-52-152922-8 <http://www.cambridge.org/9780521529228>`_
"""
import scipy.optimize
cython.declare(rho=numpy.ndarray)
cython.declare(x=cython.double, E=cython.double, dx=cython.double, f=cython.double)
cython.declare(d2fdx2=cython.double, d3fdx3=cython.double, d4fdx4=cython.double)
rho = numpy.zeros_like(Elist)
# Initial guess for first minimization
x = 1e-5
# Iterate over energies
for i in range(1, len(Elist)):
E = Elist[i]
            # Minimize phi(beta, E) with respect to beta; the positional arguments
            # to scipy.optimize.fmin are (func, x0, args, xtol, ftol, maxiter,
            # maxfun, full_output, disp, retall, callback)
x = scipy.optimize.fmin(self.__phi, x, [Elist[i]], 1e-8, 1e-8, 100, 1000, False, False, False, None)
x = float(x)
dx = 1e-4 * x
# Determine value of density of states using steepest descents approximation
d2fdx2 = (self.__phi(x+dx, E) - 2 * self.__phi(x, E) + self.__phi(x-dx, E)) / (dx**2)
# Apply first-order steepest descents approximation (accurate to 1-3%, smoother)
f = self.__phi(x, E)
rho[i] = math.exp(f) / math.sqrt(2 * math.pi * d2fdx2)
if order == 2:
# Apply second-order steepest descents approximation (more accurate, less smooth)
d3fdx3 = (self.__phi(x+1.5*dx, E) - 3 * self.__phi(x+0.5*dx, E) + 3 * self.__phi(x-0.5*dx, E) - self.__phi(x-1.5*dx, E)) / (dx**3)
d4fdx4 = (self.__phi(x+2*dx, E) - 4 * self.__phi(x+dx, E) + 6 * self.__phi(x, E) - 4 * self.__phi(x-dx, E) + self.__phi(x-2*dx, E)) / (dx**4)
rho[i] *= 1 + d4fdx4 / 8 / (d2fdx2**2) - 5 * (d3fdx3**2) / 24 / (d2fdx2**3)
return rho
def convolve(rho1, rho2, Elist):
"""
    Convolves two density of states arrays `rho1` and `rho2` with corresponding
energies `Elist` together using the equation
.. math:: \\rho(E) = \\int_0^E \\rho_1(x) \\rho_2(E-x) \\, dx
The units of the parameters do not matter so long as they are consistent.
"""
cython.declare(rho=numpy.ndarray, found1=cython.bint, found2=cython.bint)
cython.declare(dE=cython.double, nE=cython.int, i=cython.int, j=cython.int)
rho = numpy.zeros_like(Elist)
found1 = rho1.any(); found2 = rho2.any()
if not found1 and not found2:
pass
elif found1 and not found2:
rho = rho1
elif not found1 and found2:
rho = rho2
else:
dE = Elist[1] - Elist[0]
nE = len(Elist)
for i in range(nE):
for j in range(i+1):
                rho[i] += rho2[i-j] * rho1[j] * dE
return rho
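# A small illustrative check of convolve() (a sketch, not part of the original
# module): convolving a constant density of states with itself on a uniform
# grid gives an approximately linear result, as expected from
# rho(E) = integral from 0 to E of 1 * 1 dx = E.
#
#     Elist = numpy.arange(0.0, 1000.0, 1.0)
#     rho1 = numpy.ones_like(Elist)
#     rho = convolve(rho1, rho1, Elist)   # approximately equal to Elist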
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for Juniper SRX acl rendering module."""
import copy
import datetime
import unittest
from lib import aclgenerator
from lib import junipersrx
from lib import nacaddr
from lib import naming
from lib import policy
import mox
GOOD_HEADER = """
header {
comment:: "This is a test acl with a comment"
target:: srx from-zone trust to-zone untrust
}
"""
GOOD_HEADER_2 = """
header {
comment:: "This is a header from untrust to trust"
target:: srx from-zone untrust to-zone trust
}
"""
GOOD_HEADER_3 = """
header {
comment:: "This is a test acl with a comment"
target:: srx from-zone trust to-zone untrust inet
}
"""
GOOD_HEADER_4 = """
header {
comment:: "This is a test acl with a comment"
target:: srx from-zone trust to-zone untrust inet6
}
"""
GOOD_HEADER_5 = """
header {
target:: srx from-zone trust to-zone untrust inet
apply-groups:: tcp-test1 tcp-test2
}
"""
GOOD_HEADER_6 = """
header {
target:: srx from-zone trust to-zone untrust inet
apply-groups-except:: tcp-test1 tcp-test2
}
"""
GOOD_HEADER_7 = """
header {
comment:: "This is a test acl with a comment"
target:: srx from-zone trust to-zone untrust address-book-zone inet
}
"""
GOOD_HEADER_8 = """
header {
comment:: "This is a test acl with a comment"
target:: srx from-zone trust to-zone untrust address-book-zone inet6
}
"""
GOOD_HEADER_9 = """
header {
comment:: "This is a test acl with a comment"
target:: srx from-zone trust to-zone untrust address-book-zone
}
"""
GOOD_HEADER_10 = """
header {
comment:: "This is a test acl with a global policy"
target:: srx from-zone all to-zone all address-book-global
}
"""
GOOD_HEADER_11 = """
header {
comment:: "This is a test acl with a comment"
target:: srx from-zone trust to-zone dmz
}
"""
BAD_HEADER = """
header {
target:: srx something
}
"""
BAD_HEADER_1 = """
header {
comment:: "This header has two address families"
target:: srx from-zone trust to-zone untrust inet6 mixed
}
"""
BAD_HEADER_2 = """
header {
comment:: "This header has two address-book-types"
target:: srx from-zone trust to-zone untrust address-book-zone address-book-zone
}
"""
BAD_HEADER_3 = """
header {
comment:: "This is a test acl with a global policy"
target:: srx from-zone all to-zone all address-book-zone
}
"""
BAD_HEADER_4 = """
header {
comment:: "This is a test acl with a global policy"
target:: srx from-zone test to-zone all
}
"""
GOOD_TERM_1 = """
term good-term-1 {
comment:: "This header is very very very very very very very very very very very very very very very very very very very very large"
destination-address:: SOME_HOST
destination-port:: SMTP
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_2 = """
term good-term-2 {
destination-address:: SOME_HOST
destination-port:: SMTP
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_3 = """
term good-term-3 {
destination-address:: SOME_HOST
protocol:: tcp
action:: accept
vpn:: good-vpn-3
}
"""
GOOD_TERM_4 = """
term good-term-4 {
destination-address:: SOME_HOST
protocol:: tcp
action:: accept
vpn:: good-vpn-4 policy-4
}
"""
GOOD_TERM_5 = '''
term good-term-5 {
action:: accept
logging:: log-both
}
'''
GOOD_TERM_10 = """
term good-term-10 {
destination-address:: SOME_HOST
action:: accept
dscp-set:: b111000
}
"""
GOOD_TERM_11 = """
term good-term-11 {
destination-address:: SOME_HOST
action:: accept
dscp-set:: af42
dscp-match:: af41-af42 5
dscp-except:: be
}
"""
GOOD_TERM_12 = """
term dup-of-term-1 {
destination-address:: FOOBAR
destination-port:: SMTP
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_13 = """
term dup-of-term-1 {
destination-address:: FOOBAR SOME_HOST
destination-port:: SMTP
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_14 = """
term term_to_split {
source-address:: FOOBAR
destination-address:: SOME_HOST
destination-port:: SMTP
protocol:: tcp
action:: accept
}
"""
BAD_TERM_1 = """
term bad-term-1 {
destination-address:: SOME_HOST
protocol:: tcp
action:: deny
vpn:: good-vpn-4 policy-4
}
"""
EXPIRED_TERM_1 = """
term expired_test {
expiration:: 2000-1-1
action:: deny
}
"""
EXPIRING_TERM = """
term is_expiring {
expiration:: %s
action:: accept
}
"""
ICMP_TYPE_TERM_1 = """
term test-icmp {
protocol:: icmp
icmp-type:: echo-request echo-reply
action:: accept
}
"""
IPV6_ICMP_TERM = """
term test-ipv6_icmp {
protocol:: icmpv6
action:: accept
}
"""
BAD_ICMP_TERM_1 = """
term test-icmp {
icmp-type:: echo-request echo-reply
action:: accept
}
"""
ICMP_ONLY_TERM_1 = """
term test-icmp {
protocol:: icmp
action:: accept
}
"""
OWNER_TERM = """
term owner-test {
owner:: [email protected]
action:: accept
}
"""
MULTIPLE_PROTOCOLS_TERM = """
term multi-proto {
protocol:: tcp udp icmp
action:: accept
}
"""
DEFAULT_TERM_1 = """
term default-term-1 {
action:: deny
}
"""
TIMEOUT_TERM = """
term timeout-term {
protocol:: icmp
icmp-type:: echo-request
timeout:: 77
action:: accept
}
"""
# Print an info message when a term is set to expire in that many weeks.
# This is normally passed from command line.
EXP_INFO = 2
_IPSET = [nacaddr.IP('10.0.0.0/8'),
nacaddr.IP('2001:4860:8000::/33')]
_IPSET2 = [nacaddr.IP('10.23.0.0/22'), nacaddr.IP('10.23.0.6/23')]
_IPSET3 = [nacaddr.IP('10.23.0.0/23')]
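# A minimal standalone usage sketch (not part of the test suite; it assumes a
# directory of naming definitions, e.g. './def', is available on disk):
#
#   defs = naming.Naming('./def')
#   pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2, defs)
#   print(junipersrx.JuniperSRX(pol, EXP_INFO))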
class JuniperSRXTest(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.naming = self.mox.CreateMock(naming.Naming)
def tearDown(self):
self.mox.VerifyAll()
self.mox.ResetAll()
def testHeaderComment(self):
pol = policy.ParsePolicy(GOOD_HEADER + ICMP_TYPE_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('This is a test acl with a comment' in output, output)
def testHeaderApplyGroups(self):
pol = policy.ParsePolicy(GOOD_HEADER_5 + ICMP_TYPE_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('apply-groups [ tcp-test1 tcp-test2 ]' in output,
output)
def testHeaderApplyGroupsExcept(self):
pol = policy.ParsePolicy(GOOD_HEADER_6 + ICMP_TYPE_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('apply-groups-except [ tcp-test1 tcp-test2 ]' in output,
output)
def testLongComment(self):
expected_output = """
/*
This header is very very very very very very very very very very
very very very very very very very very very very large
*/"""
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1,
self.naming), EXP_INFO)
output = str(srx)
self.failUnless(expected_output in output, output)
def testTermAndFilterName(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1,
self.naming), EXP_INFO)
output = str(srx)
self.failUnless('policy good-term-1 {' in output, output)
def testVpnWithoutPolicy(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.mox.ReplayAll()
srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_3,
self.naming), EXP_INFO)
output = str(srx)
self.failUnless('ipsec-vpn good-vpn-3;' in output, output)
def testVpnWithPolicy(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.mox.ReplayAll()
srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_4,
self.naming), EXP_INFO)
output = str(srx)
self.failUnless('ipsec-vpn good-vpn-4;' in output, output)
self.failUnless('pair-policy policy-4;' in output, output)
def testVpnWithDrop(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.mox.ReplayAll()
srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + BAD_TERM_1,
self.naming), EXP_INFO)
output = str(srx)
self.failUnless('ipsec-vpn good-vpn-4;' not in output, output)
self.failUnless('pair-policy policy-4;' not in output, output)
def testDefaultDeny(self):
self.mox.ReplayAll()
srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + DEFAULT_TERM_1,
self.naming), EXP_INFO)
output = str(srx)
self.failUnless('deny;' in output, output)
def testIcmpTypes(self):
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + ICMP_TYPE_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('application test-icmp-app;' in output, output)
self.failUnless('application test-icmp-app {' in output, output)
self.failUnless('term t1 protocol icmp icmp-type 0 inactivity-timeout 60'
in output, output)
self.failUnless('term t2 protocol icmp icmp-type 8 inactivity-timeout 60'
in output, output)
def testLoggingBoth(self):
self.mox.ReplayAll()
srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_5,
self.naming), EXP_INFO)
output = str(srx)
self.failUnless('session-init;' in output, output)
self.failUnless('session-close;' in output, output)
def testOwnerTerm(self):
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + OWNER_TERM, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless(' /*\n'
' Owner: [email protected]\n'
' */' in output, output)
def testBadICMP(self):
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + BAD_ICMP_TERM_1, self.naming)
self.assertRaises(aclgenerator.UnsupportedFilterError,
junipersrx.JuniperSRX, pol, EXP_INFO)
def testICMPProtocolOnly(self):
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + ICMP_ONLY_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('protocol icmp;' in output, output)
def testMultipleProtocolGrouping(self):
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + MULTIPLE_PROTOCOLS_TERM, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('application-set multi-proto-app {' in output, output)
self.failUnless('application multi-proto-app1;' in output, output)
self.failUnless('application multi-proto-app2;' in output, output)
self.failUnless('application multi-proto-app3;' in output, output)
self.failUnless('application multi-proto-app1 {' in output, output)
self.failUnless('term t1 protocol tcp;' in output, output)
self.failUnless('application multi-proto-app2 {' in output, output)
self.failUnless('term t2 protocol udp;' in output, output)
self.failUnless('application multi-proto-app3 {' in output, output)
self.failUnless('term t3 protocol icmp;' in output, output)
def testGlobalPolicyHeader(self):
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_10 + MULTIPLE_PROTOCOLS_TERM,
self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.assertEqual(output.count('global {'), 2)
self.assertFalse('from-zone all to-zone all {' in output)
def testBadGlobalPolicyHeaderZoneBook(self):
self.mox.ReplayAll()
pol = policy.ParsePolicy(BAD_HEADER_3 + MULTIPLE_PROTOCOLS_TERM,
self.naming)
self.assertRaises(junipersrx.UnsupportedFilterError, junipersrx.JuniperSRX,
pol, EXP_INFO)
def testBadGlobalPolicyHeaderNameAll(self):
self.mox.ReplayAll()
pol = policy.ParsePolicy(BAD_HEADER_4 + MULTIPLE_PROTOCOLS_TERM,
self.naming)
self.assertRaises(junipersrx.UnsupportedFilterError, junipersrx.JuniperSRX,
pol, EXP_INFO)
def testBadHeaderType(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(BAD_HEADER + GOOD_TERM_1, self.naming)
self.assertRaises(junipersrx.UnsupportedFilterError, junipersrx.JuniperSRX,
pol, EXP_INFO)
def testBadHeaderMultiAF(self):
    # test for multiple address families in header
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(BAD_HEADER_1 + GOOD_TERM_1, self.naming)
self.assertRaises(junipersrx.ConflictingTargetOptions,
junipersrx.JuniperSRX,
pol, EXP_INFO)
def testBadHeaderMultiAB(self):
# test for multiple address-book-types in header
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(BAD_HEADER_2 + GOOD_TERM_1, self.naming)
self.assertRaises(junipersrx.ConflictingTargetOptions,
junipersrx.JuniperSRX,
pol, EXP_INFO)
def testExpiredTerm(self):
self.mox.StubOutWithMock(junipersrx.logging, 'warn')
# create mock to ensure we warn about expired terms being skipped
junipersrx.logging.warn('WARNING: Term %s in policy %s>%s is expired.',
'expired_test', 'trust', 'untrust')
self.mox.ReplayAll()
_ = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + EXPIRED_TERM_1,
self.naming), EXP_INFO)
def testExpiringTerm(self):
self.mox.StubOutWithMock(junipersrx.logging, 'info')
# create mock to ensure we inform about expiring terms
junipersrx.logging.info('INFO: Term %s in policy %s>%s expires in '
'less than two weeks.', 'is_expiring',
'trust', 'untrust')
self.mox.ReplayAll()
exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO)
_ = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + EXPIRING_TERM %
exp_date.strftime('%Y-%m-%d'),
self.naming), EXP_INFO)
def testTimeout(self):
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + TIMEOUT_TERM, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('timeout 77' in output, output)
def testIcmpV6(self):
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + IPV6_ICMP_TERM, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('protocol icmp6' in output, output)
def testReplaceStatement(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('replace: address-book' in output, output)
self.failUnless('replace: policies' in output, output)
self.failUnless('replace: applications' in output, output)
def testAdressBookBothAFs(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('replace: address-book {' in output, output)
self.failUnless('global {' in output, output)
self.failUnless('2001:4860:8000::/33' in output, output)
self.failUnless('10.0.0.0/8' in output, output)
def testAdressBookIPv4(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('replace: address-book {' in output, output)
self.failUnless('global {' in output, output)
self.failUnless('2001:4860:8000::/33' not in output, output)
self.failUnless('10.0.0.0/8' in output, output)
def testAdressBookIPv6(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_4 + GOOD_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('replace: address-book {' in output, output)
self.failUnless('global {' in output, output)
self.failUnless('2001:4860:8000::/33' in output, output)
self.failUnless('10.0.0.0/8' not in output, output)
def testAddressBookContainsSmallerPrefix(self):
_IPSET2[0].parent_token = 'FOOBAR'
_IPSET2[1].parent_token = 'SOME_HOST'
_IPSET3[0].parent_token = 'FOOBAR'
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET2)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.naming.GetNetAddr('FOOBAR').AndReturn(_IPSET3)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1 + GOOD_HEADER_2 +
GOOD_TERM_12, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('address FOOBAR_0 10.23.0.0/22;' in output, output)
def testAddressBookContainsLargerPrefix(self):
_IPSET2[0].parent_token = 'FOOBAR'
_IPSET2[1].parent_token = 'SOME_HOST'
_IPSET3[0].parent_token = 'FOOBAR'
self.naming.GetNetAddr('FOOBAR').AndReturn(_IPSET3)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET2)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_12 + GOOD_HEADER +
GOOD_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('address FOOBAR_0 10.23.0.0/22;' in output, output)
def testZoneAdressBookBothAFs(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_9 + GOOD_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('security-zone untrust {' in output, output)
self.failUnless('replace: address-book {' in output, output)
self.failUnless('2001:4860:8000::/33' in output, output)
self.failUnless('10.0.0.0/8' in output, output)
def testZoneAdressBookIPv4(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_7 + GOOD_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('security-zone untrust {' in output, output)
self.failUnless('replace: address-book {' in output, output)
self.failUnless('2001:4860:8000::/33' not in output, output)
self.failUnless('10.0.0.0/8' in output, output)
def testZoneAdressBookIPv6(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_8 + GOOD_TERM_1, self.naming)
output = str(junipersrx.JuniperSRX(pol, EXP_INFO))
self.failUnless('security-zone untrust {' in output, output)
self.failUnless('replace: address-book {' in output, output)
self.failUnless('2001:4860:8000::/33' in output, output)
self.failUnless('10.0.0.0/8' not in output, output)
def _FailIfUnorderedAddressBook(self, address_book):
    # This is a very naive check that expects the addresses to be exactly as
    # returned by the _OutOfOrderAddresses method. If you modify that method,
    # please update this check as well.
for line in address_book:
if '10.0.0.0/8' in line:
self.fail('Addresses in address book are out of order.')
elif '1.0.0.0/8' in line:
break
def _OutOfOrderAddresses(self):
x = nacaddr.IP('10.0.0.0/8')
x.parent_token = 'test'
y = nacaddr.IP('1.0.0.0/8')
y.parent_token = 'out_of_order'
return x, y
def testAddressBookOrderingSuccess(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(self._OutOfOrderAddresses())
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_2, self.naming)
p = junipersrx.JuniperSRX(pol, EXP_INFO)
self._FailIfUnorderedAddressBook(p._GenerateAddressBook())
def testAddressBookOrderingAlreadyOrdered(self):
y, x = self._OutOfOrderAddresses()
self.naming.GetNetAddr('SOME_HOST').AndReturn([x, y])
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_2, self.naming)
p = junipersrx.JuniperSRX(pol, EXP_INFO)
self._FailIfUnorderedAddressBook(p._GenerateAddressBook())
def _AssertOrder(self, strings, expected_order):
order = copy.copy(expected_order)
matcher = order.pop(0)
for line in strings:
if matcher in line:
if not order:
return
matcher = order.pop(0)
self.fail('Strings weren\'t in expected order.\nExpected:\n %s\n\nGot:\n%s'
% ('\n '.join(expected_order), '\n'.join(strings)))
def testApplicationsOrderingSuccess(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_2 + GOOD_TERM_1,
self.naming)
p = junipersrx.JuniperSRX(pol, EXP_INFO)
self._AssertOrder(p._GenerateApplications(),
['application good-term-1-app1',
'application good-term-2-app1',
'application-set good-term-1-app',
'application-set good-term-2-app'])
def testApplicationsOrderingAlreadyOrdered(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_3 + GOOD_TERM_1 + GOOD_TERM_2,
self.naming)
p = junipersrx.JuniperSRX(pol, EXP_INFO)
self._AssertOrder(p._GenerateApplications(),
['application good-term-1-app1',
'application good-term-2-app1',
'application-set good-term-1-app',
'application-set good-term-2-app'])
def testDscpWithByte(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn([nacaddr.IP('10.0.0.0/8')])
self.mox.ReplayAll()
srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_10,
self.naming), EXP_INFO)
output = str(srx)
self.failUnless('dscp b111000;' in output, output)
def testDscpWithClass(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn([nacaddr.IP('10.0.0.0/8')])
self.mox.ReplayAll()
srx = junipersrx.JuniperSRX(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_11,
self.naming), EXP_INFO)
output = str(srx)
self.failUnless('dscp af42;' in output, output)
self.failUnless('dscp [ af41-af42 5 ];' in output, output)
self.failUnless('dscp-except [ be ];' in output, output)
def testLargeTermSplitting(self):
ips = list(nacaddr.IP('10.0.8.0/21').iter_subnets(new_prefix=32))
mo_ips = []
counter = 0
for ip in ips:
if counter%2 == 0:
mo_ips.append(nacaddr.IP(ip))
counter += 1
self.naming.GetNetAddr('FOOBAR').AndReturn(mo_ips)
ips = list(nacaddr.IP('10.0.0.0/21').iter_subnets(new_prefix=32))
prodcolos_ips = []
counter = 0
for ip in ips:
if counter%2 == 0:
prodcolos_ips.append(nacaddr.IP(ip))
counter += 1
self.naming.GetNetAddr('SOME_HOST').AndReturn(prodcolos_ips)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_14, self.naming)
srx = junipersrx.JuniperSRX(pol, EXP_INFO)
self.assertEqual(len(srx.policy.filters[0][1]), 4)
def testLargeTermSplittingV6(self):
ips = list(nacaddr.IP('2620:0:1000:3103:eca0:2c09:6b32:e000/119'
).iter_subnets(new_prefix=128))
mo_ips = []
counter = 0
for ip in ips:
if counter%2 == 0:
mo_ips.append(nacaddr.IP(ip))
counter += 1
self.naming.GetNetAddr('FOOBAR').AndReturn(mo_ips)
ips = list(nacaddr.IP('2720:0:1000:3103:eca0:2c09:6b32:e000/119'
).iter_subnets(new_prefix=128))
prodcolos_ips = []
counter = 0
for ip in ips:
if counter%2 == 0:
prodcolos_ips.append(nacaddr.IP(ip))
counter += 1
self.naming.GetNetAddr('SOME_HOST').AndReturn(prodcolos_ips)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_14, self.naming)
srx = junipersrx.JuniperSRX(pol, EXP_INFO)
self.assertEqual(len(srx.policy.filters[0][1]), 4)
def testDuplicateTermsInDifferentZones(self):
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['25'])
self.naming.GetNetAddr('SOME_HOST').AndReturn(_IPSET)
self.naming.GetServiceByProto('SMTP', 'tcp').AndReturn(['26'])
self.mox.ReplayAll()
pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2 + GOOD_HEADER_11 +
GOOD_TERM_2, self.naming)
self.assertRaises(junipersrx.ConflictingApplicationSets,
junipersrx.JuniperSRX, pol, EXP_INFO)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""
Identifies images with polarimetric calibration stars and measures the
photometry of those stars for each of the four unique polaroid filter rotation
angles. The photometric measurements are converted into polarimetric values,
which are stored in a .csv file for analysis in the next step.
"""
# Core imports
import os
import sys
import warnings
# Import scipy/numpy packages
import numpy as np
# Import astropy packages
from astropy.wcs import WCS
from astropy.table import Table, Column, hstack, join
import astropy.units as u
from astropy.coordinates import SkyCoord, FK4, FK5
from astropy.stats import sigma_clipped_stats
from photutils import (centroid_com, aperture_photometry, CircularAperture,
CircularAnnulus)
# Import plotting utilities
from matplotlib import pyplot as plt
# Add the AstroImage class
import astroimage as ai
# This script will compute the photometry of polarization standard stars
# and output a file containing the polarization position angle
# additive correction and the polarization efficiency of the PRISM instrument.
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# Define how the font will appear in the plots
font = {
'family': 'sans-serif',
'color': 'black',
'weight': 'normal',
'size': 14
}
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyPol_data\\201612'
# This is the name of the file in which the calibration constants will be stored
calDataFile = os.path.join(pyPol_data, 'calData.csv')
# The user needs to specify the "Target" values associated with
# calibration data in the fileIndex.
calibrationTargets = ['Taurus_Cal', 'Orion_Cal', 'Cyg_OB2']
calibrationTargets = [t.upper() for t in calibrationTargets]
# Define the saturation limit used to decide whether or not to trust photometry
satLimit = 18e3
# Setup new directory for polarimetry data
polarimetryDir = os.path.join(pyPol_data, 'Polarimetry')
if (not os.path.isdir(polarimetryDir)):
    raise ValueError('{} does not exist'.format(polarimetryDir))
polAngDir = os.path.join(polarimetryDir, 'polAngImgs')
if (not os.path.isdir(polAngDir)):
raise ValueError('{} does not exist'.format(polAngDir))
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='ascii.csv')
# Read in the polarization standards file
print('Reading polarization data from disk')
polStandardFile = os.path.join('polStandards.csv')
polStandards = Table.read(polStandardFile, format='ascii.csv')
# Construct SkyCoord object containing the coordinates of the standards
ra1 = polStandards['RA_1950'].data
dec1 = polStandards['Dec_1950'].data
polStandardCoords = SkyCoord(ra = ra1, dec = dec1,
unit = (u.hour, u.degree), frame = FK4(equinox='B1950'))
# Transform the coordinates to the FK5 - J2000 system.
polStandardCoords = polStandardCoords.transform_to(FK5(equinox='J2000'))
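# For reference, the same B1950 -> J2000 transform applied to a single,
# hypothetical coordinate (values chosen purely for illustration):
#
#     c1950 = SkyCoord('5h34m30s', '+22d00m00s', frame=FK4(equinox='B1950'))
#     c2000 = c1950.transform_to(FK5(equinox='J2000'))
#     print(c2000.to_string('hmsdms'))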
# Determine which parts of the fileIndex are usable
useFiles = (fileIndex['USE'] == 1)
# Further restrict the selection to only include the pre-selected calibration targets
targetFiles = np.array([False]*len(fileIndex), dtype=bool)
for target in calibrationTargets:
targetFiles = np.logical_or(targetFiles,
fileIndex['TARGET'] == target)
# Cull the fileIndex to ONLY include the specified calibration targets
calFiles = np.logical_and(useFiles, targetFiles)
if np.sum(calFiles) > 0:
fileInds = np.where(calFiles)
fileIndex = fileIndex[fileInds]
# Group the fileIndex by waveband
fileIndexByWaveband = fileIndex.group_by(['FILTER'])
# Loop through each waveband and compute the photometry and polarization
# values for each of the calibration stars within the calibrationTargets for that waveband.
for group in fileIndexByWaveband.groups:
# Grab the current group information
thisFilter = str(np.unique(group['FILTER'].data)[0])
# Define the polarization standard files
thisFilename = 'polStandardTable_{0}.csv'.format(thisFilter)
outTableFile = os.path.join(pyPol_data, thisFilename)
# Update the user on processing status
print('\nProcessing calibrationTargets for')
print('Filter : {0}'.format(thisFilter))
    # Initialize the polCalTable variable to be empty. This is necessary
    # because it is impossible to join an astropy Table with an empty table.
    # Instead, simply test whether this variable has been populated yet.
polCalTable = None
# Loop through each subgroup and compute the photometry of the stars
# in those images.
indexBySubGroup = group.group_by(['GROUP_ID'])
subGroupKeys = indexBySubGroup.groups.keys
for iSubGroup, subGroup in enumerate(indexBySubGroup.groups):
# Update the user on processing status
thisTarget = str(np.unique(subGroup['TARGET'].data)[0])
thisSubGroup = str(np.unique(subGroup['OBJECT'].data)[0])
print('\tSubgroup : {0}'.format(thisSubGroup))
# Start by breaking the subGroup up into its constituent polAngs
indexByPolAng = subGroup.group_by(['POLPOS'])
        # Initialize a dictionary for storing the respective images
subGroupImgDict = {}
        # We are assuming that each subgroup is uniquely named. In this
# case, it represents an independent measure of the polarization
# of a standard star. Thus, let's loop through the expected polAng
# files, read them in, and store them in a dictionary for later use.
        # Initialize a boolean list to track which standards appear within
# the images of this subgroup
polStandardBool = np.ones(polStandardCoords.shape, dtype=bool)
#
# TODO: this can be replaced by a simple "get_sources_at_coords" call.
#
# Loop through each polAng subset of the subGroup and read in images
polAngGroupKeys = indexByPolAng.groups.keys
for polAngGroup in indexByPolAng.groups:
# Generate the expected file name and attempt to read it in
thisPolAng = np.unique(polAngGroup['POLPOS'])[0]
inputFile = '_'.join([thisTarget, thisSubGroup, str(thisPolAng)]) + '.fits'
inputPath = os.path.join(polAngDir, inputFile)
# Read in the image
polAngImg = ai.reduced.ReducedScience.read(inputPath)
# Determine which standards appear in this image
polStandardBool = np.logical_and(polStandardBool,
polAngImg.in_image(polStandardCoords, edge=100))
# Store this image in the dictionary
subGroupImgDict[thisPolAng] = polAngImg
# Now that all the polAng images are stored in a dictionary, let's
        # double check that AT LEAST ONE of the standards appears in all four
# polAng images.
if np.sum(polStandardBool) < 1:
errStr = '''
It would seem that none of the entries in the standard
catalog appear in these images. Either...
1) You need to add entries into your polarization standard catalog
OR
2) These images don't actually contain any polarization standard stars
If it is option (2), then make sure to eliminate this target from the
"calibrationTargets" variable near the top of this script.'''
raise ValueError(errStr)
# At least one polarization standard was found in all four
# images, so we can proceed to do polarimetry on that source.
# Start by grabbing the standard(s) which appear in these imgs
goodStandardInds = np.where(polStandardBool)
subGroupTable = polStandards[goodStandardInds]
# Quickly build a list of columns to keep in this table
keepKeys = ['_'.join([prefix, thisFilter])
for prefix in ['P', 'sP', 'PA', 'sPA']]
keepKeys.extend(['Name', 'RA_1950', 'Dec_1950'])
        # Remove all the unnecessary columns from this table before looping
        # through each row and performing polarimetry on the stars.
subGroupTable.keep_columns(keepKeys)
# Grab the original RAs and Decs
skyCoord1 = SkyCoord(
ra=subGroupTable['RA_1950'],
dec=subGroupTable['Dec_1950'],
unit=(u.hour, u.deg),
frame=FK4(equinox='B1950')
)
# Convert to J2000 coordinates
skyCoord1 = skyCoord1.transform_to(FK5(equinox='J2000'))
        # Initialize an empty dictionary for storing the photometry for each
# polaroid rotation angle image.
polAngPhotDict = {}
        # Initialize a boolean to track which stars pass photometry
goodStars = True
# Loop through each polaroid rotation angle image and perform the
# photometry on the calibrator stars in that field.
for polAng, polAngImg in subGroupImgDict.items():
            # Find the stars at the calibrator coordinates
xStars, yStars = polAngImg.get_sources_at_coords(
skyCoord1,
satLimit=satLimit
)
# Check which stars were successfully located
goodStars = np.logical_and(goodStars, np.isfinite(xStars))
            # Cull the list of star positions to include only properly located stars
goodInds = np.where(goodStars)
xStars = xStars[goodInds]
yStars = yStars[goodInds]
            # Record the indices of any stars that failed to be located properly
badStars = np.logical_not(goodStars)
badInds = np.where(badStars)
# Do the photometry for this set of standards
# Create a PhotometryAnalyzer object for this image
photAnalyzer = ai.utilitywrappers.PhotometryAnalyzer(polAngImg)
# Perform the actual stellar photometry (no curves of growth)
# Use a static aperture for now!
flux, uncertainty = photAnalyzer.aperture_photometry(
xStars, yStars, 18, 24, 28
)
# Re-insert null photometry measurements for the culled stars
flux = np.insert(flux, badInds[0], np.NaN)
uncertainty = np.insert(uncertainty, badInds[0], np.NaN)
# Store the measured photometry in the dictionary for this subGroup
polAngPhotDict[polAng] = {
'flux': flux,
's_flux': uncertainty
}
# # Do some debugging
# print(flux)
# plt.ion()
# polAngImg.clear_astrometry()
# polAngImg.show()
# plt.autoscale(False)
# plt.scatter(xStars, yStars, s=50, facecolor='none', edgecolor='red')
#
# import pdb; pdb.set_trace()
        # If all of the photometry measurements can be trusted,
# then continue to estimate the polarization of this source.
# ********** STOKES Q **********
A = (polAngPhotDict[0]['flux'] - polAngPhotDict[400]['flux'])
B = (polAngPhotDict[0]['flux'] + polAngPhotDict[400]['flux'])
Q = A/B
        # Compute the uncertainty in the Stokes Q quantity
s_AB = np.sqrt(polAngPhotDict[0]['s_flux']**2 +
polAngPhotDict[400]['s_flux']**2)
s_Q = np.abs(s_AB/B)*np.sqrt(1.0 + Q**2)
# ********** STOKES U **********
A = (polAngPhotDict[200]['flux'] - polAngPhotDict[600]['flux'])
B = (polAngPhotDict[200]['flux'] + polAngPhotDict[600]['flux'])
U = A/B
# Compute the uncertainty in that Stokes U quantity
s_AB = np.sqrt(polAngPhotDict[200]['s_flux']**2 +
polAngPhotDict[600]['s_flux']**2)
s_U = np.abs(s_AB/B)*np.sqrt(1.0 + U**2)
# ********** POLARIZATION PERCENTAGE **********
P = np.sqrt(U**2 + Q**2)
s_P = np.sqrt((U*s_U)**2 + (Q*s_Q)**2)/P
# ...and de-bias the polarization measurements
# TODO: ask Dan if I should be debiasing the standard star
# calibration measurements.
nullStarInds = np.where(P/s_P <= 1)
P[nullStarInds] = s_P[nullStarInds]
P = np.sqrt(P**2 - s_P**2)
# ********** POLARIZATION POSITION ANGLE **********
PA = np.rad2deg(0.5*np.arctan2(U, Q))
# lazy way (assumes sigQ ~= sigU)
# sigPA = 0.5*rad2deg*(sigP/P)
# Real way (uses actual sigQ and sigU)
# Canonical treatment is to use 0.5*(sigQ + sigU) as estimate of
# sigP....
# TODO: I should really just update this to include the formula I
# use in the M82 paper.
s_PA = 0.5*np.rad2deg(np.sqrt((U*s_Q)**2 + (Q*s_U)**2)/P**2)
# TODO Double check that this matches the formula in PEGS_pol
# I think that PEGS pol is actually MISSING a factor of P
# in the denominator.
# Scale up polarization values to percentages
P *= 100.0
s_P *= 100.0
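        # For reference, the ratio method used above condensed into a single
        # hypothetical helper (a sketch only; this function is not defined or
        # called anywhere else in this script):
        #
        #     def stokes_from_fluxes(f0, f200, f400, f600):
        #         q = (f0 - f400) / (f0 + f400)
        #         u = (f200 - f600) / (f200 + f600)
        #         p = np.sqrt(q**2 + u**2)
        #         pa = np.rad2deg(0.5 * np.arctan2(u, q))
        #         return q, u, p, pa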
# Check that the polarization is reasonable
# (e.g. R-band, 20150119, HD38563A is problematic)
badPols = P > 10
if np.sum(badPols) > 0:
warnings.warn("Culling anomalously high polarization observation")
# Find the indices of the good polarization measurements
goodPols = np.logical_not(badPols)
goodInds = np.where(goodPols)
        # Cull the important data to include only the good measurements
subGroupTable = subGroupTable[goodInds]
P = P[goodInds]
s_P = s_P[goodInds]
PA = PA[goodInds]
s_PA = s_PA[goodInds]
# Construct a temporary table to hold the results of this subgroup
columnSuffix = thisFilter + str(iSubGroup + 1)
columnNames = [
'Name',
'P_' + columnSuffix,
'sP_' + columnSuffix,
'PA_' + columnSuffix,
'sPA_' + columnSuffix
]
# Create the table object
subGroupPolTable = Table(
[subGroupTable['Name'], P, s_P, PA, s_PA],
names=columnNames
)
# Join the temporary polarization table to the subGroupTable
subGroupPolTable = join(
subGroupTable,
subGroupPolTable,
join_type='left'
)
if polCalTable is None:
polCalTable = subGroupPolTable.copy()
else:
# Now join this table to the master polCalTable instance using an
# 'outer' join_type
polCalTable = join(
polCalTable,
subGroupPolTable,
join_type='outer'
)
# Now that all of the calibration data has been generated, save to disk
polCalTable.write(outTableFile, overwrite=True)
# Debugging plots code
# plt.ion()
# for phot, uncert in zip(photometry.T, uncertainty.T):
# plt.errorbar(aprs, phot, yerr=uncert, fmt='--o')
#
# import pdb; pdb.set_trace()
# continue
# for iStandard, standard in enumerate(subGroupTable):
# # Grab the name of this standard
# thisStandard = standard['Name']
# print('\t\tStandard : {0}'.format(thisStandard))
#
# # Loop through each polAng image, test for saturation,
# # measure star width, and perform aperture photometry.
# polAngPhotDict = {}
# for polAng, polAngImg in subGroupImgDict.items():
# # Update the user on processing status
# print('\t\t\tPolaroid Angle : {0}'.format(str(polAng)))
#
# # Find the expected star coordinates in this image using
# # the WCS in the header
# skyCoord1 = SkyCoord(ra=standard['RA_1950'], dec=standard['Dec_1950'],
# unit=(u.hour, u.deg), frame=FK4(equinox='B1950'))
# skyCoord1 = skyCoord1.transform_to(FK5(equinox='J2000'))
# x1, y1 = polAngImg.wcs.all_world2pix(
# skyCoord1.ra,
# skyCoord1.dec,
# 0
# )
#
# # Cut out a small subarray around the predicted position
# lf, rt = np.int(np.round(x1 - 20)), np.int(np.round(x1 + 20))
# bt, tp = np.int(np.round(y1 - 20)), np.int(np.round(y1 + 20))
# tmpArr = polAngImg.data[bt:tp, lf:rt]
#
# # Test if this star appears to be saturated
# if tmpArr.max() > satLimit:
# # If it is saturated, then make the "saturatedStar"
# # variable "True" so that we will know NOT to use
# # this standard star later and break out of the loop.
# print('\t\t\tStar is saturated!')
# saturatedStar = True
# break
#
# # Use a centroid function to get a more precise position
# x1, y1 = (centroid_com(tmpArr) + np.array([lf, bt]))
#
# from photutils import data_properties, properties_table
# # Measure star width properties
# columns = ['id', 'xcentroid', 'ycentroid', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'orientation']
# props = data_properties(tmpArr)
# tbl = properties_table(props, columns=columns)
#
# # Compute the axis ratio and test if it's any good
# semimajor = tbl['semimajor_axis_sigma'].data[0]
# semiminor = tbl['semiminor_axis_sigma'].data[0]
# axisRatio = semimajor/semiminor
# if axisRatio > 1.3:
# print('\t\t\tStar is too oblate!')
# print('\t\t\ta/b = {0}'.format(axisRatio))
# oblateStar = True
# break
#
# # If it is not too oblate, then compute an approximate
# # width using a geometric mean
# starWidth = np.sqrt(semimajor*semiminor)
#
# # Build a circular aperture and a sky annulus to
# # measure the star photometry
# # TODO: this is a MAJOR problem! I really should do a curve
# # of growth analysis and then compute the optimal SNR
# # aperture for each star individually.
#
# import pdb; pdb.set_trace()
#
# # Measure the photometry (flux not magnitudes) using the
# # polAngImg.sigma array as the source of uncertainties
# phot_table = aperture_photometry(polAngImg.arr,
# [starAperture, skyAperture],
# error=polAngImg.sigma)
#
# # Compute a mean background count rate within annulus
# skyArea = skyAperture.area()
# bkg_mean = phot_table['aperture_sum_1'].data[0] / skyArea
# sig_bkg_mean = phot_table['aperture_sum_err_1'].data[0] / skyArea
#
# # Compute the background contribution to the stellar flux
# starArea = starAperture.area()
# bkg_sum = bkg_mean * starArea
# sig_bkg_sum = sig_bkg_mean * starArea
#
# # Compute a final stellar flux
# final_flux = phot_table['aperture_sum_0'].data[0] - bkg_sum
# sig_final_flux = np.sqrt(phot_table['aperture_sum_err_0'].data[0]**2 +
# sig_bkg_sum)
#
# # Store the star photometry (and uncertainty) in the
# # polAngPhotDict under its polAng
# polAngPhotDict[polAng] = {
# 'flux': final_flux,
# 's_flux': sig_final_flux}
#
# else:
# # If the whole loop executed without any problems, then
# # it is safe to assume that photometry can be trusted.
# # Indicate this with the "starSaturated" boolean flag.
# saturatedStar = False
# oblateStar = False
#
# # Now that the photometry for this star has been
# # successfully measured, let's double check that the star
# # was not saturated or oblate.
# if saturatedStar:
# # print('\t\tAt least one photometry measurement was saturated!')
# # print('\t\tDo not compute the observed polarization.')
# continue
# if oblateStar:
# # print('\t\tAt least one star was too oblate.')
# # print('\t\tDo not compute the observed polarization.')
# continue
# # Join this subGroup calibration data to the overall table thus far
# if polCalTable is None:
# polCalTable = subGroupTable.copy()
# else:
# polCalTable = join(polCalTable, subGroupTable, join_type='outer')
print('Photometry of polarization calibration standards completed!')
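# -----------------------------------------------------------------------------
# Illustrative sketch (a hypothetical helper, not called by the pipeline above):
# a compact restatement of the Stokes/polarization formulae used in the
# calibration loop, pairing polaroid angles 0/400 for Stokes Q and 200/600 for
# Stokes U.  It differs from the loop in two small, deliberate ways: null
# detections (P <= s_P) are clipped to zero after de-biasing rather than being
# set equal to s_P first, and the position-angle uncertainty uses Q**2 + U**2
# in the denominator instead of the de-biased P**2.
import numpy as np

def _estimate_polarization(polAngPhotDict):
    # Normalized Stokes Q from the 0/400 polaroid-angle pair
    A = polAngPhotDict[0]['flux'] - polAngPhotDict[400]['flux']
    B = polAngPhotDict[0]['flux'] + polAngPhotDict[400]['flux']
    Q = A/B
    s_AB = np.sqrt(polAngPhotDict[0]['s_flux']**2 + polAngPhotDict[400]['s_flux']**2)
    s_Q = np.abs(s_AB/B)*np.sqrt(1.0 + Q**2)
    # Normalized Stokes U from the 200/600 polaroid-angle pair
    A = polAngPhotDict[200]['flux'] - polAngPhotDict[600]['flux']
    B = polAngPhotDict[200]['flux'] + polAngPhotDict[600]['flux']
    U = A/B
    s_AB = np.sqrt(polAngPhotDict[200]['s_flux']**2 + polAngPhotDict[600]['s_flux']**2)
    s_U = np.abs(s_AB/B)*np.sqrt(1.0 + U**2)
    # Polarization fraction, its uncertainty, and a simple de-bias
    P = np.sqrt(Q**2 + U**2)
    s_P = np.sqrt((Q*s_Q)**2 + (U*s_U)**2)/P
    P = np.sqrt(np.clip(P**2 - s_P**2, 0.0, None))
    # Position angle in degrees (the zero-point depends on the instrument
    # calibration) and its propagated uncertainty
    PA = np.rad2deg(0.5*np.arctan2(U, Q))
    s_PA = 0.5*np.rad2deg(np.sqrt((U*s_Q)**2 + (Q*s_U)**2)/(Q**2 + U**2))
    # Return percentages for P to match the calibration table columns
    return 100.0*P, 100.0*s_P, PA, s_PA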
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test behavior related to masked tables"""
import pytest
import numpy as np
import numpy.ma as ma
from astropy.table import Column, MaskedColumn, Table, QTable
from astropy.table.column import BaseColumn
from astropy.tests.helper import catch_warnings
from astropy.time import Time
import astropy.units as u
class SetupData:
def setup_method(self, method):
self.a = MaskedColumn(name='a', data=[1, 2, 3], fill_value=1)
self.b = MaskedColumn(name='b', data=[4, 5, 6], mask=True)
self.c = MaskedColumn(name='c', data=[7, 8, 9], mask=False)
self.d_mask = np.array([False, True, False])
self.d = MaskedColumn(name='d', data=[7, 8, 7], mask=self.d_mask)
self.t = Table([self.a, self.b], masked=True)
self.ca = Column(name='ca', data=[1, 2, 3])
self.sc = MaskedColumn(name='sc', data=[(1, 1.), (2, 2.), (3, 3.)],
dtype='i8,f8', fill_value=(0, -1.))
class TestPprint(SetupData):
def test_pformat(self):
assert self.t.pformat() == [' a b ', '--- ---', ' 1 --', ' 2 --', ' 3 --']
class TestFilled:
"""Test the filled method in MaskedColumn and Table"""
def setup_method(self, method):
mask = [True, False, False]
self.meta = {'a': 1, 'b': [2, 3]}
a = self.a = MaskedColumn(name='a', data=[1, 2, 3], fill_value=10, mask=mask, meta={'a': 1})
b = self.b = MaskedColumn(name='b', data=[4.0, 5.0, 6.0], fill_value=10.0, mask=mask)
c = self.c = MaskedColumn(name='c', data=['7', '8', '9'], fill_value='1', mask=mask)
def test_filled_column(self):
f = self.a.filled()
assert np.all(f == [10, 2, 3])
assert isinstance(f, Column)
assert not isinstance(f, MaskedColumn)
# Confirm copy, not ref
assert f.meta['a'] == 1
f.meta['a'] = 2
f[1] = 100
assert self.a[1] == 2
assert self.a.meta['a'] == 1
# Fill with arg fill_value not column fill_value
f = self.a.filled(20)
assert np.all(f == [20, 2, 3])
f = self.b.filled()
assert np.all(f == [10.0, 5.0, 6.0])
assert isinstance(f, Column)
f = self.c.filled()
assert np.all(f == ['1', '8', '9'])
assert isinstance(f, Column)
@pytest.mark.parametrize('tableclass', (Table, QTable))
def test_filled_masked_table(self, tableclass):
t = tableclass([self.a, self.b, self.c], meta=self.meta)
f = t.filled()
assert isinstance(f, Table)
assert f.masked is False
assert np.all(f['a'] == [10, 2, 3])
assert np.allclose(f['b'], [10.0, 5.0, 6.0])
assert np.all(f['c'] == ['1', '8', '9'])
# Confirm copy, not ref
assert f.meta['b'] == [2, 3]
f.meta['b'][0] = 20
assert t.meta['b'] == [2, 3]
f['a'][2] = 100
assert t['a'][2] == 3
@pytest.mark.parametrize('tableclass', (Table, QTable))
def test_filled_unmasked_table(self, tableclass):
t = tableclass([(1, 2), ('3', '4')], names=('a', 'b'), meta=self.meta)
f = t.filled()
assert isinstance(f, Table)
assert f.masked is False
assert np.all(f['a'] == t['a'])
assert np.all(f['b'] == t['b'])
# Confirm copy, not ref
assert f.meta['b'] == [2, 3]
f.meta['b'][0] = 20
assert t.meta['b'] == [2, 3]
f['a'][1] = 100
assert t['a'][1] == 2
class TestFillValue(SetupData):
"""Test setting and getting fill value in MaskedColumn and Table"""
def test_init_set_fill_value(self):
"""Check that setting fill_value in the MaskedColumn init works"""
assert self.a.fill_value == 1
c = MaskedColumn(name='c', data=['xxxx', 'yyyy'], fill_value='none')
assert c.fill_value == 'none'
def test_set_get_fill_value_for_bare_column(self):
"""Check set and get of fill value works for bare Column"""
self.d.fill_value = -999
assert self.d.fill_value == -999
assert np.all(self.d.filled() == [7, -999, 7])
def test_set_get_fill_value_for_str_column(self):
c = MaskedColumn(name='c', data=['xxxx', 'yyyy'], mask=[True, False])
# assert np.all(c.filled() == ['N/A', 'yyyy'])
c.fill_value = 'ABCDEF'
assert c.fill_value == 'ABCD' # string truncated to dtype length
assert np.all(c.filled() == ['ABCD', 'yyyy'])
assert np.all(c.filled('XY') == ['XY', 'yyyy'])
def test_set_get_fill_value_for_structured_column(self):
assert self.sc.fill_value == np.array((0, -1.), self.sc.dtype)
sc = self.sc.copy()
assert sc.fill_value.item() == (0, -1.)
sc.fill_value = (-1, np.inf)
assert sc.fill_value == np.array((-1, np.inf), self.sc.dtype)
sc2 = MaskedColumn(sc, fill_value=(-2, -np.inf))
assert sc2.fill_value == np.array((-2, -np.inf), sc2.dtype)
def test_table_column_mask_not_ref(self):
"""Table column mask is not ref of original column mask"""
self.b.fill_value = -999
assert self.t['b'].fill_value != -999
def test_set_get_fill_value_for_table_column(self):
"""Check set and get of fill value works for Column in a Table"""
self.t['b'].fill_value = 1
assert self.t['b'].fill_value == 1
assert np.all(self.t['b'].filled() == [1, 1, 1])
def test_data_attribute_fill_and_mask(self):
"""Check that .data attribute preserves fill_value and mask"""
self.t['b'].fill_value = 1
self.t['b'].mask = [True, False, True]
assert self.t['b'].data.fill_value == 1
assert np.all(self.t['b'].data.mask == [True, False, True])
class TestMaskedColumnInit(SetupData):
"""Initialization of a masked column"""
def test_set_mask_and_not_ref(self):
"""Check that mask gets set properly and that it is a copy, not ref"""
assert np.all(~self.a.mask)
assert np.all(self.b.mask)
assert np.all(~self.c.mask)
assert np.all(self.d.mask == self.d_mask)
self.d.mask[0] = True
assert not np.all(self.d.mask == self.d_mask)
def test_set_mask_from_list(self):
"""Set mask from a list"""
mask_list = [False, True, False]
a = MaskedColumn(name='a', data=[1, 2, 3], mask=mask_list)
assert np.all(a.mask == mask_list)
def test_override_existing_mask(self):
"""Override existing mask values"""
mask_list = [False, True, False]
b = MaskedColumn(name='b', data=self.b, mask=mask_list)
assert np.all(b.mask == mask_list)
def test_incomplete_mask_spec(self):
"""Incomplete mask specification raises MaskError"""
mask_list = [False, True]
with pytest.raises(ma.MaskError):
MaskedColumn(name='b', length=4, mask=mask_list)
class TestTableInit(SetupData):
"""Initializing a table"""
# Filter warnings since these are set to lead to exceptions,
# which changes behaviour in Table._convert_data_to_col
# (causing conversion of columns with masked elements to object dtype).
@pytest.mark.filterwarnings('ignore:.*converting a masked element.*')
def test_initialization_with_all_columns(self):
t1 = Table([self.a, self.b, self.c, self.d, self.ca, self.sc])
assert t1.colnames == ['a', 'b', 'c', 'd', 'ca', 'sc']
# Check we get the same result by passing in as list of dict.
# (Regression test for error uncovered by scintillometry package.)
lofd = [{k: row[k] for k in t1.colnames} for row in t1]
t2 = Table(lofd)
for k in t1.colnames:
# TODO: the final dtype should not depend on the presence of
# masked elements, but unfortunately np.ma.MaskedArray does take
# it into account.
if k not in ('b', 'd'):
assert t1[k].dtype == t2[k].dtype
assert np.all(t1[k] == t2[k]) in (True, np.ma.masked)
assert np.all(getattr(t1[k], 'mask', False) ==
getattr(t2[k], 'mask', False))
def test_mask_false_if_input_mask_not_true(self):
"""Masking is always False if initial masked arg is not True"""
t = Table([self.ca, self.a])
assert t.masked is False # True before astropy 4.0
t = Table([self.ca])
assert t.masked is False
t = Table([self.ca, ma.array([1, 2, 3])])
assert t.masked is False # True before astropy 4.0
def test_mask_false_if_no_input_masked(self):
"""Masking not true if not (requested or input requires mask)"""
t0 = Table([[3, 4]], masked=False)
t1 = Table(t0, masked=True)
t2 = Table(t1, masked=False)
assert not t0.masked
assert t1.masked
assert not t2.masked
def test_mask_property(self):
t = self.t
# Access table mask (boolean structured array) by column name
assert np.all(t.mask['a'] == np.array([False, False, False]))
assert np.all(t.mask['b'] == np.array([True, True, True]))
# Check that setting mask from table mask has the desired effect on column
t.mask['b'] = np.array([False, True, False])
assert np.all(t['b'].mask == np.array([False, True, False]))
# Non-masked table returns None for mask attribute
t2 = Table([self.ca], masked=False)
assert t2.mask is None
# Set mask property globally and verify local correctness
for mask in (True, False):
t.mask = mask
for name in ('a', 'b'):
assert np.all(t[name].mask == mask)
class TestAddColumn:
def test_add_masked_column_to_masked_table(self):
t = Table(masked=True)
assert t.masked
t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
assert t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert isinstance(t['a'], MaskedColumn)
assert isinstance(t['b'], MaskedColumn)
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_column_to_non_masked_table(self):
t = Table(masked=False)
assert not t.masked
t.add_column(Column(name='a', data=[1, 2, 3]))
assert not t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
assert not t.masked # Changed in 4.0, table no longer auto-upgrades
assert isinstance(t['a'], Column) # Was MaskedColumn before 4.0
assert isinstance(t['b'], MaskedColumn)
assert np.all(t['a'] == np.array([1, 2, 3]))
assert not hasattr(t['a'], 'mask')
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_non_masked_column_to_masked_table(self):
t = Table(masked=True)
assert t.masked
t.add_column(Column(name='a', data=[1, 2, 3]))
assert t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert isinstance(t['a'], MaskedColumn)
assert isinstance(t['b'], MaskedColumn)
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 0, 0], bool))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_convert_to_masked_table_only_if_necessary(self):
# Do not convert to masked table, if new column has no masked value.
# See #1185 for details.
t = Table(masked=False)
assert not t.masked
t.add_column(Column(name='a', data=[1, 2, 3]))
assert not t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[0, 0, 0]))
assert not t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['b'] == np.array([4, 5, 6]))
class TestRenameColumn:
def test_rename_masked_column(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
t['a'].fill_value = 42
t.rename_column('a', 'b')
assert t.masked
assert np.all(t['b'] == np.array([1, 2, 3]))
assert np.all(t['b'].mask == np.array([0, 1, 0], bool))
assert t['b'].fill_value == 42
assert t.colnames == ['b']
class TestRemoveColumn:
def test_remove_masked_column(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
t['a'].fill_value = 42
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
t.remove_column('b')
assert t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert t['a'].fill_value == 42
assert t.colnames == ['a']
class TestAddRow:
def test_add_masked_row_to_masked_table_iterable(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row([2, 5], mask=[1, 0])
t.add_row([3, 6], mask=[0, 1])
assert t.masked
assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping1(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row({'b': 5, 'a': 2}, mask={'a': 1, 'b': 0})
t.add_row({'a': 3, 'b': 6}, mask={'b': 1, 'a': 0})
assert t.masked
assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping2(self):
# When adding values to a masked table, if the mask is specified as a
# dict, then values not specified will have mask values set to True
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row({'b': 5}, mask={'b': 0})
t.add_row({'a': 3}, mask={'a': 0})
assert t.masked
assert t['a'][0] == 1 and t['a'][2] == 3
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert t['b'][1] == 5
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping3(self):
# When adding values to a masked table, if mask is not passed to
# add_row, then the mask should be set to False if values are present
# and True if not.
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row({'b': 5})
t.add_row({'a': 3})
assert t.masked
assert t['a'][0] == 1 and t['a'][2] == 3
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert t['b'][1] == 5
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping4(self):
# When adding values to a masked table, if the mask is specified as a
# dict, then keys in values should match keys in mask
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
with pytest.raises(ValueError) as exc:
t.add_row({'b': 5}, mask={'a': True})
assert exc.value.args[0] == 'keys in mask should match keys in vals'
def test_add_masked_row_to_masked_table_mismatch(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
with pytest.raises(TypeError) as exc:
t.add_row([2, 5], mask={'a': 1, 'b': 0})
assert exc.value.args[0] == "Mismatch between type of vals and mask"
with pytest.raises(TypeError) as exc:
t.add_row({'b': 5, 'a': 2}, mask=[1, 0])
assert exc.value.args[0] == "Mismatch between type of vals and mask"
def test_add_masked_row_to_non_masked_table_iterable(self):
t = Table(masked=False)
t['a'] = [1]
t['b'] = [4]
t['c'] = Time([1], format='cxcsec')
tm = Time(2, format='cxcsec')
assert not t.masked
t.add_row([2, 5, tm])
assert not t.masked
t.add_row([3, 6, tm], mask=[0, 1, 1])
assert not t.masked
assert type(t['a']) is Column
assert type(t['b']) is MaskedColumn
assert type(t['c']) is Time
assert np.all(t['a'] == [1, 2, 3])
assert np.all(t['b'].data == [4, 5, 6])
assert np.all(t['b'].mask == [False, False, True])
assert np.all(t['c'][:2] == Time([1, 2], format='cxcsec'))
assert np.all(t['c'].mask == [False, False, True])
def test_add_row_cannot_mask_column_raises_typeerror(self):
t = QTable()
t['a'] = [1, 2] * u.m
t.add_row((3 * u.m,)) # No problem
with pytest.raises(ValueError) as exc:
t.add_row((3 * u.m,), mask=(True,))
assert (exc.value.args[0].splitlines() ==
["Unable to insert row because of exception in column 'a':",
"mask was supplied for column 'a' but it does not support masked values"])
def test_setting_from_masked_column():
"""Test issue in #2997"""
mask_b = np.array([True, True, False, False])
for select in (mask_b, slice(0, 2)):
t = Table(masked=True)
t['a'] = Column([1, 2, 3, 4])
t['b'] = MaskedColumn([11, 22, 33, 44], mask=mask_b)
t['c'] = MaskedColumn([111, 222, 333, 444], mask=[True, False, True, False])
t['b'][select] = t['c'][select]
assert t['b'][1] == t[1]['b']
assert t['b'][0] is np.ma.masked # Original state since t['c'][0] is masked
assert t['b'][1] == 222 # New from t['c'] since t['c'][1] is unmasked
assert t['b'][2] == 33
assert t['b'][3] == 44
assert np.all(t['b'].mask == t.mask['b']) # Avoid t.mask in general, this is for testing
mask_before_add = t.mask.copy()
t['d'] = np.arange(len(t))
assert np.all(t.mask['b'] == mask_before_add['b'])
def test_coercing_fill_value_type():
"""
Test that masked column fill_value is coerced into the correct column type.
"""
# This is the original example posted on the astropy@scipy mailing list
t = Table({'a': ['1']}, masked=True)
t['a'].set_fill_value('0')
t2 = Table(t, names=['a'], dtype=[np.int32])
assert isinstance(t2['a'].fill_value, np.int32)
# Unit test the same thing.
c = MaskedColumn(['1'])
c.set_fill_value('0')
c2 = MaskedColumn(c, dtype=np.int32)
assert isinstance(c2.fill_value, np.int32)
def test_mask_copy():
"""Test that the mask is copied when copying a table (issue #7362)."""
c = MaskedColumn([1, 2], mask=[False, True])
c2 = MaskedColumn(c, copy=True)
c2.mask[0] = True
assert np.all(c.mask == [False, True])
assert np.all(c2.mask == [True, True])
def test_masked_as_array_with_mixin():
"""Test that as_array() and Table.mask attr work with masked mixin columns"""
t = Table()
t['a'] = Time([1, 2], format='cxcsec')
t['b'] = [3, 4]
t['c'] = [5, 6] * u.m
# With no mask, the output should be ndarray
ta = t.as_array()
assert isinstance(ta, np.ndarray) and not isinstance(ta, np.ma.MaskedArray)
# With a mask, output is MaskedArray
t['a'][1] = np.ma.masked
ta = t.as_array()
assert isinstance(ta, np.ma.MaskedArray)
assert np.all(ta['a'].mask == [False, True])
assert np.isclose(ta['a'][0].cxcsec, 1.0)
assert np.all(ta['b'].mask == False)
assert np.all(ta['c'].mask == False)
# Check table ``mask`` property
tm = t.mask
assert np.all(tm['a'] == [False, True])
assert np.all(tm['b'] == False)
assert np.all(tm['c'] == False)
def test_masked_column_with_unit_in_qtable():
"""Test that adding a MaskedColumn with a unit to QTable issues warning"""
t = QTable()
with catch_warnings() as w:
t['a'] = MaskedColumn([1, 2])
assert len(w) == 0
assert isinstance(t['a'], MaskedColumn)
with catch_warnings() as w:
t['b'] = MaskedColumn([1, 2], unit=u.m)
assert len(w) == 0
assert isinstance(t['b'], u.Quantity)
with catch_warnings() as w:
t['c'] = MaskedColumn([1, 2], unit=u.m, mask=[True, False])
assert len(w) == 1
assert "dropping mask in Quantity column 'c'"
assert isinstance(t['b'], u.Quantity)
def test_masked_column_data_attribute_is_plain_masked_array():
c = MaskedColumn([1, 2], mask=[False, True])
c_data = c.data
assert type(c_data) is np.ma.MaskedArray
assert type(c_data.data) is np.ndarray
def test_mask_slicing_count_array_finalize():
"""Check that we don't finalize MaskedColumn too often.
Regression test for gh-6721.
"""
# Create a new BaseColumn class that counts how often
# ``__array_finalize__`` is called.
class MyBaseColumn(BaseColumn):
counter = 0
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
MyBaseColumn.counter += 1
# Base a new MaskedColumn class on it. The normal MaskedColumn
# hardcodes the initialization to BaseColumn, so we exchange that.
class MyMaskedColumn(MaskedColumn, Column, MyBaseColumn):
def __new__(cls, *args, **kwargs):
self = super().__new__(cls, *args, **kwargs)
self._baseclass = MyBaseColumn
return self
# Creation really needs 2 finalizations (once for the BaseColumn
# call inside ``__new__`` and once when the view as a MaskedColumn
# is taken), but since the first is hardcoded, we do not capture it
# and thus the count is only 1.
c = MyMaskedColumn([1, 2], mask=[False, True])
assert MyBaseColumn.counter == 1
# slicing should need only one ``__array_finalize__`` (used to be 3).
c0 = c[:]
assert MyBaseColumn.counter == 2
# repr should need none (used to be 2!!)
repr(c0)
assert MyBaseColumn.counter == 2
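# -----------------------------------------------------------------------------
# Illustrative sketch (not one of the tests above): the fill_value / filled()
# behaviour exercised by TestFilled and TestFillValue, shown on a bare
# MaskedColumn and on a masked Table.  Assumes a recent astropy.
if __name__ == '__main__':
    col = MaskedColumn(name='x', data=[1, 2, 3], mask=[True, False, False],
                       fill_value=-99)
    print(col.filled())     # plain Column; the masked entry becomes -99
    print(col.filled(0))    # an explicit argument overrides fill_value
    tbl = Table([col], masked=True)
    tbl['x'].fill_value = 0
    print(tbl.filled())     # unmasked Table with 0 where 'x' was masked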
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for copying elements from one graph to another.
These functions allow for recursive copying of elements (ops and variables)
from one graph to another. The copied elements are initialized inside a
user-specified scope in the other graph. There are separate functions to
copy ops and variables.
There is also a function to retrieve the copied version of an op from the
first graph inside a scope in the second graph.
@@copy_op_to_graph
@@copy_variable_to_graph
@@get_copied_op
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from tensorflow.python.ops.variables import Variable
from tensorflow.python.client.session import Session
from tensorflow.python.framework import ops
__all__ = ['copy_op_to_graph', 'copy_variable_to_graph', 'get_copied_op']
def copy_variable_to_graph(org_instance, to_graph, scope=''):
"""Given a `Variable` instance from one `Graph`, initializes and returns
a copy of it from another `Graph`, under the specified scope
(default `""`).
Args:
org_instance: A `Variable` from some `Graph`.
to_graph: The `Graph` to copy the `Variable` to.
scope: A scope for the new `Variable` (default `""`).
Returns:
The copied `Variable` from `to_graph`.
Raises:
TypeError: If `org_instance` is not a `Variable`.
"""
if not isinstance(org_instance, Variable):
raise TypeError(str(org_instance) + ' is not a Variable')
#The name of the new variable
if scope != '':
new_name = (scope + '/' + org_instance.name[:org_instance.name.index(':')])
else:
new_name = org_instance.name[:org_instance.name.index(':')]
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope,
#except the special ones required for variable initialization and
#training.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if (name == ops.GraphKeys.GLOBAL_VARIABLES or
name == ops.GraphKeys.TRAINABLE_VARIABLES or scope == ''):
collections.append(name)
else:
collections.append(scope + '/' + name)
#See if it's trainable.
trainable = (
org_instance in org_instance.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES))
#Get the initial value
with org_instance.graph.as_default():
temp_session = Session()
init_value = temp_session.run(org_instance.initialized_value())
#Initialize the new variable
with to_graph.as_default():
new_var = Variable(
init_value,
trainable,
name=new_name,
collections=collections,
validate_shape=False)
return new_var
def copy_op_to_graph(org_instance, to_graph, variables, scope=''):
"""Returns a copy of an operation from another Graph under a specified scope.
Given an `Operation` `org_instance` from one `Graph`,
initializes and returns a copy of it from another `Graph`,
under the specified scope (default `""`).
The copying is done recursively, so any `Operation` whose output
is required to evaluate the `org_instance`, is also copied (unless
already done).
Since `Variable` instances are copied separately, those required
to evaluate `org_instance` must be provided as input.
Args:
org_instance: An `Operation` from some `Graph`. Could be a
`Placeholder` as well.
to_graph: The `Graph` to copy `org_instance` to.
variables: An iterable of `Variable` instances that `org_instance` depends
on, already copied to `to_graph` (e.g. via `copy_variable_to_graph`).
scope: A scope for the copied instance (default `""`).
Returns:
The copied `Operation` from `to_graph`.
Raises:
TypeError: If `org_instance` is not an `Operation` or `Tensor`.
"""
#The name of the new instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
#Extract names of variables
copied_variables = dict((x.name, x) for x in variables)
#If a variable by the new name already exists, return the
#corresponding tensor that will act as an input
if new_name in copied_variables:
return to_graph.get_tensor_by_name(copied_variables[new_name].name)
#If an instance of the same name exists, return appropriately
try:
already_present = to_graph.as_graph_element(
new_name, allow_tensor=True, allow_operation=True)
return already_present
except:
pass
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if scope == '':
collections.append(name)
else:
collections.append(scope + '/' + name)
#Take action based on the class of the instance
if isinstance(org_instance, ops.Tensor):
#If it's a Tensor, it is one of the outputs of the underlying
#op. Therefore, copy the op itself and return the appropriate
#output.
op = org_instance.op
new_op = copy_op_to_graph(op, to_graph, variables, scope)
output_index = op.outputs.index(org_instance)
new_tensor = new_op.outputs[output_index]
#Add to collections if any
for collection in collections:
to_graph.add_to_collection(collection, new_tensor)
return new_tensor
elif isinstance(org_instance, ops.Operation):
op = org_instance
#If it has an original_op parameter, copy it
if op._original_op is not None:
new_original_op = copy_op_to_graph(op._original_op, to_graph, variables,
scope)
else:
new_original_op = None
#If it has control inputs, call this function recursively on each.
new_control_inputs = [
copy_op_to_graph(x, to_graph, variables, scope)
for x in op.control_inputs
]
#If it has inputs, call this function recursively on each.
new_inputs = [
copy_op_to_graph(x, to_graph, variables, scope) for x in op.inputs
]
#Make a new node_def based on that of the original.
#An instance of tensorflow.core.framework.node_def_pb2.NodeDef, it
#stores String-based info such as name, device and type of the op.
#Unique to every Operation instance.
new_node_def = deepcopy(op.node_def)
#Change the name
new_node_def.name = new_name
#Copy the other inputs needed for initialization
output_types = op._output_types[:]
input_types = op._input_types[:]
#Make a copy of the op_def too.
#It's unique to every _type_ of Operation.
op_def = deepcopy(op.op_def)
#Initialize a new Operation instance
new_op = ops.Operation(new_node_def, to_graph, new_inputs, output_types,
new_control_inputs, input_types, new_original_op,
op_def)
#Use Graph's hidden methods to add the op
to_graph._record_op_seen_by_control_dependencies(new_op)
# pylint: disable=protected-access
for device_function in to_graph._device_functions_outer_to_inner:
new_op._set_device(device_function(new_op))
# pylint: enable=protected-access
return new_op
else:
raise TypeError('Could not copy instance: ' + str(org_instance))
def get_copied_op(org_instance, graph, scope=''):
"""Given an `Operation` instance from some `Graph`, returns
its namesake from `graph`, under the specified scope
(default `""`).
If a copy of `org_instance` is present in `graph` under the given
`scope`, it will be returned.
Args:
org_instance: An `Operation` from some `Graph`.
graph: The `Graph` to be searched for a copy of `org_instance`.
scope: The scope `org_instance` is present in.
Returns:
The `Operation` copy from `graph`.
"""
#The name of the copied instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
return graph.as_graph_element(
new_name, allow_tensor=True, allow_operation=True)
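# -----------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical, not part of the API documented
# above).  Assumes a TF1-style graph/session workflow (eager execution
# disabled): variables are copied first with copy_variable_to_graph, and any
# dependent ops would then be copied with copy_op_to_graph, passing the
# already-copied variables.
if __name__ == '__main__':
  graph_a = ops.Graph()
  with graph_a.as_default():
    v = Variable([1.0, 2.0], name='v')
  graph_b = ops.Graph()
  copied_v = copy_variable_to_graph(v, graph_b, scope='copied')
  with graph_b.as_default():
    with Session() as sess:
      sess.run(copied_v.initializer)
      print(copied_v.name)       # 'copied/v:0'
      print(sess.run(copied_v))  # [1. 2.]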
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Types for building models of metric description xml files.
UMA uses several XML files to allow clients to describe the metrics that they
collect, e.g.
https://chromium.googlesource.com/chromium/src/+/master/tools/metrics/rappor/rappor.xml
These types can be used to build models that describe the canonical formatted
structure of these files, and the models can be used to extract the contents of
those files, or convert content back into a canonicalized version of the file.
"""
import abc
import re
from xml.dom import minidom
import pretty_print_xml
# A non-basic type key for storing comments, so they don't conflict with
# regular keys, and can be skipped in JSON serialization.
COMMENT_KEY = ('comment',)
def GetCommentsForNode(node):
"""Extracts comments in the current node.
Args:
node: The DOM node to extract comments from.
Returns:
A list of comment DOM nodes.
"""
comments = []
node = node.previousSibling
while node:
if node.nodeType == minidom.Node.COMMENT_NODE:
comments.append(node.data)
elif node.nodeType != minidom.Node.TEXT_NODE:
break
node = node.previousSibling
return comments[::-1]
def PutCommentsInNode(doc, node, comments):
"""Appends comments to the DOM node.
Args:
doc: The document to create a comment in.
node: The DOM node to write comments to.
comments: A list of comments.
"""
for comment in comments:
node.appendChild(doc.createComment(comment))
def GetChildrenByTag(node, tag):
"""Get all children of a particular tag type.
Args:
node: The DOM node whose children to collect.
tag: The tag of the nodes to collect.
Returns:
A list of DOM nodes.
"""
return [child for child in node.childNodes if child.nodeName == tag]
class NodeType(object):
"""Base type for a type of XML node.
Args:
indent: True iff this node should have its children indented when pretty
printing.
extra_newlines: None or a triple of integers describing the number of
newlines that should be printed (after_open, before_close, after_close)
single_line: True iff this node may be squashed into a single line.
alphabetization: A list of [(tag, keyfn)] pairs, which specify the tags of
the children that should be sorted, and the functions to get sort keys
from xml nodes.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, tag,
indent=True,
extra_newlines=None,
single_line=False,
alphabetization=None):
self.tag = tag
self.indent = indent
self.extra_newlines = extra_newlines
self.single_line = single_line
self.alphabetization = alphabetization
@abc.abstractmethod
def Unmarshall(self, node):
"""Extracts the content of the node to an object.
Args:
node: The XML node to extract data from.
Returns:
An object extracted from the node.
"""
@abc.abstractmethod
def Marshall(self, doc, obj):
"""Converts an object into an XML node of this type.
Args:
doc: A document to create an XML node in.
obj: The object to be encoded into the XML.
Returns:
An XML node encoding the object.
"""
def GetComments(self, obj):
"""Gets comments for the object being encoded.
Args:
obj: The object to be encoded into the XML.
Returns:
A list of comment nodes for the object.
"""
del obj # Used in ObjectNodeType implementation
# The base NodeType does not store comments
return []
def MarshallIntoNode(self, doc, node, obj):
"""Marshalls the object and appends it to a node, with comments.
Args:
doc: A document to create an XML node in.
node: An XML node to marshall the object into.
obj: The object to be encoded into the XML.
"""
PutCommentsInNode(doc, node, self.GetComments(obj))
node.appendChild(self.Marshall(doc, obj))
def GetAttributes(self):
"""Gets a sorted list of attributes that this node can have.
Returns:
A list of names of XML attributes, sorted by the order they should appear.
"""
return []
def GetNodeTypes(self):
"""Gets a map of tags to node types for all dependent types.
Returns:
A map of tags to node-types for this node and all of the nodes that it
can contain.
"""
return {self.tag: self}
class TextNodeType(NodeType):
"""A type for simple nodes that just have a tag and some text content.
Unmarshalls nodes to strings.
Args:
tag: The name of XML tag for this type of node.
"""
def __str__(self):
return 'TextNodeType("%s")' % self.tag
def Unmarshall(self, node):
"""Extracts the content of the node to an object.
Args:
node: The XML node to extract data from.
Returns:
The string content of the node.
"""
if not node.firstChild:
return ''
text = node.firstChild.nodeValue
return '\n\n'.join(pretty_print_xml.SplitParagraphs(text))
def Marshall(self, doc, obj):
"""Converts an object into an XML node of this type.
Args:
doc: A document to create an XML node in.
obj: A string to be encoded into the XML.
Returns:
An XML node encoding the object.
"""
node = doc.createElement(self.tag)
if obj:
node.appendChild(doc.createTextNode(obj))
return node
class ChildType(object):
"""Metadata about a node type's children.
Args:
attr: The field name on the parent's model object that stores the child's model.
node_type: The NodeType of the child.
multiple: True if the child can be repeated.
"""
def __init__(self, attr, node_type, multiple):
self.attr = attr
self.node_type = node_type
self.multiple = multiple
class ObjectNodeType(NodeType):
"""A complex node type that has attributes or other nodes as children.
Unmarshalls nodes to objects.
Args:
tag: The name of XML tag for this type of node.
attributes: A list of (name, type, regex) tuples, e.g. [('foo', unicode,
r'^\w+$')]. The order of this list determines the attribute ordering
when serializing objects to XML. The "regex" can be None to skip
validation; otherwise the attribute value must match that pattern.
text_attribute: An attribute stored in the text content of the node.
children: A list of ChildTypes describing the object's children.
Raises:
ValueError: Attributes contains duplicate definitions.
"""
def __init__(self, tag,
attributes=None,
children=None,
text_attribute=None,
**kwargs):
NodeType.__init__(self, tag, **kwargs)
self.attributes = attributes or []
self.children = children or []
self.text_attribute = text_attribute
if len(self.attributes) != len(set(a for a, _, _ in self.attributes)):
raise ValueError('Duplicate attribute definition.')
def __str__(self):
return 'ObjectNodeType("%s")' % self.tag
def Unmarshall(self, node):
"""Extracts the content of the node to an object.
Args:
node: The XML node to extract data from.
Returns:
An object extracted from the node.
Raises:
ValueError: The node is missing required children.
"""
obj = {}
obj[COMMENT_KEY] = GetCommentsForNode(node)
for attr, attr_type, attr_re in self.attributes:
if node.hasAttribute(attr):
obj[attr] = attr_type(node.getAttribute(attr))
if attr_re is not None:
attr_val = obj.get(attr, '')
if not re.match(attr_re, attr_val):
raise ValueError('%s "%s" does not match regex "%s"' %
(attr, attr_val, attr_re))
if self.text_attribute and node.firstChild:
obj[self.text_attribute] = node.firstChild.nodeValue
for child in self.children:
nodes = GetChildrenByTag(node, child.node_type.tag)
if child.multiple:
obj[child.attr] = [
child.node_type.Unmarshall(n) for n in nodes]
elif nodes:
obj[child.attr] = child.node_type.Unmarshall(nodes[0])
return obj
def Marshall(self, doc, obj):
"""Converts an object into an XML node of this type.
Args:
doc: A document to create an XML node in.
obj: The object to be encoded into the XML.
Returns:
An XML node encoding the object.
"""
node = doc.createElement(self.tag)
for attr, _, _ in self.attributes:
if attr in obj:
node.setAttribute(attr, str(obj[attr]))
if self.text_attribute and self.text_attribute in obj:
node.appendChild(doc.createTextNode(obj[self.text_attribute]))
for child in self.children:
if child.multiple:
for child_obj in obj[child.attr]:
child.node_type.MarshallIntoNode(doc, node, child_obj)
elif child.attr in obj:
child.node_type.MarshallIntoNode(doc, node, obj[child.attr])
return node
def GetComments(self, obj):
"""Gets comments for the object being encoded.
Args:
obj: The object to be encoded into the XML.
Returns:
A list of comment nodes for the object.
"""
return obj[COMMENT_KEY]
def GetAttributes(self):
"""Gets a sorted list of attributes that this node can have.
Returns:
A list of names of XML attributes, sorted by the order they should appear.
"""
return [attr for attr, _, _ in self.attributes]
def GetNodeTypes(self):
"""Get a map of tags to node types for all dependent types.
Returns:
A map of tags to node-types for this node and all of the nodes that it
can contain.
"""
types = {self.tag: self}
for child in self.children:
types.update(child.node_type.GetNodeTypes())
return types
class DocumentType(object):
"""Model for the root of an XML description file.
Args:
root_type: A NodeType describing the root tag of the document.
"""
def __init__(self, root_type):
self.root_type = root_type
def Parse(self, input_file):
"""Parses the data out of an XML file's contents.
Args:
input_file: The content of an XML file, as a string.
Returns:
An object representing the unmarshalled content of the document's root
node.
"""
tree = minidom.parseString(input_file)
root = tree.getElementsByTagName(self.root_type.tag)[0]
return self.root_type.Unmarshall(root)
def GetPrintStyle(self):
"""Gets an XmlStyle object for pretty printing a document of this type.
Returns:
An XML style object.
"""
types = self.root_type.GetNodeTypes()
return pretty_print_xml.XmlStyle(
attribute_order={t: types[t].GetAttributes() for t in types},
required_attributes=[],
tags_that_have_extra_newline={t: types[t].extra_newlines for t in types
if types[t].extra_newlines},
tags_that_dont_indent=[t for t in types if not types[t].indent],
tags_that_allow_single_line=[t for t in types if types[t].single_line],
tags_alphabetization_rules={t: types[t].alphabetization for t in types
if types[t].alphabetization})
def _ToXML(self, obj):
"""Converts an object into an XML document.
Args:
obj: An object to serialize to XML.
Returns:
An XML minidom Document object.
"""
doc = minidom.Document()
self.root_type.MarshallIntoNode(doc, doc, obj)
return doc
def PrettyPrint(self, obj):
"""Converts an object into pretty-printed XML as a string.
Args:
obj: An object to serialize to XML.
Returns:
A string containing pretty printed XML.
"""
return self.GetPrintStyle().PrettyPrintXml(self._ToXML(obj))
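# -----------------------------------------------------------------------------
# Illustrative sketch (hypothetical tags, not taken from any real metrics
# file): build a tiny model from the node types above and round-trip a
# document through it.
if __name__ == '__main__':
  _OWNER_TYPE = TextNodeType('owner', single_line=True)
  _METRIC_TYPE = ObjectNodeType(
      'metric',
      attributes=[('name', str, r'^\w+$')],
      children=[ChildType('owners', _OWNER_TYPE, multiple=True)])
  _CONFIG_TYPE = ObjectNodeType(
      'metric-config',
      children=[ChildType('metrics', _METRIC_TYPE, multiple=True)])
  _DOCUMENT_TYPE = DocumentType(_CONFIG_TYPE)
  config = _DOCUMENT_TYPE.Parse(
      '<metric-config>'
      '<metric name="Foo"><owner>someone@chromium.org</owner></metric>'
      '</metric-config>')
  print(config['metrics'][0]['name'])    # -> Foo
  print(config['metrics'][0]['owners'])  # -> ['someone@chromium.org']
  # Convert the extracted model back into canonicalized XML.
  print(_DOCUMENT_TYPE.PrettyPrint(config))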
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu_embedding_v2
from tensorflow.python.tpu import tpu_embedding_v2_utils
from tensorflow.python.tpu.tests import tpu_embedding_base_test
from tensorflow.python.util import nest
class TPUEmbeddingTest(tpu_embedding_base_test.TPUEmbeddingBaseTest):
def test_pass_none_to_apply_gradients(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
mid_level_api.build([
TensorShape((self.batch_size, 2)),
TensorShape((self.batch_size, 2)),
TensorShape((self.batch_size, 3))
])
dataset = self._create_sparse_dataset(strategy)
data = next(
iter(
strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False))))
@def_function.function
def embedding_and_set_gradients(data):
mid_level_api.enqueue(data)
def tpu_fn():
results = mid_level_api.dequeue()
mid_level_api.apply_gradients((None, None,
array_ops.ones_like(results[2])))
return results
return strategy.run(tpu_fn)
@def_function.function
def embedding_only(data):
mid_level_api.enqueue(data, training=False)
def tpu_fn():
return mid_level_api.dequeue()
return strategy.run(tpu_fn)
first = self._get_replica_numpy(
embedding_and_set_gradients(data), strategy, 0)
second = self._get_replica_numpy(embedding_only(data), strategy, 0)
# First two features should be the same as None gradient was applied.
# Third feature had gradient of 1 passed in from each core.
# Each core received the same ids per core and returned the following batch:
# [ row 3, row 0 + row 1 + row 2 ]
# so gradient update was (learning rate = 0.1):
# row 0: -1/3*0.1
# row 1: -1/3*0.1
# row 2: -1/3*0.1
# row 3: -1*0.1
# There is a factor of num_replicas because each replica gave an update.
num_replicas = strategy.num_replicas_in_sync
update = ([[0.0]], [[0.0]],
[[0.1 * num_replicas], [0.1 / 3 * num_replicas]])
golden = tuple([feature-np.array(up) for feature, up in zip(first, update)])
self.assertAllClose(golden, second)
def test_enqueue_sparse_and_ragged(self):
self.skip_if_oss()
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy)
ragged = self._create_ragged_dataset(strategy)
sparse_iter = iter(
strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
ragged_iter = iter(
strategy.experimental_distribute_dataset(
ragged,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
sparse_features = next(sparse_iter)
ragged_features = next(ragged_iter)
features = (sparse_features[0], ragged_features[1], sparse_features[2])
mid_level_api.enqueue(features, training=False)
return strategy.run(step)
test_fn()
def test_enqueue_per_device(self):
self.skip_if_oss()
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy)
sparse_iter = iter(
strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
@def_function.function
def test_fn():
def get_activations(dense_value):
return mid_level_api.dequeue(), dense_value
sparse_features = next(sparse_iter)
mid_level_api.enqueue(sparse_features, training=False)
activations, dense_value1 = strategy.run(get_activations, args=(0.0,))
def enqueue_fn(ctx):
core_id = ctx.replica_id_in_sync_group
device = strategy.extended.worker_devices[core_id]
sparse_features_local = nest.map_structure(
lambda x: strategy.experimental_local_results(x)[core_id],
sparse_features)
mid_level_api.enqueue(sparse_features_local, training=False,
device=device)
return 0.0
data = strategy.experimental_distribute_values_from_function(
enqueue_fn)
per_device_activations, dense_value2 = strategy.run(get_activations,
args=(data,))
return activations, per_device_activations, dense_value1, dense_value2
activations, per_device_activations, _, _ = test_fn()
# Extract per-core numpy arrays and check that the bulk enqueue and the
# per-device enqueue paths give the same results.
activations0 = self._get_replica_numpy(activations, strategy, 0)
per_device_activations0 = self._get_replica_numpy(
per_device_activations, strategy, 0)
self.assertAllClose(activations0, per_device_activations0)
test_fn()
@parameterized.parameters(True, False)
def test_enqueue_with_weights(self, ragged):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
weight = 0.5
if ragged:
dataset = self._create_ragged_dataset(strategy, include_weights=True,
weight=weight)
else:
dataset = self._create_sparse_dataset(strategy, include_weights=True,
weight=weight)
mid_level_api.build([
TensorShape((self.batch_size, 2)),
TensorShape((self.batch_size, 2)),
TensorShape((self.batch_size, 3))
])
dataset_iter = iter(
strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
@def_function.function
def enqueue_and_get(features, weights):
def get_activations():
return mid_level_api.dequeue()
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(get_activations)
features, weights = next(dataset_iter)
# Replace the weight for the second feature by None to test.
weights = (weights[0], None, weights[2])
no_weights_activations = enqueue_and_get(features, weights=None)
weights_activations = enqueue_and_get(features, weights=weights)
# Extract per-core numpy arrays.
no_weights0 = self._get_replica_numpy(no_weights_activations, strategy, 0)
weights0 = self._get_replica_numpy(weights_activations, strategy, 0)
# videos table has sum combiner and users table has mean combiner.
# i.e. the users table lookups aren't affected by the weights because all
# of the weights are the same.
# Tuple entries 0 and 1 are the watched and favorited features from the
# videos table, and entry 2 is the friends feature from the users table.
# Note that None was passed as a weight for entry 1 so weight should have no
# effect.
weight = (0.5, 1.0, 1.0)
golden = tuple([no_weight * w for no_weight, w in zip(no_weights0, weight)])
self.assertAllClose(golden, weights0)
def test_same_config_different_instantiations(self):
self.skip_if_oss()
num_tables = 30
table_dim = np.random.randint(1, 128, size=[num_tables])
table_vocab_size = np.random.randint(100, 1000, size=[num_tables])
table_names = ['table{}'.format(i) for i in range(num_tables)]
table_data = list(zip(table_dim, table_vocab_size, table_names))
strategy = self._get_strategy()
def tpu_embedding_config():
feature_configs = []
for dim, vocab, name in table_data:
feature_configs.append(tpu_embedding_v2_utils.FeatureConfig(
table=tpu_embedding_v2_utils.TableConfig(
vocabulary_size=int(vocab), dim=int(dim),
initializer=init_ops_v2.Zeros(), name=name)))
optimizer = tpu_embedding_v2_utils.Adagrad(
learning_rate=0.1)
with strategy.scope():
mid_level_api = tpu_embedding_v2.TPUEmbedding(
feature_config=feature_configs,
optimizer=optimizer)
mid_level_api._output_shapes = [TensorShape(128)] * len(feature_configs)
return mid_level_api._create_config_proto()
self.assertProtoEquals(tpu_embedding_config(), tpu_embedding_config())
@parameterized.parameters([True, False])
def test_missing_feature(self, is_sparse):
strategy = self._get_strategy()
with strategy.scope():
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
mid_level_api = tpu_embedding_v2.TPUEmbedding(
feature_config=tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='watched'),
optimizer=optimizer)
# Create sparse or ragged feature with last sample missing.
if is_sparse:
features = sparse_tensor.SparseTensor(
indices=self.feature_watched_indices[:-1],
values=self.feature_watched_values[:-1],
dense_shape=[self.data_batch_size, 2])
else:
features = ragged_tensor.RaggedTensor.from_row_lengths(
row_lengths=[1, 2, 2, 0], values=self.feature_watched_values[:-1])
dataset = dataset_ops.DatasetV2.from_tensors(features)
dataset = dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
dataset_iter = iter(
strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
@def_function.function
def test_fn():
def get_activations():
return mid_level_api.dequeue()
mid_level_api.enqueue(next(dataset_iter), training=False)
return strategy.run(get_activations)
test_fn()
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.swob import Request, Response
from swift.common.middleware import gatekeeper
class FakeApp(object):
def __init__(self, headers=None):
if headers is None:
headers = {}
self.headers = headers
self.req = None
def __call__(self, env, start_response):
self.req = Request(env)
return Response(request=self.req, body=b'FAKE APP',
headers=self.headers)(env, start_response)
class FakeMiddleware(object):
def __init__(self, app, conf, header_list=None):
self.app = app
self.conf = conf
self.header_list = header_list
def __call__(self, env, start_response):
def fake_resp(status, response_headers, exc_info=None):
for i in self.header_list:
response_headers.append(i)
return start_response(status, response_headers, exc_info)
return self.app(env, fake_resp)
class TestGatekeeper(unittest.TestCase):
methods = ['PUT', 'POST', 'GET', 'DELETE', 'HEAD', 'COPY', 'OPTIONS']
allowed_headers = {'xx-account-sysmeta-foo': 'value',
'xx-container-sysmeta-foo': 'value',
'xx-object-sysmeta-foo': 'value',
'x-account-meta-foo': 'value',
'x-container-meta-foo': 'value',
'x-object-meta-foo': 'value',
'x-timestamp-foo': 'value'}
sysmeta_headers = {'x-account-sysmeta-': 'value',
'x-container-sysmeta-': 'value',
'x-object-sysmeta-': 'value',
'x-account-sysmeta-foo': 'value',
'x-container-sysmeta-foo': 'value',
'x-object-sysmeta-foo': 'value',
'X-Account-Sysmeta-BAR': 'value',
'X-Container-Sysmeta-BAR': 'value',
'X-Object-Sysmeta-BAR': 'value'}
x_backend_headers = {'X-Backend-Replication': 'true',
'X-Backend-Replication-Headers': 'stuff'}
object_transient_sysmeta_headers = {
'x-object-transient-sysmeta-': 'value',
'x-object-transient-sysmeta-foo': 'value'}
x_timestamp_headers = {'X-Timestamp': '1455952805.719739'}
forbidden_headers_out = dict(sysmeta_headers)
forbidden_headers_out.update(x_backend_headers)
forbidden_headers_out.update(object_transient_sysmeta_headers)
forbidden_headers_in = dict(forbidden_headers_out)
shunted_headers_in = dict(x_timestamp_headers)
def _assertHeadersEqual(self, expected, actual):
for key in expected:
self.assertIn(key.lower(), actual)
def _assertHeadersAbsent(self, unexpected, actual):
for key in unexpected:
self.assertNotIn(key.lower(), actual)
def get_app(self, app, global_conf, **local_conf):
factory = gatekeeper.filter_factory(global_conf, **local_conf)
return factory(app)
def test_ok_header(self):
req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers=self.allowed_headers)
fake_app = FakeApp()
app = self.get_app(fake_app, {})
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self.assertEqual(resp.body, b'FAKE APP')
self._assertHeadersEqual(self.allowed_headers, fake_app.req.headers)
def _test_reserved_header_removed_inbound(self, method):
headers = dict(self.forbidden_headers_in)
headers.update(self.allowed_headers)
headers.update(self.shunted_headers_in)
req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
fake_app = FakeApp()
app = self.get_app(fake_app, {})
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
expected_headers = dict(self.allowed_headers)
# shunt_inbound_x_timestamp should be enabled by default
expected_headers.update({'X-Backend-Inbound-' + k: v
for k, v in self.shunted_headers_in.items()})
self._assertHeadersEqual(expected_headers, fake_app.req.headers)
unexpected_headers = dict(self.forbidden_headers_in)
unexpected_headers.update(self.shunted_headers_in)
self._assertHeadersAbsent(unexpected_headers, fake_app.req.headers)
def test_reserved_header_removed_inbound(self):
for method in self.methods:
self._test_reserved_header_removed_inbound(method)
def _test_reserved_header_shunted_inbound(self, method):
headers = dict(self.shunted_headers_in)
headers.update(self.allowed_headers)
req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
fake_app = FakeApp()
app = self.get_app(fake_app, {}, shunt_inbound_x_timestamp='true')
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
expected_headers = dict(self.allowed_headers)
expected_headers.update({'X-Backend-Inbound-' + k: v
for k, v in self.shunted_headers_in.items()})
self._assertHeadersEqual(expected_headers, fake_app.req.headers)
self._assertHeadersAbsent(self.shunted_headers_in,
fake_app.req.headers)
def test_reserved_header_shunted_inbound(self):
for method in self.methods:
self._test_reserved_header_shunted_inbound(method)
def _test_reserved_header_shunt_bypassed_inbound(self, method):
headers = dict(self.shunted_headers_in)
headers.update(self.allowed_headers)
req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
fake_app = FakeApp()
app = self.get_app(fake_app, {}, shunt_inbound_x_timestamp='false')
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
expected_headers = dict(self.allowed_headers)
expected_headers.update(self.shunted_headers_in)
self._assertHeadersEqual(expected_headers, fake_app.req.headers)
def test_reserved_header_shunt_bypassed_inbound(self):
for method in self.methods:
self._test_reserved_header_shunt_bypassed_inbound(method)
def _test_reserved_header_removed_outbound(self, method):
headers = dict(self.forbidden_headers_out)
headers.update(self.allowed_headers)
req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method})
fake_app = FakeApp(headers=headers)
app = self.get_app(fake_app, {})
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self._assertHeadersEqual(self.allowed_headers, resp.headers)
self._assertHeadersAbsent(self.forbidden_headers_out, resp.headers)
def test_reserved_header_removed_outbound(self):
for method in self.methods:
self._test_reserved_header_removed_outbound(method)
def _test_duplicate_headers_not_removed(self, method, app_hdrs):
def fake_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
headers = [('X-Header', 'xxx'),
('X-Header', 'yyy')]
def fake_filter(app):
return FakeMiddleware(app, conf, headers)
return fake_filter
def fake_start_response(status, response_headers, exc_info=None):
hdr_list = []
for k, v in response_headers:
if k == 'X-Header':
hdr_list.append(v)
self.assertTrue('xxx' in hdr_list)
self.assertTrue('yyy' in hdr_list)
self.assertEqual(len(hdr_list), 2)
req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method})
fake_app = FakeApp(headers=app_hdrs)
factory = gatekeeper.filter_factory({})
factory_wrap = fake_factory({})
app = factory(factory_wrap(fake_app))
app(req.environ, fake_start_response)
def test_duplicate_headers_not_removed(self):
for method in self.methods:
for app_hdrs in ({}, self.forbidden_headers_out):
self._test_duplicate_headers_not_removed(method, app_hdrs)
def _test_location_header(self, location_path):
headers = {'Location': location_path}
req = Request.blank(
'/v/a/c', environ={'REQUEST_METHOD': 'GET',
'swift.leave_relative_location': True})
class SelfishApp(FakeApp):
def __call__(self, env, start_response):
self.req = Request(env)
resp = Response(request=self.req, body=b'FAKE APP',
headers=self.headers)
# like webob, middlewares in the pipeline may rewrite
# location header from relative to absolute
resp.location = resp.absolute_location()
return resp(env, start_response)
selfish_app = SelfishApp(headers=headers)
app = self.get_app(selfish_app, {})
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self.assertIn('Location', resp.headers)
self.assertEqual(resp.headers['Location'], location_path)
def test_location_header_fixed(self):
self._test_location_header('/v/a/c/o2')
self._test_location_header('/v/a/c/o2?query=path&query2=doit')
self._test_location_header('/v/a/c/o2?query=path#test')
self._test_location_header('/v/a/c/o2;whatisparam?query=path#test')
def test_allow_reserved_names(self):
fake_app = FakeApp()
app = self.get_app(fake_app, {})
headers = {
'X-Allow-Reserved-Names': 'some-value'
}
req = Request.blank('/v/a/c/o', method='GET', headers=headers)
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self.assertNotIn('X-Backend-Allow-Reserved-Names',
fake_app.req.headers)
self.assertIn('X-Allow-Reserved-Names',
fake_app.req.headers)
self.assertEqual(
'some-value',
fake_app.req.headers['X-Allow-Reserved-Names'])
app.allow_reserved_names_header = True
req = Request.blank('/v/a/c/o', method='GET', headers=headers)
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self.assertIn('X-Backend-Allow-Reserved-Names',
fake_app.req.headers)
self.assertEqual(
'some-value',
fake_app.req.headers['X-Backend-Allow-Reserved-Names'])
self.assertEqual(
'some-value',
req.headers['X-Backend-Allow-Reserved-Names'])
self.assertNotIn('X-Allow-Reserved-Names', fake_app.req.headers)
self.assertNotIn('X-Allow-Reserved-Names', req.headers)
if __name__ == '__main__':
unittest.main()
|
|
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 Erik T. Everson and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
import h5py
import numpy as np
from typing import Iterable
from warnings import warn
class FauxSIS3301(h5py.Group):
"""
Creates a Faux 'SIS 3301' Group in a HDF5 file.
"""
# noinspection SpellCheckingInspection,PyProtectedMember
class _knobs(object):
"""
A class that contains all the controls for specifying the
digitizer group structure.
"""
def __init__(self, val):
super().__init__()
self._faux = val
@property
def active_brdch(self):
"""
Boolean numpy array of active board, channel combinations.
Shape = (13, 8) 13 boards and 8 channels
"""
return self._faux._active_brdch.copy()
@active_brdch.setter
def active_brdch(self, val):
"""
Set the active board, channel combinations
"""
if isinstance(val, np.ndarray):
if (
val.shape == (13, 8)
                    and np.issubdtype(val.dtype, np.bool_)
and np.any(val)
):
self._faux._active_brdch = val
self._faux._update()
else:
warn("`val` not valid, no update performed")
else:
warn("`val` not valid, no update performed")
@property
def active_config(self):
"""current active configuration"""
return self._faux._active_config
@active_config.setter
def active_config(self, val):
if not isinstance(val, Iterable) or isinstance(val, str):
val = (val,)
elif isinstance(val, tuple):
pass
else:
val = tuple(val)
# if val in self._faux._config_names:
if all(cname in self._faux._config_names for cname in val):
if val != self._faux._active_config:
self._faux._active_config = val
self._faux._update()
else:
warn("`val` not valid, no update performed")
@property
def n_configs(self):
"""Number of SIS 3301 configurations"""
return self._faux._n_configs
@n_configs.setter
def n_configs(self, val):
"""Set number of waveform configurations"""
if val >= 1 and isinstance(val, int):
if val != self._faux._n_configs:
self._faux._n_configs = val
self._faux._update()
else:
warn("`val` not valid, no update performed")
@property
def nt(self):
"""Number of temporal samples"""
return self._faux._nt
@nt.setter
def nt(self, val):
"""Set the number of temporal samples"""
if isinstance(val, int):
if val != self._faux._nt:
self._faux._nt = val
self._faux._update()
else:
warn("`val` not valid, no update performed")
@property
def sn_size(self):
"""Number of shot numbers in a dataset"""
return self._faux._sn_size
@sn_size.setter
def sn_size(self, val):
"""Set the number of shot numbers in a dataset"""
if isinstance(val, int) and val >= 1:
if val != self._faux._sn_size:
self._faux._sn_size = val
self._faux._update()
else:
warn("`val` not valid, no update performed")
def reset(self):
"""Reset 'SIS 3301' group to defaults."""
self._faux._default_setup()
self._faux._update()
def __init__(self, id, n_configs=1, sn_size=100, nt=10000, **kwargs):
# ensure id is for a HDF5 group
if not isinstance(id, h5py.h5g.GroupID):
raise ValueError(f"{id} is not a GroupID")
# create control group
# noinspection PyUnresolvedReferences
gid = h5py.h5g.create(id, b"SIS 3301")
h5py.Group.__init__(self, gid)
# define key values
self._default_setup()
if n_configs != self._n_configs:
self._n_configs = n_configs
if sn_size != self._sn_size:
self._sn_size = sn_size
if nt != self._nt:
self._nt = nt
# set root attributes
self._set_sis3301_attrs()
# build control device sub-groups, datasets, and attributes
self._update()
@property
def knobs(self):
"""Knobs for controlling structure of digitizer group"""
return self._knobs(self)
@property
def config_names(self):
"""list of 'SIS 3301' configuration names"""
return self._config_names.copy()
def _default_setup(self):
"""Set group setup parameters to defaults"""
self._n_configs = 1
self._sn_size = 100
self._nt = 10000
self._active_brdch = np.zeros((13, 8), dtype=bool)
self._active_brdch[0][0] = True
self._config_names = []
self._active_config = ("config01",)
def _set_sis3301_attrs(self):
"""Sets the 'SIS 3301' group attributes"""
self.attrs.update(
{
"Created date": np.bytes_("5/21/2004 4:09:05 PM"),
"Description": np.bytes_(
"Struck Innovative Systeme 3301 8 channel ADC boards, "
"100 MHz. Also provides access to SIS 3820 VME clock "
"distribute."
),
"Device name": np.bytes_("SIS 3301"),
"Module IP address": np.bytes_("192.168.7.3"),
"Module VI path": np.bytes_(
"C:\ACQ II home\Modules\SIS 3301\SIS 3301.vi"
),
"Type": np.bytes_("Data acquisition"),
}
)
def _update(self):
"""
Updates digitizer group structure (Groups, Datasets, and
Attributes)
"""
# clear group before rebuild
self.clear()
# build configuration groups
self._config_names = []
for i in range(self._n_configs):
config_name = f"config{i+1:02}"
self._config_names.append(config_name)
self._build_config_group(config_name)
# reset active configuration if necessary
if not all(cname in self._config_names for cname in self._active_config):
self._active_config = (self._config_names[0],)
# build datasets
self._build_datasets()
def _build_config_group(self, config_name: str):
"""
Creates and populates the digitizer configuration group.
:param config_name: name of digitizer configuration
"""
# create configuration group
gname = f"Configuration: {config_name}"
self.create_group(gname)
# set attributes for configuration group
# TODO: allow setting of sample averaging
# TODO: allow setting of shot averaging
self[gname].attrs.update(
{
"Clock rate": np.bytes_("Internal 100 MHz"),
"Configuration": np.bytes_(config_name),
"Samples to average": np.bytes_("No averaging"),
"Shots to average": np.int16(1),
"Software start": np.bytes_("TRUE"),
"Stop delay": np.uint16(0),
"Trigger mode": np.bytes_("Start/stop"),
}
)
# create and build Board[] and Channels[] sub-groups
brd_count = 0
brd_bool_arr = np.any(self._active_brdch, axis=1)
brd_index = np.where(brd_bool_arr)[0]
for brd in brd_index:
# create Board[] group
brd_name = f"Boards[{brd_count}]"
brd_path = f"{gname}/{brd_name}"
self[gname].create_group(brd_name)
brd_count += 1
# define Board[] attrs
self[brd_path].attrs.update(
{
"Board": np.uint32(brd),
"Board samples": np.uint32(self._nt),
}
)
# build Channels[] groups
ch_index = np.where(self._active_brdch[brd])[0]
ch_count = 0
for ch in ch_index:
# create Channels[] group
ch_name = f"Channels[{ch_count}]"
ch_path = f"{brd_path}/{ch_name}"
self[brd_path].create_group(ch_name)
ch_count += 1
# define Channels[] attrs
self[ch_path].attrs.update(
{
"Board": np.uint32(brd),
"Channel": np.uint32(ch),
"DC offset (mV)": np.float64(0.0),
"Data type": np.bytes_("signal type info"),
}
)
def _build_datasets(self):
brds, chs = np.where(self._active_brdch)
for i in range(brds.size):
brd = brds[i]
ch = chs[i]
# create and populate datasets
for cname in self._active_config:
# create "main" data set
# dset_name = (self._active_config
# + ' [{}:{}]'.format(brd, ch))
dset_name = f"{cname} [{brd}:{ch}]"
shape = (self._sn_size, self._nt)
data = np.empty(shape=shape, dtype=np.int16)
self.create_dataset(dset_name, data=data)
# create & populate header dataset
dheader_name = f"{dset_name} headers"
shape = (self._sn_size,)
dtype = np.dtype(
[
("Shot", np.uint32),
("Scale", np.float64),
("Offset", np.float64),
("Min", np.int16),
("Max", np.int16),
("Clipped", np.uint8),
]
)
dheader = np.empty(shape=shape, dtype=dtype)
dheader["Shot"] = np.arange(
1, shape[0] + 1, 1, dtype=dheader["Shot"].dtype
)
dheader["Scale"] = 3.051944077014923e-4
dheader["Offset"] = -2.5
dheader["Min"] = data.min(axis=1)
dheader["Max"] = data.max(axis=1)
dheader["Clipped"] = 0
self.create_dataset(dheader_name, data=dheader)
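# Illustrative sketch (not part of bapsflib): one way a FauxSIS3301 group might
# be built inside a temporary HDF5 file and reshaped through its `knobs`.  The
# file name and parent group name below are assumptions for this example only.
def _example_faux_sis3301(tmp_path="faux_sis3301_example.hdf5"):
    """Create a faux 'SIS 3301' group and tweak its structure via knobs."""
    with h5py.File(tmp_path, "w") as f:
        parent = f.create_group("Raw data + config")
        # FauxSIS3301 expects the low-level GroupID of the parent group
        faux = FauxSIS3301(parent.id, n_configs=1, sn_size=50, nt=1000)
        # each knob assignment rebuilds the group structure
        faux.knobs.n_configs = 2
        brdch = np.zeros((13, 8), dtype=bool)
        brdch[0][0:2] = True  # board 0, channels 0 and 1
        faux.knobs.active_brdch = brdch
        return list(faux.config_names)  # e.g. ['config01', 'config02']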
|
|
import logging
log = logging.getLogger(__name__)
from pycqed.analysis_v3 import helper_functions as hlp_mod
from pycqed.analysis_v3 import processing_pipeline as pp_mod
from pycqed.analysis import fitting_models as fit_mods
from collections import OrderedDict
import numpy as np
import lmfit
import sys
pp_mod.search_modules.add(sys.modules[__name__])
#####################################
### Functions related to Fitting ###
#####################################
def run_fitting(data_dict, keys_in='all', **params):
"""
    Fits the fit dicts in data_dict['fit_dicts'] specified by keys_in.
    Only lmfit model fitting is implemented here; minimizer-based fitting
    is not yet supported.
"""
fit_res_dict = {}
if 'fit_dicts' not in data_dict:
raise ValueError('fit_dicts not found in data_dict.')
if keys_in == 'all':
fit_dicts = data_dict['fit_dicts']
else:
fit_dicts = {fk: fd for fk, fd in data_dict['fit_dicts'].items() if
fk in keys_in}
for fit_name, fit_dict in fit_dicts.items():
fit_one_dict(fit_dict)
for par in fit_dict['fit_res'].params:
if fit_dict['fit_res'].params[par].stderr is None:
fit_dict['fit_res'].params[par].stderr = 0
fit_res_dict[fit_name] = fit_dict['fit_res']
def fit_one_dict(fit_dict, **params):
"""
    Does the fit for a single fit_dict and stores the result in fit_dict['fit_res'].
"""
guess_dict = fit_dict.get('guess_dict', None)
guess_pars = fit_dict.get('guess_pars', None)
guessfn_pars = fit_dict.get('guessfn_pars', {})
fit_yvals = fit_dict['fit_yvals']
fit_xvals = fit_dict['fit_xvals']
model = fit_dict.get('model', None)
if model is None:
fit_fn = fit_dict.get('fit_fn', None)
model = fit_dict.get('model', lmfit.Model(fit_fn))
fit_guess_fn = fit_dict.get('fit_guess_fn', None)
if fit_guess_fn is None and fit_dict.get('fit_guess', True):
fit_guess_fn = model.guess
fit_kwargs = fit_dict.get('fit_kwargs', {})
if guess_pars is None:
if fit_guess_fn is not None:
# a fit function should return lmfit parameter
# objects but can also work by returning a
# dictionary of guesses
guess_pars = fit_guess_fn(**fit_yvals, **fit_xvals,
**guessfn_pars)
if not isinstance(guess_pars, lmfit.Parameters):
for gd_key, val in list(guess_pars.items()):
model.set_param_hint(gd_key, **val)
guess_pars = model.make_params()
if guess_dict is not None:
for gd_key, val in guess_dict.items():
for attr, attr_val in val.items():
# e.g. setattr(guess_pars['frequency'],
# 'value', 20e6)
setattr(guess_pars[gd_key], attr,
attr_val)
# A guess can also be specified as a dictionary.
# additionally this can be used to overwrite values
# from the guess functions.
elif guess_dict is not None:
for gd_key, val in list(guess_dict.items()):
model.set_param_hint(gd_key, **val)
guess_pars = model.make_params()
fit_dict['fit_res'] = model.fit(**fit_xvals, **fit_yvals,
params=guess_pars, **fit_kwargs)
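# Minimal illustrative sketch (assumes a toy linear model and synthetic data,
# not the measured-data flow used elsewhere in this module): build the smallest
# valid fit_dict by hand and run it through fit_one_dict.
def _example_fit_one_dict():
    """Fit y = a*x + b to noisy synthetic data with fit_one_dict."""
    def line(x, a, b):
        return a * x + b
    x = np.linspace(0, 1, 50)
    y = 2.0 * x + 0.5 + 0.01 * np.random.randn(x.size)
    fit_dict = {
        'fit_fn': line,                    # wrapped in lmfit.Model internally
        'fit_xvals': {'x': x},             # keys must match the model arguments
        'fit_yvals': {'data': y},
        'fit_guess': False,                # a bare lmfit.Model has no usable guess()
        'guess_dict': {'a': {'value': 1.0}, 'b': {'value': 0.0}},
    }
    fit_one_dict(fit_dict)
    return fit_dict['fit_res'].best_values  # roughly {'a': 2.0, 'b': 0.5}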
def prepare_cos_fit_dict(data_dict, keys_in=None, **params):
fit_dicts = OrderedDict()
data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)
cp, sp, mospm, mobjn = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['cp', 'sp', 'mospm', 'mobjn'], **params)
indep_var_array = hlp_mod.get_param('indep_var_array', data_dict,
raise_error=False, **params)
if indep_var_array is None:
indep_var_array = sp[0][mospm[mobjn][0]][0]
plot_params = hlp_mod.get_param('plot_params', data_dict, default_value={},
**params)
if 'setlabel' not in plot_params:
plot_params['setlabel'] = 'CosFit'
params_to_print = hlp_mod.get_param(
'params_to_print', data_dict, default_value=None, **params)
fit_name = hlp_mod.get_param('fit_name', data_dict,
raise_error=False, **params)
for keyi, data in data_to_proc_dict.items():
data_fit = hlp_mod.get_msmt_data(data, cp, mobjn)
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=indep_var_array, data=data_fit)
guess_pars['amplitude'].vary = True
guess_pars['amplitude'].min = -10
guess_pars['offset'].vary = True
guess_pars['frequency'].vary = True
guess_pars['phase'].vary = True
fit_name_to_set = fit_name
if fit_name_to_set is None:
fit_name_to_set = 'CosFit'
fit_name_to_set += keyi
fit_dicts[fit_name_to_set] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': indep_var_array},
'fit_yvals': {'data': data_fit},
'guess_pars': guess_pars,
'params_to_print': params_to_print, **plot_params}
hlp_mod.add_param('fit_dicts', fit_dicts, data_dict,
add_param_method='update')
def prepare_joint_residzz_fit_dict(data_dict, keys_in=None, **params):
"""
This function does a joint fit to Ramsey data without and with the other
qubit in the |e> state.
keys_in should have two entries corresponding to two 1d arrays for the
data mentioned above, IN THAT ORDER.
:param data_dict: OrderedDict containing data to be processed and where
processed data is to be stored
:param keys_in: list of key names or dictionary keys paths in
data_dict for the data to be processed
:param params: keyword args
do_fitting (bool, default: False): whether to perform the fit
guess_params (dict, default: dict()): dict of guess pars for fit
:return: adds fit_dicts to data_dict
"""
if len(keys_in) != 2:
raise ValueError('keys_in must have two entries.')
fit_dicts = OrderedDict()
data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)
cp, sp, mospm, mobjn = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['cp', 'sp', 'mospm', 'mobjn'], **params)
indep_var_array = hlp_mod.get_param('indep_var_array', data_dict,
raise_error=False, **params)
if indep_var_array is None:
indep_var_array = sp[0][mospm[mobjn][0]][0]
plot_params = hlp_mod.get_param('plot_params', data_dict, default_value={},
**params)
if 'setlabel' not in plot_params:
plot_params['setlabel'] = 'JointResidZZFit'
params_to_print = hlp_mod.get_param(
'params_to_print', data_dict, default_value=None, **params)
data_wo_pulse = hlp_mod.get_msmt_data(
list(data_to_proc_dict.values())[0], cp, mobjn)
data_w_pulse = hlp_mod.get_msmt_data(
list(data_to_proc_dict.values())[1], cp, mobjn)
residzz_mod = lmfit.Model(fit_mods.ResidZZFuncJoint)
guess_pars = fit_mods.exp_damp_osc_guess(
model=residzz_mod, t=indep_var_array,
data=data_wo_pulse)
guess_pars['alpha'].value = -50e3
guess_pars['alpha'].max = 0
# guess_pars['x'].value = 12e-6*guess_pars['alpha'].value
guess_pars['t11'].value = 12e-6
guess_pars['t11'].min = 0
guess_pars['offset'].value = np.mean(data_wo_pulse)
guess_pars['amplitude1'].value = guess_pars['amplitude'].value
# guess_pars['phase1'].value = guess_pars['phase'].value + np.pi/2
# guess_pars['amplitude'].min = -1
# guess_pars['amplitude1'].min = -1
# guess_pars['amplitude'].max = 1
# guess_pars['amplitude1'].max = 1
for par in guess_pars:
guess_pars[par].vary = True
guess_pars['offset'].vary = False
guess_params_new = hlp_mod.get_param('guess_params', data_dict,
default_value=dict(),
raise_error=False, **params)
update_fit_guess_pars(guess_params_new, guess_pars)
fit_name = hlp_mod.get_param('fit_name', data_dict,
raise_error=False, **params)
fit_name_to_set = fit_name
if fit_name_to_set is None:
fit_name_to_set = 'residzz_fit'
fit_name_to_set += ','.join(mobjn)
    fit_dicts[fit_name_to_set] = {
'fit_fn': fit_mods.ResidZZFuncJoint,
'fit_xvals': {'t': indep_var_array},
'fit_yvals': {'data': (data_wo_pulse, data_w_pulse)},
'guess_pars': guess_pars,
'params_to_print': params_to_print, **plot_params}
hlp_mod.add_param('fit_dicts', fit_dicts, data_dict,
add_param_method='update')
if params.get('do_fitting', False):
run_fitting(data_dict, keys_in=list(fit_dicts), **params)
def prepare_residzz_fit_dict(data_dict, keys_in=None, **params):
"""
This function does a fit to Ramsey data with the other
qubit in the |e> state.
:param data_dict: OrderedDict containing data to be processed and where
processed data is to be stored
:param keys_in: list of key names or dictionary keys paths in
data_dict for the data to be processed
:param params: keyword args
do_fitting (bool, default: False): whether to perform the fit
guess_params (dict, default: dict()): dict of guess pars for fit
:return: adds fit_dicts to data_dict
"""
fit_dicts = OrderedDict()
data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)
cp, sp, mospm, mobjn = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['cp', 'sp', 'mospm', 'mobjn'], **params)
indep_var_array = hlp_mod.get_param('indep_var_array', data_dict,
raise_error=False, **params)
if indep_var_array is None:
indep_var_array = sp[0][mospm[mobjn][0]][0]
plot_params = hlp_mod.get_param('plot_params', data_dict, default_value={},
**params)
if 'setlabel' not in plot_params:
plot_params['setlabel'] = 'ResidZZFit'
params_to_print = hlp_mod.get_param(
'params_to_print', data_dict, default_value=None, **params)
fit_name = hlp_mod.get_param('fit_name', data_dict,
raise_error=False, **params)
for keyi, data in data_to_proc_dict.items():
data_fit = hlp_mod.get_msmt_data(data, cp, mobjn)
residzz_mod = lmfit.Model(fit_mods.ResidZZFunc)
guess_pars = fit_mods.exp_damp_osc_guess(
model=residzz_mod, t=indep_var_array, data=data_fit)
guess_pars['alpha'].value = -50e3
guess_pars['x'].value = 12e-6*guess_pars['alpha'].value
guess_pars['offset'].value = np.mean(data_fit)
# guess_pars['amplitude'].min = -1
# guess_pars['amplitude'].max = 1
guess_params_new = hlp_mod.get_param('guess_params', data_dict,
default_value=dict(),
raise_error=False, **params)
update_fit_guess_pars(guess_params_new, guess_pars)
fit_name_to_set = fit_name
if fit_name_to_set is None:
fit_name_to_set = 'residzz_fit'
fit_name_to_set += keyi
fit_dicts[fit_name_to_set] = {
'fit_fn': fit_mods.ResidZZFunc,
'fit_xvals': {'t': indep_var_array},
'fit_yvals': {'data': data_fit},
'guess_pars': guess_pars,
'params_to_print': params_to_print, **plot_params}
hlp_mod.add_param('fit_dicts', fit_dicts, data_dict,
add_param_method='update')
if params.get('do_fitting', False):
run_fitting(data_dict, keys_in=list(fit_dicts), **params)
def prepare_expdamposc_fit_dict(data_dict, keys_in=None, **params):
"""
    This function does an exponentially damped oscillation fit to Ramsey data.
:param data_dict: OrderedDict containing data to be processed and where
processed data is to be stored
:param keys_in: list of key names or dictionary keys paths in
data_dict for the data to be processed
:param params: keyword args
do_fitting (bool, default: False): whether to perform the fit
guess_params (dict, default: dict()): dict of guess pars for fit
fit_name
indep_var_array
plot_params
params_to_print
:return: adds fit_dicts to data_dict
"""
fit_dicts = OrderedDict()
data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)
cp, sp, mospm, mobjn = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['cp', 'sp', 'mospm', 'mobjn'], **params)
indep_var_array = hlp_mod.get_param('indep_var_array', data_dict,
raise_error=False, **params)
if indep_var_array is None:
indep_var_array = sp[0][mospm[mobjn][0]][0]
plot_params = hlp_mod.get_param('plot_params', data_dict, default_value={},
**params)
if 'setlabel' not in plot_params:
plot_params['setlabel'] = 'ExpDampOscFit'
params_to_print = hlp_mod.get_param(
'params_to_print', data_dict, default_value=None, **params)
fit_name = hlp_mod.get_param('fit_name', data_dict,
raise_error=False, **params)
for keyi, data in data_to_proc_dict.items():
data_fit = hlp_mod.get_msmt_data(data, cp, mobjn)
exp_damped_decay_mod = lmfit.Model(fit_mods.ExpDampOscFunc)
guess_pars = fit_mods.exp_damp_osc_guess(
model=exp_damped_decay_mod, data=data_fit, t=indep_var_array,
n_guess=1)
guess_pars['amplitude'].vary = False
guess_pars['amplitude'].value = 0.5
guess_pars['frequency'].vary = True
guess_pars['tau'].vary = True
guess_pars['phase'].vary = True
guess_pars['n'].vary = False
guess_pars['oscillation_offset'].vary = False
guess_pars['exponential_offset'].vary = True
guess_params_new = hlp_mod.get_param('guess_params', data_dict,
default_value=dict(),
raise_error=False, **params)
update_fit_guess_pars(guess_params_new, guess_pars)
fit_name_to_set = fit_name
if fit_name_to_set is None:
fit_name_to_set = 'expdamposc_fit'
fit_name_to_set += keyi
fit_dicts[fit_name_to_set] = {
'fit_fn': fit_mods.ExpDampOscFunc,
'fit_xvals': {'t': indep_var_array},
'fit_yvals': {'data': data_fit},
'guess_pars': guess_pars,
'params_to_print': params_to_print,
'plot_params': plot_params}
hlp_mod.add_param('fit_dicts', fit_dicts, data_dict,
add_param_method='update')
if params.get('do_fitting', False):
run_fitting(data_dict, keys_in=list(fit_dicts), **params)
def prepare_rbleakage_fit_dict(data_dict, keys_in=None, **params):
"""
:param data_dict: OrderedDict containing data to be processed and where
processed data is to be stored
:param keys_in: list of key names or dictionary keys paths in
data_dict for the data to be processed
:param params: keyword args
do_fitting (bool, default: False): whether to perform the fit
guess_params (dict, default: dict()): dict of guess pars for fit
:return: adds fit_dicts to data_dict
"""
fit_dicts = OrderedDict()
data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)
cp, sp, mospm, mobjn = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['cp', 'sp', 'mospm', 'mobjn'], **params)
indep_var_array = hlp_mod.get_param('indep_var_array', data_dict,
raise_error=False, **params)
if indep_var_array is None:
indep_var_array = sp[0][mospm[mobjn][0]][0]
plot_params = hlp_mod.get_param('plot_params', data_dict, default_value={},
**params)
if 'setlabel' not in plot_params:
plot_params['setlabel'] = 'RBLeakageFit'
params_to_print = hlp_mod.get_param(
'params_to_print', data_dict, default_value=None, **params)
fit_name = hlp_mod.get_param('fit_name', data_dict,
raise_error=False, **params)
for keyi, data in data_to_proc_dict.items():
data_fit = hlp_mod.get_msmt_data(data, cp, mobjn)
rbleak_mod = lmfit.Model(fit_mods.RandomizedBenchmarkingLeakage)
guess_pars = rbleak_mod.make_params(pu=0.01, pd=0.05, p0=0)
guess_params_new = hlp_mod.get_param('guess_params', data_dict,
default_value=dict(),
raise_error=False, **params)
update_fit_guess_pars(guess_params_new, guess_pars)
fit_name_to_set = fit_name
if fit_name_to_set is None:
fit_name_to_set = 'rbleak_fit'
fit_name_to_set += keyi
fit_dicts[fit_name_to_set] = {
'fit_fn': fit_mods.RandomizedBenchmarkingLeakage,
'fit_xvals': {'numCliff': indep_var_array},
'fit_yvals': {'data': data_fit},
'guess_pars': guess_pars,
'params_to_print': params_to_print, **plot_params}
hlp_mod.add_param('fit_dicts', fit_dicts, data_dict,
add_param_method='update')
if params.get('do_fitting', False):
run_fitting(data_dict, keys_in=list(fit_dicts), **params)
def update_fit_guess_pars(guess_params_new, guess_params_old):
if len(guess_params_new) != 0:
for par, val in guess_params_new.items():
if isinstance(val, dict):
if 'value' in val:
guess_params_old[par].value = val['value']
if 'min' in val:
guess_params_old[par].min = val['min']
if 'max' in val:
guess_params_old[par].max = val['max']
if 'vary' in val:
                    guess_params_old[par].vary = val['vary']
else:
# assumes the value corresponding to par is an int or float
guess_params_old[par].value = val
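# Illustrative sketch of how the guess-parameter update above is typically used
# (standalone, with hypothetical parameter names): a scalar entry only replaces
# the value, while a dict entry can also touch min/max/vary.
def _example_update_fit_guess_pars():
    """Overwrite selected guesses in an lmfit.Parameters object."""
    guess_pars = lmfit.Parameters()
    guess_pars.add('frequency', value=1e6)
    guess_pars.add('amplitude', value=0.3)
    update_fit_guess_pars(
        {'frequency': 20e6,                                # plain value
         'amplitude': {'value': 0.5, 'min': -1, 'max': 1,  # full specification
                       'vary': False}},
        guess_pars)
    return guess_pars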
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import click
import requests
import sys
import types
from girder_client import GirderClient, __version__
class GirderCli(GirderClient):
"""
A command line Python client for interacting with a Girder instance's
RESTful api, specifically for performing uploads into a Girder instance.
"""
def __init__(self, username, password, host=None, port=None, apiRoot=None,
scheme=None, apiUrl=None, apiKey=None):
"""
Initialization function to create a GirderCli instance, will attempt
to authenticate with the designated Girder instance. Aside from username, password,
and apiKey, all other kwargs are passed directly through to the
:py:class:`girder_client.GirderClient` base class constructor.
:param username: username to authenticate to Girder instance.
:param password: password to authenticate to Girder instance, leave
this blank to be prompted.
"""
def _progressBar(*args, **kwargs):
bar = click.progressbar(*args, **kwargs)
bar.bar_template = "[%(bar)s] %(info)s %(label)s"
bar.show_percent = True
bar.show_pos = True
def formatSize(length):
if length == 0:
return '%.2f' % length
unit = ''
# See https://en.wikipedia.org/wiki/Binary_prefix
units = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
while True:
if length <= 1024 or len(units) == 0:
break
unit = units.pop(0)
length /= 1024.
return '%.2f%s' % (length, unit)
def formatPos(_self):
pos = formatSize(_self.pos)
if _self.length_known:
pos += '/%s' % formatSize(_self.length)
return pos
bar.format_pos = types.MethodType(formatPos, bar)
return bar
_progressBar.reportProgress = sys.stdout.isatty()
super(GirderCli, self).__init__(
host=host, port=port, apiRoot=apiRoot, scheme=scheme, apiUrl=apiUrl,
progressReporterCls=_progressBar)
interactive = password is None
if apiKey:
self.authenticate(apiKey=apiKey)
elif username:
self.authenticate(username, password, interactive=interactive)
class _HiddenOption(click.Option):
def get_help_record(self, ctx):
pass
class _AdvancedOption(click.Option):
pass
class _Group(click.Group):
def format_options(self, ctx, formatter):
opts = []
advanced_opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if rv is None:
continue
if isinstance(param, _AdvancedOption):
advanced_opts.append(rv)
else:
opts.append(rv)
if opts:
with formatter.section('Options'):
formatter.write_dl(opts)
if advanced_opts:
with formatter.section('Advanced Options'):
formatter.write_dl(advanced_opts)
self.format_commands(ctx, formatter)
_CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=_CONTEXT_SETTINGS, cls=_Group)
@click.option('--api-url', default=None,
help='RESTful API URL '
'(e.g https://girder.example.com:443/%s)' % GirderClient.DEFAULT_API_ROOT)
@click.option('--api-key', envvar='GIRDER_API_KEY', default=None,
help='[default: GIRDER_API_KEY env. variable]')
@click.option('--username', default=None)
@click.option('--password', default=None)
# Advanced options
@click.option('--host', default=None,
cls=_AdvancedOption,
help="[default: %s]" % GirderClient.DEFAULT_HOST)
@click.option('--scheme', default=None,
cls=_AdvancedOption,
help="[default: %s if %s else %s]" % (
GirderClient.getDefaultScheme(GirderClient.DEFAULT_HOST),
GirderClient.DEFAULT_HOST,
GirderClient.getDefaultScheme("girder.example.com")))
@click.option('--port', default=None,
cls=_AdvancedOption,
help="[default: %s if %s; %s if %s else %s]" % (
GirderClient.DEFAULT_HTTPS_PORT, "https",
GirderClient.DEFAULT_LOCALHOST_PORT, "localhost",
GirderClient.DEFAULT_HTTP_PORT,
))
@click.option('--api-root', default=None,
help='relative path to the Girder REST API '
'[default: %s]' % GirderClient.DEFAULT_API_ROOT,
show_default=True,
cls=_AdvancedOption)
@click.version_option(version=__version__, prog_name='Girder command line interface')
@click.pass_context
def main(ctx, username, password, api_key, api_url, scheme, host, port, api_root):
"""Perform common Girder CLI operations.
    The CLI is particularly suited to upload (or download) a large, nested
    hierarchy of data to (or from) Girder from (or into) a local directory.
The recommended way to use credentials is to first generate an API key
and then specify the ``api-key`` argument or set the ``GIRDER_API_KEY``
environment variable.
The client also supports ``username`` and ``password`` args. If only the
``username`` is specified, the client will prompt the user to interactively
input his/her password.
"""
# --api-url and URL by part arguments are mutually exclusive
url_part_options = ['host', 'scheme', 'port', 'api_root']
has_api_url = ctx.params.get('api_url', None)
for name in url_part_options:
has_url_part = ctx.params.get(name, None)
if has_api_url and has_url_part:
raise click.BadArgumentUsage(
'Option "--api-url" and option "--%s" are mutually exclusive.' %
name.replace("_", "-"))
ctx.obj = GirderCli(
username, password, host=host, port=port, apiRoot=api_root,
scheme=scheme, apiUrl=api_url, apiKey=api_key)
def _lookup_parent_type(client, object_id):
object_id = client._checkResourcePath(object_id)
for parent_type in ['folder', 'collection', 'user', 'item']:
try:
client.get('resource/%s/path' % object_id, parameters={'type': parent_type})
return parent_type
except requests.HTTPError as exc_info:
if exc_info.response.status_code == 400:
continue
raise
def _CommonParameters(path_exists=False, path_writable=True,
additional_parent_types=('collection', 'user'),
path_default=None, multiple_local=False):
parent_types = ['folder'] + list(additional_parent_types)
parent_type_cls = _HiddenOption
parent_type_default = 'folder'
if len(additional_parent_types) > 0:
parent_types.append('auto')
parent_type_cls = click.Option
parent_type_default = 'auto'
def wrap(func):
decorators = [
click.option('--parent-type', default=parent_type_default,
show_default=True, cls=parent_type_cls,
help='type of Girder parent target', type=click.Choice(parent_types)),
click.argument('parent_id'),
click.argument(
'local_folder',
type=click.Path(exists=path_exists, dir_okay=True,
writable=path_writable, readable=True),
default=path_default,
nargs=1 if not multiple_local else -1,
required=multiple_local
),
]
for decorator in reversed(decorators):
func = decorator(func)
return func
return wrap
_common_help = 'PARENT_ID is the id of the Girder parent target and ' \
'LOCAL_FOLDER is the path to the local target folder.'
_short_help = 'Download files from Girder'
@main.command('download', short_help=_short_help, help='%s\n\n%s' % (
_short_help, _common_help.replace('LOCAL_FOLDER', 'LOCAL_FOLDER (default: ".")')))
@_CommonParameters(additional_parent_types=['collection', 'user', 'item'], path_default='.')
@click.pass_obj
def _download(gc, parent_type, parent_id, local_folder):
if parent_type == 'auto':
parent_type = _lookup_parent_type(gc, parent_id)
if parent_type == 'item':
gc.downloadItem(parent_id, local_folder)
else:
gc.downloadResource(parent_id, local_folder, parent_type)
_short_help = 'Synchronize local folder with remote Girder folder'
@main.command('localsync', short_help=_short_help, help='%s\n\n%s' % (_short_help, _common_help))
@_CommonParameters(additional_parent_types=[])
@click.pass_obj
def _localsync(gc, parent_type, parent_id, local_folder):
if parent_type != 'folder':
raise Exception('localsync command only accepts parent-type of folder')
gc.loadLocalMetadata(local_folder)
gc.downloadFolderRecursive(parent_id, local_folder, sync=True)
gc.saveLocalMetadata(local_folder)
_short_help = 'Upload files to Girder'
@main.command('upload', short_help=_short_help, help='%s\n\n%s' % (
_short_help,
'PARENT_ID is the id of the Girder parent target and '
'LOCAL_FOLDER is one or more paths to local folders or files.'))
@_CommonParameters(path_exists=True, path_writable=False, multiple_local=True)
@click.option('--leaf-folders-as-items', is_flag=True,
help='upload all files in leaf folders to a single Item named after the folder')
@click.option('--reuse', is_flag=True,
help='use existing items of same name at same location or create a new one')
@click.option('--dry-run', is_flag=True,
help='will not write anything to Girder, only report what would happen')
@click.option('--blacklist', default='',
help='comma-separated list of filenames to ignore')
@click.option('--reference', default=None,
help='optional reference to send along with the upload')
@click.pass_obj
def _upload(gc, parent_type, parent_id, local_folder,
leaf_folders_as_items, reuse, blacklist, dry_run, reference):
if parent_type == 'auto':
parent_type = _lookup_parent_type(gc, parent_id)
gc.upload(
local_folder, parent_id, parent_type,
leafFoldersAsItems=leaf_folders_as_items, reuseExisting=reuse,
blacklist=blacklist.split(','), dryRun=dry_run, reference=reference)
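# Illustrative sketch (not part of the CLI): the GirderCli class defined above
# can also be used programmatically.  The URL, API key, folder id and local
# path below are placeholders, not real endpoints.
def _example_programmatic_upload():
    """Authenticate with an API key and upload a local folder."""
    gc = GirderCli(username=None, password=None,
                   apiUrl='https://girder.example.com/api/v1',
                   apiKey='REPLACE_WITH_API_KEY')
    # Same call the 'upload' subcommand issues under the hood.
    gc.upload('/tmp/local_folder', 'PARENT_FOLDER_ID', 'folder',
              leafFoldersAsItems=False, reuseExisting=True,
              blacklist=[], dryRun=True, reference=None)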
if __name__ == '__main__':
main() # pragma: no cover
|
|
# __author__ = 'sandy'
# -*- coding=utf-8 -*-
import sys
import MySQLdb
import datetime
from time import time
from swift.common.utils import cache_from_env, get_logger, \
split_path, config_true_value, register_swift_info
from swift.common.swob import Response, Request
from swift.common.swob import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \
HTTPUnauthorized
class UserInfo:
def denied_response(self, req):
self.response = '[Deny] : %s\n' % req
return None
def get_user_info_from_token(self, env=None, token=None):
"""
Get user information for the given token.
:param env: The current WSGI environment dictionary.
        :param token: Token to validate and return an info string for.
:returns: None if the token is invalid or a string containing a comma
separated list of groups the authenticated user is a member
of. The first group in the list is also considered a unique
identifier for that user.
"""
info = None
if not token:
return None
memcached_client = cache_from_env(env)
if not memcached_client:
raise Exception('Memcache required')
memcached_token_key = '%s/token/%s' % (self.reseller_prefix, token)
cached_auth_data = memcached_client.get(memcached_token_key)
if cached_auth_data:
expires, info = cached_auth_data
if expires < time():
self.denied_response('No User %s.' % info)
return info
def get_user_info_from_db(self, user_name=None):
"""
Get user information for the given username.
        :param user_name: Username (login_name) to validate and return an info
            dict for.
        :returns: None if the username is invalid, else a dict with the user's
            tu_id, login_name, username, password, seclevel, mobile and email.
"""
# Choose columns from database
item = ('tu_id', 'login_name', 'username', 'password', 'seclevel', 'mobile', 'email')
        info = None
        try:
info_select = self.cur.execute("SELECT %s FROM TUser WHERE login_name = \'%s\'" % (str(item).replace('\'', '')[1:-1], user_name))
# print "info_select : %d" % info_select
# login_name is a unique value, so the select result is only 1
if info_select != 1:
if info_select == 0:
self.denied_response('No User %s.' % user_name)
else:
self.denied_response('duplicated user %s.' % user_name)
info = self.cur.fetchone()
if info is None:
self.denied_response('Error SQL execute.')
info = dict(zip(item, info))
except Exception as e:
self.denied_response(e)
return info
def get_userid(self):
return self.tu_id
def get_secfield(self):
"""
        Return a list of the user's security fields
        :returns: None if the token or username is invalid, otherwise a list
            of the authenticated user's security fields.
"""
if self.login_name:
secfield = self.get_secfield_from_uid(self.tu_id)
return secfield
if self.token:
secfield = self.get_secfield_from_token(self.token)
return secfield
else:
self.denied_response('Error in get_secfield.')
return None
def get_secfield_from_uid(self, uid):
"""
        Return a list of the user's security fields
        :param uid: the unique user id that specifies the user
        :returns: None if the user has no secfield, else a list of secfield
            ids found in the database.
"""
secfield = None
try:
info_select = self.cur.execute("SELECT %s FROM TUserSecfieldRelation WHERE tu_id = %s" % ('secfield_id', uid))
# print "info_select : %d" % info_select
            # tu_id identifies a single row here, so the select result is only 1
if info_select != 1:
if info_select == 0:
return self.denied_response('The uid %s has no secfield related.' % uid)
else:
return self.denied_response('duplicated secfield %s.' % uid)
info = self.cur.fetchone()
if info is None:
self.denied_response('Error SQL execute.')
secfield = (info[0].encode("utf8")).split(",")
except Exception as e:
self.denied_response(e)
return secfield
def get_secfield_info_from_secid(self, secid):
"""
        Return concrete information about a security field
        :param secid: the unique secfield id that specifies the secfield
        :returns: None if the secid points to no secfield, else a dict of
            secfield information found in the database.
"""
# Choose columns from database
item = ('secfield_id', 'parent_secfd_id', 'secfield_name', 'gen_time')
        secfield_info = None
        try:
info_select = self.cur.execute("SELECT %s FROM TSecfield WHERE secfield_id = \'%s\'" %
(str(item).replace('\'', '')[1:-1], secid))
# print "info_select : %d" % info_select
            # secfield_id is a unique value, so the select result is only 1
if info_select != 1:
if info_select == 0:
return self.denied_response('No Secfield id %s.' % secid)
else:
self.denied_response('duplicated secfield id %s.' % secid)
info = self.cur.fetchone()
if info is None:
self.denied_response('Error SQL execute.')
secfield_info = dict(zip(item, info))
except Exception as e:
self.denied_response(e)
return secfield_info
def get_parentid_secfd_from_secid(self, secid):
secfield_info = self.get_secfield_info_from_secid(secid)
return secfield_info['parent_secfd_id']
def get_seclevel_from_token(self, token):
        info = self.get_user_info_from_token(token=token)
return info['sec_level']
def get_seclevel(self):
"""
Get security level from user
"""
if self.seclevel:
return self.seclevel
else:
if self.username:
stat = 'User : %s has no security level, please contact system \
admin' % self.username
else:
stat = 'No User.'
self.denied_response(stat)
# return 0 # maybe default seclevel
def __init__(self, token=None, username=None, request=None):
        # Database definition
self.conn = MySQLdb.connect(host="127.0.0.1", user="root", passwd='root', db="auth", charset="utf8")
self.cur = self.conn.cursor()
self.mc_user = cache_from_env(request.environ)
        # Initialize user information by token or username
if token is None and username is None:
self.denied_response('token')
# TO DO: get user from db (web update user, not read from cache , temparily )
if self.mc_user.get(username) and False:
self.tu_id, self.username, self.seclevel, self.login_name, self.response = self.mc_user.get(username).split(',')
else:
if token:
info = self.get_user_info_from_token(token=token)
elif username:
info = self.get_user_info_from_db(user_name=username)
if info:
# set each info
self.tu_id = info['tu_id']
self.username = info['username']
self.login_name = info['login_name']
self.seclevel = info['seclevel']
self.email = info['email']
self.password = info['password']
self.mobile = info['mobile']
self.response = 'True'
self.mc_user.set(self.username, (('%s,%s,%s,%s,%s')%(self.tu_id, self.username, self.seclevel,self.login_name,self.response)))
elif not self.response:
self.response = ['Forbidden']
def __call__(self):
print "__call__ is running"
class Secfield:
def denied_response(self, req):
self.response = '[Deny] : %s\n' % req
def get_secfield_info_from_secid(self, secid):
"""
        Return concrete information about a security field
        :param secid: the unique secfield id that specifies the secfield
        :returns: None if the secid points to no secfield, else a dict of
            secfield information found in the database.
"""
# Choose columns from database
item = ('secfield_id', 'parent_secfd_id', 'secfield_name', 'gen_time')
        secfield_info = None
        try:
info_select = self.cur.execute("SELECT %s FROM TSecfield WHERE \
secfield_id=%s" % (str(item).replace('\'', '')[1:-1], int(secid)))
# print "info_select : %d" % info_select
            # secfield_id is a unique value, so the select result is only 1
if info_select != 1:
if info_select == 0:
self.denied_response('No Secfield %s.' % secid)
else:
self.denied_response('duplicated secfield id %s.' % secid)
info = self.cur.fetchone()
if info is None:
self.denied_response('Error SQL execute.')
secfield_info = dict(zip(item, info))
except Exception as e:
self.denied_response(e)
return secfield_info
def get_parent_id(self):
parent_id = None
if self.parent_secfd_id:
parent_id = self.parent_secfd_id
return parent_id
def test(self):
uid = self.secfield_id.encode('utf-8')
info = self.cur.execute("select %s from TPolicy where secfield_id=%s" % ('seclass_id', uid))
if info == 1:
infos = self.cur.fetchone()
dd = infos[0].encode('utf-8').split(',')
return dd
else:
self.response = "%s" % 'Error'
return self.denied_response(self.response)
def get_seclass(self):
if not self.secfield_id:
self.denied_response("No secfield_id %s" % self.secfield_id)
return "None"
#uid = self.secfield_id.encode('utf-8')
uid=self.secfield_id
info_select = self.cur.execute("SELECT %s FROM TPolicy WHERE secfield_id = %s" % ('seclass_id', uid))
        # secfield_id is a unique value, so the select result is only 1
if info_select == 1:
info = self.cur.fetchone()
seclass = (info[0].encode('utf-8')).split(',')
return seclass
else:
return ''
def __init__(self, secid, req):
        # Database definition
# self.mc = memcache.Client(['192.168.119.89:11211'])
self.mc = cache_from_env(req.environ)
self.conn = MySQLdb.connect(host="127.0.0.1", user="root", passwd='root', db="auth", charset="utf8")
self.cur = self.conn.cursor()
print "----------------",self.mc.get(str(secid))
if self.mc.get(str(secid)):
self.secfield_id, self.parent_secfd_id, self.secfield_name = self.mc.get(str(secid)).split(',')
else:
info = self.get_secfield_info_from_secid(secid)
if info:
self.secfield_id = info['secfield_id']
self.parent_secfd_id = info['parent_secfd_id']
self.secfield_name = info['secfield_name'].encode('utf-8')
self.response = 'True'
# add into memcached
self.mc.set(str(self.secfield_id), (('%s,%s,%s') % (self.secfield_id, self.parent_secfd_id, self.secfield_name)))
class Subject:
"""
    Parse a User as a Subject
"""
def denied_response(self, req):
self.response = '[Deny] : %s\n' % req
return None
def __init__(self, User):
try:
self.seclevel = User.get_seclevel()
self.secfield = User.get_secfield()
except:
self.seclevel = -1
self.secfield = None
if not self.seclevel:
            # set default Subject
self.seclevel = -1
if not self.seclevel:
self.secfield = None
class Meta(object):
"""
    Each container has a separate meta table; the path is a unique string used
    to look up the concrete information of an object
"""
def denied_response(self, req):
self.response = '[Deny] : %s\n' % req
return None
def __init__(self, path, req):
        # Database definition
self.conn = MySQLdb.connect(host="127.0.0.1", user="root", passwd='root', db="auth", charset="utf8")
self.cur = self.conn.cursor()
self.mc_meta = cache_from_env(req.environ)
obj = self.get_conobj_from_path(path)
#obj = self.get_objname_from_path(path)
if self.mc_meta.get(path):
self.object_id, self.object_name, self.parent_secl_id, self.seclevel, self.path, self.response = self.mc_meta.get(path).split(',')
else:
meta = self.get_metadata_from_objname(obj)
if meta:
self.object_id = meta['object_id']
self.object_name = meta['object_name'].encode("utf8")
self.parent_secl_id = meta['parent_secl_id']
self.seclevel = meta['obj_seclevel']
self.author = meta['author'].encode("utf8") if meta['author'] else None
self.path = meta['path'].encode("utf8")
self.subject = meta['subject'].encode("utf8") if meta['subject'] else None
self.description = meta['description'].encode("utf8") if meta['description'] else None
self.source = meta['source'].encode("utf8") if meta['source'] else None
self.response = 'True'
self.mc_meta.set(self.path, (('%s,%s,%s,%s,%s,%s') % (self.object_id, self.object_name, self.parent_secl_id, self.seclevel, self.path, self.response)))
elif not self.response:
self.response = ['Forbidden']
def get_seclevel(self):
"""
Get security level from metadata
"""
if self.seclevel:
return self.seclevel
else:
if self.object_name:
stat = 'Object : %s has no security level, please contact system \
admin' % self.object_name
else:
stat = 'No Object metadata.'
self.denied_response(stat)
def get_seclass(self):
"""
Get security classification from metadata
"""
if self.parent_secl_id:
return self.parent_secl_id
else:
if self.object_name:
stat = 'Object : %s has no security classification, please contact system \
admin' % self.object_name
else:
stat = 'No Object metadata.'
self.denied_response(stat)
def get_metadata_from_objname(self, obj):
item = ('object_id', 'object_name', 'parent_secl_id', 'obj_seclevel', 'author', 'path',
'subject', 'description', 'language', 'source')
try:
info_select = self.cur.execute("select %s from TMeta where path = \'%s\'" %
(str(item).replace('\'', '')[1:-1], obj))
            # path is a unique value, so the select result is only 1
if info_select != 1:
if info_select == 0:
return self.denied_response('No files %s.' % obj)
else:
return self.denied_response('duplicated files id %s.' % obj)
info = self.cur.fetchone()
if info is None:
return self.denied_response('Error SQL execute.')
Meta_info = dict(zip(item, info))
except Exception as e:
return self.denied_response(e)
return Meta_info
def get_objname_from_path(self, path):
# get object name from path
_junk, account, container, obj = split_path(path, 1, 4, True)
return obj
def get_conobj_from_path(self,path):
# get object name from path
_junk, account, container, obj = split_path(path, 1, 4, True)
if obj!=None:
obj="%s/%s" % (container,obj)
else:
obj=None
return obj
def check_path(self, path):
# path string replace or check
if not path:
return False
if '..' in path:
return False
return True
class Object:
"""
    Parse an audio file's metadata as an Object
"""
def denied_response(self, req):
self.response = '[Deny] : %s' % req
return None
def get_parent_seclass(self, secid=None):
# if not self.seclass:
# return None
parent = {}
if not secid:
secid = self.seclass
        # assume the maximum classification depth is 10
for v in range(1, 10):
seclass_info = self.get_parent_seclass_from_db(secid)
if not seclass_info:
break
parent[str(secid)] = str(seclass_info['parent_secl_id'])
if not seclass_info['parent_secl_id']:
break
else:
secid = seclass_info['parent_secl_id']
return parent
def get_parent_seclass_from_db(self, secid):
        # Database definition
self.conn = MySQLdb.connect(host="127.0.0.1", user="root", passwd='root', db="auth", charset="utf8")
self.cur = self.conn.cursor()
item = ('seclass_id', 'parent_secl_id', 'seclass_name', 'gen_time')
        seclass_info = None
        try:
info_select = self.cur.execute("select %s from TSeclass where seclass_id = \'%s\'"
% (str(item).replace('\'', '')[1:-1], secid))
# seclass_id is a unique value, so the select result is only 1
if info_select != 1:
if info_select == 0:
self.denied_response('No Seclass %s.' % secid)
return None
else:
self.denied_response('duplicated seclass id %s.' % secid)
info = self.cur.fetchone()
if info is None:
self.denied_response('Error SQL execute.')
seclass_info = dict(zip(item, info))
except:
self.denied_response('Error SQL execute.')
return seclass_info if seclass_info else None
def __init__(self, meta=None):
if meta:
self.seclevel = meta.get_seclevel()
self.seclass = meta.get_seclass()
if not (self.seclevel and self.seclass):
self.seclevel = 1
self.seclass = 0
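# Illustrative sketch (not used by the middleware below): the GET-time check in
# mandatory_access_control.__call__ boils down to a "no read up" level
# comparison plus a walk of the object's classification chain against the
# subject's security fields.  `seclass_lookup` stands in for
# Secfield(secfd, req).get_seclass(), which needs a live request and database.
def _example_read_allowed(sub, obj_info, classes, seclass_lookup):
    """Return True if the subject may read the object, mirroring the GET branch."""
    if int(sub.seclevel) < int(obj_info.seclevel):
        return False                 # subject level too low ("no read up")
    x = str(obj_info.seclass)
    while x is not None:
        for secfd in sub.secfield:
            if str(x) in seclass_lookup(secfd):
                return True          # one of the user's fields covers this class
        x = classes.get(x)           # climb to the parent classification
    return False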
class mandatory_access_control(object):
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.upload = 'True'
self.logger = get_logger(conf, log_route='mac')
self.reseller_prefix = "AUTH_"
self.admins = {}
conn = MySQLdb.connect(host="127.0.0.1", user="root", passwd="root", db="auth", charset="utf8")
cur = conn.cursor()
cur.execute('select * from TAdmin')
for row1 in cur.fetchall():
name = row1[1].encode('utf-8')
password = row1[2].encode('utf-8')
url = '$HOST/v1/%s%s' % (self.reseller_prefix, name)
self.admins[name] = {'url': url, 'passwd': password}
print "I am in mac middleware"
def __call__(self, env, start_response):
self.conn = MySQLdb.connect(host="127.0.0.1", user="root", passwd='root', db="auth", charset="utf8")
self.cur = self.conn.cursor()
req = Request(env)
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
memcache_client = cache_from_env(req.environ)
if not memcache_client:
raise Exception('Memcache required')
memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token)
cached_auth_data = memcache_client.get(memcache_token_key)
if not cached_auth_data:
start_response("404 Forbidden", [("Content-type", "text/plain")])
return ["Token denied test!!! I am here\n%s\n%s\n%s\n" % (cached_auth_data, memcache_token_key, env)]
expires, self.account_name = cached_auth_data
if expires < time():
self.logger.increment('token_denied')
return HTTPUnauthorized(request=req, headers={'Www-Authenticate': 'Swift realm="%s"' % self.account_name})
path = req.environ['PATH_INFO']
# self.account_name = 'mobile'
_junk, account, container, obj = split_path(path, 1, 4, True)
if obj!=None:
obj="%s/%s" % (container,obj)
else:
obj=None
user = UserInfo(username=self.account_name, request=req)
print "#############",user.__dict__
try:
sub = Subject(user)
except:
start_response("404 Forbidden", [("Content-type", "text/plain")])
return ["Token denied test!!! I am here\n%s\n%s\n%s\n" % (cached_auth_data, memcache_token_key, env)]
if self.account_name in self.admins:
if req.method == 'PUT' or req.method == 'POST':
ss = self.app(env, start_response)
'''
                Check whether the container exists; ss is the returned body.
                If the body is not empty, the container does not exist.
                Otherwise continue; the unique path is then checked in save().
'''
try:
dd = str(ss.next())
except Exception as e:
dd = str(e)
if 'resource' not in dd:
self.save(env, sub_seclevel=0,obj_path=obj)
# start_response("404 Forbidden", [("Content-type", "text/plain"), ('upload', self.upload)])
# return ["%s" % self.upload]
if self.upload == 'True':
return self.app(env, start_response)
else:
start_response("404 Forbidden", [("Content-type", "text/plain")])
return str(self.upload)
else:
start_response("404 Forbidden", [("Content-type", "text/plain")])
return ['The Container:%s is Not Found\n' % container]
if req.method == 'GET' and obj:
# user = UserInfo(username=self.account_name,request=req)
audio = Meta(path, req)
if audio.response != 'True':
start_response("111 Forbidden", [("Content-type", "text/plain")])
return audio.response
# sub=Subject(user)
if user.response != 'True':
start_response("403 Forbidden", [("Content-type", "text/plain")])
return user.response
obj_info = Object(audio)
if audio.response != 'True':
start_response("404 Forbidden", [("Content-type", "text/plain")])
return audio.response
if int(sub.seclevel) < int(obj_info.seclevel):
start_response("404 Forbidden", [("Content-type", "text/plain"), ("sub", sub.seclevel),
("obj", obj_info.seclevel)])
return ["Secure Level Forbidden,Please Check the Level!\n"]
classes = obj_info.get_parent_seclass()
x = str(obj_info.seclass)
while x is not None:
for secfd in sub.secfield:
secfield = Secfield(secfd, req)
if str(x) in secfield.get_seclass():
return self.app(env, start_response)
try:
x = classes[x]
except:
                    # no matching secfield by the end of the parent chain (KeyError, e.g. '0')
break
start_response("403 Forbidden", [("Content-type", "text/plain"), ("sub", sub.seclevel),
("obj", obj_info.seclevel)])
return ["Secure field Forbidden: secure-field not match\n"]
elif req.method == 'PUT' and obj:
sub_classes = []
sub_secfield = sub.secfield
for secfd in sub.secfield:
clas = Secfield(secfd, req).get_seclass()
sub_classes += clas
object_info = Object()
if user.response != 'True':
start_response("403 Forbidden", [("Content-type", "text/plain")])
return user.response
obj_secl_id = env.get('HTTP_PARENT_SECL_ID')
obj_seclevel = env.get('HTTP_OBJ_SECLEVEL')
sub_seclevel = sub.seclevel
if int(sub_seclevel) > int(obj_seclevel):
start_response("403 Forbidden", [("Content-type", "text/plain"), ('sub', sub.seclevel),
('obj', obj_seclevel)])
return ["Secure Level Forbidden,Please Check the Levels!\n"]
classes = object_info.get_parent_seclass(secid=obj_secl_id) # the classes which the objects' all classes.
for i in set(sub_classes):
if i in classes.keys() or i in classes.values():
ss = self.app(env, start_response)
try:
dd = str(ss.next())
except Exception as e:
dd = str(e)
if 'resource' not in dd:
self.save(env, sub_seclevel, self.account_name,obj_path=obj)
if self.upload == 'True':
return self.app(env, start_response)
else:
start_response("404 Forbidden", [("Content-type", "text/plain")])
return str(self.upload)
else:
start_response("404 Forbidden", [("Content-type", "text/plain")])
return ['The Container:%s is Not Found\n' % container]
start_response("404 Forbidden", [("Content-type", "text/plain")])
return ["Secure field Forbidden: secure-field not match\n"]
elif req.method == 'POST' and obj:
# user = UserInfo(username=self.account_name, request=req)
# sub = Subject(user)
user = UserInfo(username=self.account_name, request=req)
sub = Subject(user)
sub_classes = []
for secfd in sub.secfield:
clas = Secfield(secfd, req).get_seclass()
sub_classes += clas
if user.response != 'True':
start_response("404 Forbidden", [("Content-type", "text/plain")])
return user.response
obj_secl_id = env.get('HTTP_PARENT_SECL_ID', '')
obj_seclevel = env.get('HTTP_OBJ_SECLEVEL', '')
sub_seclevel = sub.seclevel
if int(sub_seclevel) != int(obj_seclevel):
start_response("404 Forbidden", [("Content-type", "text/plain"), ('sub', sub.seclevel), ('obj', obj_seclevel)])
return ["The Level NOT Equal,Please Check Your Level!\n"]
                classes = Object().get_parent_seclass(secid=obj_secl_id)
# new
for i in set(sub_classes):
if i in classes.keys() or i in classes.values():
                        self.save(env, sub_seclevel, self.account_name, obj_path=obj)
return self.app(env, start_response)
# while x is not None:
# for secfd in sub.secfield:
# secfield = Secfield(secfd, req)
# if str(x) in secfield.get_seclass():
# self.save(env, start_response)
# return self.app(env, start_response)
# x = classes[x]
start_response("404 Forbidden", [("Content-type", "text/plain")])
return ["Secure field Forbidden: secure-field not match\n"]
elif req.method == 'HEAD' and obj:
print "HEAD",sub.__dict__
print "conn",self.conn
print "env",dir(env)
print "obj",obj,"path",path
print "---",env.get('PATH_INFO').split('/', 4)[-1]
audio = Meta(path,req)
if audio.response != 'True':
start_response("111 Forbidden", [("Content-type", "text/plain")])
return audio.response
meta_info=audio.get_metadata_from_objname(obj)
print "meta_info",meta_info.items()
meta=[("Content-type", "text/plain"),('author',u'pad1')]
for k in meta_info.keys():
#meta.append((k,meta_info[k].encode('utf8')))
item = meta_info[k].encode('utf8') if type(meta_info[k]) is unicode else meta_info[k]
print type(item)
meta.append((k,item))
print "*********meta",meta
#meta.append(meta_info.items())
#self.cur = self.conn.cursor()
#self.cur.execute("select ")
#meta.append()
#start_response("200 OK!", [("Content-type", "text/plain"),('author',u'pad1')])
#start_response("200 OK!", [('parent_secl_id', 7L), ('object_name', u'\u5c0f\u9152\u7a9d'.encode('utf8'))])
#self.getinfo
start_response("200 OK!", meta)
return self.app(env,start_response)
#return ["Secure field Forbidden: secure-field not match\n"]
elif req.method == 'DELETE' and obj:
#user = UserInfo(username=self.account_name, request=req)
#sub = Subject(user)
print "del",user.__dict__
print "del",sub.__dict__
sub_classes = []
if sub.secfield is None:
start_response("404 Forbidden", [("Content-type", "text/plain")])
return ["The Secfiled is None,Please Check Your secfield!\n"]
if sub.seclevel < 0 :
start_response("404 Forbidden", [("Content-type", "text/plain")])
return ["The Seclevel is Error,Please Check Your seclevel!\n"]
for secfd in sub.secfield:
clas = Secfield(secfd, req).get_seclass()
sub_classes += clas
n = self.cur.execute("select * from TMeta where path='%s'" % obj)
if n == 0:
start_response("404 Not Found", [([("Content-type", "text/plain")])])
return ["Not Found this File,Please Check Your file's name\n"]
secid = self.cur.fetchone()
parent_id = secid[2]
obj_level = secid[3]
sub_seclevel = sub.seclevel
print "===sub_seclevel===",sub_seclevel
if int(sub_seclevel) != int(obj_level):
start_response("404 Forbidden", [("Content-type", "text/plain"), ('sub', sub.seclevel), ('obj', obj_level)])
return ["The Level NOT Equal,Please Check Your Level!\n"]
classes = Object().get_parent_seclass(secid=parent_id)
for i in set(sub_classes):
if i in classes.keys() or i in classes.values():
cur1 = self.conn.cursor()
cur1.execute("delete from TMeta where path='%s'" % obj)
self.conn.commit()
cur1.close()
self.conn.close()
return self.app(env, start_response)
start_response("404 Forbidden", [("Content-type", "text/plain"), ("par", classes)])
return ["Secure field Forbidden: secure-field not match\n"]
else:
return self.app(env, start_response)
def save(self, env, sub_seclevel=None, username=None,obj_path=None):
self.conn = MySQLdb.connect(host="127.0.0.1", user="root", passwd='root', db="auth", charset="utf8")
self.cur = self.conn.cursor()
method = env.get('REQUEST_METHOD', '')
object_name = env.get('HTTP_OBJECT_NAME', '')
parent_secl_id = env.get('HTTP_PARENT_SECL_ID', '')
obj_seclevel = env.get('HTTP_OBJ_SECLEVEL', '')
# author = account
# author = env.get('PATH_INFO').split('/', 4)[2][5:]
author = username
#path = env.get('PATH_INFO').split('/', 4)[-1]
if obj_path != None:
path = obj_path
else:
path = env.get('PATH_INFO').split('/', 4)[-1]
gen_time = datetime.datetime.now()
types = env.get('CONTENT_TYPE', '')
subject = env.get('HTTP_SUBJECT', '')
description = env.get('HTTP_DESCRIPTION', '')
language = env.get('HTTP_LANGUAGE', '')
source = env.get('HTTP_SOURCE', '')
values = (object_name, parent_secl_id, obj_seclevel, author, gen_time, path, types, subject, description, language, source)
        '''
        Check whether the path already exists. If it exists, check whether the
        security levels are equal; if equal, update the record, otherwise report an error.
        '''
print "=======================method:"
if method == 'PUT':
try:
self.cur.execute('INSERT INTO TMeta(object_name, parent_secl_id, obj_seclevel, author, gen_time, path, type, subject, description, language, source) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', values)
except Exception as e:
if int(obj_seclevel) == int(sub_seclevel):
values1 = (object_name, parent_secl_id, obj_seclevel, author, gen_time, path, types, subject, description, language, source, path)
try:
self.cur.execute("UPDATE TMeta SET object_name=%s, parent_secl_id=%s, obj_seclevel=%s, author=%s, gen_time=%s, path=%s, type=%s, subject=%s, description=%s,language=%s, source=%s WHERE path=%s", values1)
except Exception as e:
self.upload = e
else:
                    self.upload = str(e[1]) + ' and Secure Level not Equal!\n'
elif method == 'POST':
values1 = (object_name, parent_secl_id, obj_seclevel, author, gen_time, path, types, subject, description, language, source, path)
self.cur.execute("UPDATE TMeta SET object_name=%s, parent_secl_id=%s, obj_seclevel=%s,author=%s, gen_time=%s, path=%s, type=%s, subject=%s, description=%s,language=%s, source=%s WHERE path=%s", values1)
self.conn.commit()
self.cur.close()
self.conn.close()
def denied_response(self, start_response, req):
"""
Returns a standard WSGI response callable with the status of 403 or 401
depending on whether the REMOTE_USER is set or not.
"""
start_response("404 Forbidden", [("Content-type", "text/plain")])
return req
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info('mac', account_acls=True)
def acc_filter(app):
return mandatory_access_control(app, conf)
return acc_filter
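# Minimal wiring sketch (assumptions: the downstream Swift proxy app is available as
# `proxy_app`, and 'reseller_prefix' is one of the options this middleware reads):
#   mac_filter = filter_factory({'reseller_prefix': 'AUTH'})
#   app = mac_filter(proxy_app)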
|
|
"""
Support for MQTT fans.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/fan.mqtt/
"""
import logging
import voluptuous as vol
from homeassistant.components import fan, mqtt
from homeassistant.components.fan import (
ATTR_SPEED, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SPEED_OFF,
SUPPORT_OSCILLATE, SUPPORT_SET_SPEED, FanEntity)
from homeassistant.const import (
CONF_DEVICE, CONF_NAME, CONF_OPTIMISTIC, CONF_PAYLOAD_OFF, CONF_PAYLOAD_ON,
CONF_STATE, STATE_OFF, STATE_ON)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN,
CONF_STATE_TOPIC, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability,
MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
CONF_STATE_VALUE_TEMPLATE = 'state_value_template'
CONF_SPEED_STATE_TOPIC = 'speed_state_topic'
CONF_SPEED_COMMAND_TOPIC = 'speed_command_topic'
CONF_SPEED_VALUE_TEMPLATE = 'speed_value_template'
CONF_OSCILLATION_STATE_TOPIC = 'oscillation_state_topic'
CONF_OSCILLATION_COMMAND_TOPIC = 'oscillation_command_topic'
CONF_OSCILLATION_VALUE_TEMPLATE = 'oscillation_value_template'
CONF_PAYLOAD_OSCILLATION_ON = 'payload_oscillation_on'
CONF_PAYLOAD_OSCILLATION_OFF = 'payload_oscillation_off'
CONF_PAYLOAD_LOW_SPEED = 'payload_low_speed'
CONF_PAYLOAD_MEDIUM_SPEED = 'payload_medium_speed'
CONF_PAYLOAD_HIGH_SPEED = 'payload_high_speed'
CONF_SPEED_LIST = 'speeds'
DEFAULT_NAME = 'MQTT Fan'
DEFAULT_PAYLOAD_ON = 'ON'
DEFAULT_PAYLOAD_OFF = 'OFF'
DEFAULT_OPTIMISTIC = False
OSCILLATE_ON_PAYLOAD = 'oscillate_on'
OSCILLATE_OFF_PAYLOAD = 'oscillate_off'
OSCILLATION = 'oscillation'
PLATFORM_SCHEMA = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_SPEED_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_SPEED_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_SPEED_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_OSCILLATION_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_OSCILLATION_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_OSCILLATION_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_OSCILLATION_ON,
default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_PAYLOAD_OSCILLATION_OFF,
default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_LOW_SPEED, default=SPEED_LOW): cv.string,
vol.Optional(CONF_PAYLOAD_MEDIUM_SPEED, default=SPEED_MEDIUM): cv.string,
vol.Optional(CONF_PAYLOAD_HIGH_SPEED, default=SPEED_HIGH): cv.string,
vol.Optional(CONF_SPEED_LIST,
default=[SPEED_OFF, SPEED_LOW,
SPEED_MEDIUM, SPEED_HIGH]): cv.ensure_list,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
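# Illustrative configuration.yaml entry; the key names come from the schema above, while
# the topic strings and fan name are made-up examples:
#
#   fan:
#     - platform: mqtt
#       name: "Bedroom Fan"
#       command_topic: "bedroom_fan/on/set"
#       state_topic: "bedroom_fan/on/state"
#       speed_command_topic: "bedroom_fan/speed/set"
#       speed_state_topic: "bedroom_fan/speed/state"
#       qos: 0
#       optimistic: false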
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
async_add_entities, discovery_info=None):
"""Set up MQTT fan through configuration.yaml."""
await _async_setup_entity(config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT fan dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add a MQTT fan."""
try:
discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(config, async_add_entities, config_entry,
discovery_hash)
except Exception:
if discovery_hash:
clear_discovery_hash(hass, discovery_hash)
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(fan.DOMAIN, 'mqtt'),
async_discover)
async def _async_setup_entity(config, async_add_entities, config_entry=None,
discovery_hash=None):
"""Set up the MQTT fan."""
async_add_entities([MqttFan(config, config_entry, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttFan(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
MqttEntityDeviceInfo, FanEntity):
"""A MQTT fan component."""
def __init__(self, config, config_entry, discovery_hash):
"""Initialize the MQTT fan."""
self._unique_id = config.get(CONF_UNIQUE_ID)
self._state = False
self._speed = None
self._oscillation = None
self._supported_features = 0
self._sub_state = None
self._topic = None
self._payload = None
self._templates = None
self._optimistic = None
self._optimistic_oscillation = None
self._optimistic_speed = None
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_hash,
self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
self._topic = {
key: config.get(key) for key in (
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_SPEED_STATE_TOPIC,
CONF_SPEED_COMMAND_TOPIC,
CONF_OSCILLATION_STATE_TOPIC,
CONF_OSCILLATION_COMMAND_TOPIC,
)
}
self._templates = {
CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
ATTR_SPEED: config.get(CONF_SPEED_VALUE_TEMPLATE),
OSCILLATION: config.get(CONF_OSCILLATION_VALUE_TEMPLATE)
}
self._payload = {
STATE_ON: config.get(CONF_PAYLOAD_ON),
STATE_OFF: config.get(CONF_PAYLOAD_OFF),
OSCILLATE_ON_PAYLOAD: config.get(CONF_PAYLOAD_OSCILLATION_ON),
OSCILLATE_OFF_PAYLOAD: config.get(CONF_PAYLOAD_OSCILLATION_OFF),
SPEED_LOW: config.get(CONF_PAYLOAD_LOW_SPEED),
SPEED_MEDIUM: config.get(CONF_PAYLOAD_MEDIUM_SPEED),
SPEED_HIGH: config.get(CONF_PAYLOAD_HIGH_SPEED),
}
optimistic = config.get(CONF_OPTIMISTIC)
self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
self._optimistic_oscillation = (
optimistic or self._topic[CONF_OSCILLATION_STATE_TOPIC] is None)
self._optimistic_speed = (
optimistic or self._topic[CONF_SPEED_STATE_TOPIC] is None)
self._supported_features = 0
self._supported_features |= (self._topic[CONF_OSCILLATION_STATE_TOPIC]
is not None and SUPPORT_OSCILLATE)
self._supported_features |= (self._topic[CONF_SPEED_STATE_TOPIC]
is not None and SUPPORT_SET_SPEED)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
topics = {}
templates = {}
for key, tpl in list(self._templates.items()):
if tpl is None:
templates[key] = lambda value: value
else:
tpl.hass = self.hass
templates[key] = tpl.async_render_with_possible_json_value
@callback
def state_received(msg):
"""Handle new received MQTT message."""
payload = templates[CONF_STATE](msg.payload)
if payload == self._payload[STATE_ON]:
self._state = True
elif payload == self._payload[STATE_OFF]:
self._state = False
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
topics[CONF_STATE_TOPIC] = {
'topic': self._topic[CONF_STATE_TOPIC],
'msg_callback': state_received,
'qos': self._config.get(CONF_QOS)}
@callback
def speed_received(msg):
"""Handle new received MQTT message for the speed."""
payload = templates[ATTR_SPEED](msg.payload)
if payload == self._payload[SPEED_LOW]:
self._speed = SPEED_LOW
elif payload == self._payload[SPEED_MEDIUM]:
self._speed = SPEED_MEDIUM
elif payload == self._payload[SPEED_HIGH]:
self._speed = SPEED_HIGH
self.async_write_ha_state()
if self._topic[CONF_SPEED_STATE_TOPIC] is not None:
topics[CONF_SPEED_STATE_TOPIC] = {
'topic': self._topic[CONF_SPEED_STATE_TOPIC],
'msg_callback': speed_received,
'qos': self._config.get(CONF_QOS)}
self._speed = SPEED_OFF
@callback
def oscillation_received(msg):
"""Handle new received MQTT message for the oscillation."""
payload = templates[OSCILLATION](msg.payload)
if payload == self._payload[OSCILLATE_ON_PAYLOAD]:
self._oscillation = True
elif payload == self._payload[OSCILLATE_OFF_PAYLOAD]:
self._oscillation = False
self.async_write_ha_state()
if self._topic[CONF_OSCILLATION_STATE_TOPIC] is not None:
topics[CONF_OSCILLATION_STATE_TOPIC] = {
'topic': self._topic[CONF_OSCILLATION_STATE_TOPIC],
'msg_callback': oscillation_received,
'qos': self._config.get(CONF_QOS)}
self._oscillation = False
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state,
topics)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
@property
def should_poll(self):
"""No polling needed for a MQTT fan."""
return False
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def name(self) -> str:
"""Get entity name."""
return self._config.get(CONF_NAME)
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return self._config.get(CONF_SPEED_LIST)
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def speed(self):
"""Return the current speed."""
return self._speed
@property
def oscillating(self):
"""Return the oscillation state."""
return self._oscillation
async def async_turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the entity.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._topic[CONF_COMMAND_TOPIC],
self._payload[STATE_ON], self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if speed:
await self.async_set_speed(speed)
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the entity.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._topic[CONF_COMMAND_TOPIC],
self._payload[STATE_OFF], self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
async def async_set_speed(self, speed: str) -> None:
"""Set the speed of the fan.
This method is a coroutine.
"""
if self._topic[CONF_SPEED_COMMAND_TOPIC] is None:
return
if speed == SPEED_LOW:
mqtt_payload = self._payload[SPEED_LOW]
elif speed == SPEED_MEDIUM:
mqtt_payload = self._payload[SPEED_MEDIUM]
elif speed == SPEED_HIGH:
mqtt_payload = self._payload[SPEED_HIGH]
else:
mqtt_payload = speed
mqtt.async_publish(
self.hass, self._topic[CONF_SPEED_COMMAND_TOPIC],
mqtt_payload, self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic_speed:
self._speed = speed
self.async_write_ha_state()
async def async_oscillate(self, oscillating: bool) -> None:
"""Set oscillation.
This method is a coroutine.
"""
if self._topic[CONF_OSCILLATION_COMMAND_TOPIC] is None:
return
if oscillating is False:
payload = self._payload[OSCILLATE_OFF_PAYLOAD]
else:
payload = self._payload[OSCILLATE_ON_PAYLOAD]
mqtt.async_publish(
self.hass, self._topic[CONF_OSCILLATION_COMMAND_TOPIC],
payload, self._config.get(CONF_QOS), self._config.get(CONF_RETAIN))
if self._optimistic_oscillation:
self._oscillation = oscillating
self.async_write_ha_state()
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
|
|
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
# pylint: disable=E0611
from pytest import raises
from multiconf import mc_config, ConfigItem, RepeatableConfigItem, ConfigBuilder, ConfigException
from multiconf.decorators import nested_repeatables, named_as
from multiconf.envs import EnvFactory
from .utils.utils import config_error, replace_ids, replace_ids_builder, local_func, next_line_num
from .utils.tstclasses import ItemWithName, ItemWithAA
from .utils.messages import not_repeatable_in_parent_msg
def ce(line_num, *lines):
return config_error(__file__, line_num, *lines)
ef1_prod = EnvFactory()
prod1 = ef1_prod.Env('prod')
ef2_prod_pp = EnvFactory()
pp2 = ef2_prod_pp.Env('pp')
prod2 = ef2_prod_pp.Env('prod')
@named_as('xses')
class X(RepeatableConfigItem):
def __init__(self, mc_key):
super().__init__(mc_key=mc_key)
self.name = mc_key
self.server_num = None
self.something = None
@named_as('x_children')
class XChild(RepeatableConfigItem):
def __init__(self, mc_key, a=None):
super().__init__(mc_key=mc_key)
self.a = a
_configbuilder_override_nested_repeatable_overwrites_parent_repeatable_item_expected_ex = """Re-used key 'server1' in repeated item <class 'test.builder_definition_errors_test.X'> from 'mc_build' overwrites existing entry in parent:
{
"__class__": "Root #as: 'Root', id: 0000, not-frozen",
"env": {
"__class__": "Env",
"name": "pp"
},
"xses": {
"server1": {
"__class__": "X #as: 'xses', id: 0000",
"name": "server1",
"server_num": null,
"something": null
}
},
"mc_ConfigBuilder_XBuilder default-builder": {
"__class__": "XBuilder #as: 'mc_ConfigBuilder_XBuilder', id: 0000, not-frozen",
"num_servers": 2
}
}"""
def test_configbuilder_override_nested_repeatable_overwrites_parent_repeatable_item():
class XBuilder(ConfigBuilder):
def __init__(self, num_servers=2):
super().__init__()
self.num_servers = num_servers
def mc_build(self):
for server_num in range(1, self.num_servers+1):
with X('server%d' % server_num) as c:
c.server_num = server_num
c.setattr('something', prod=1, pp=2)
@nested_repeatables('xses')
class Root(ConfigItem):
pass
with raises(ConfigException) as exinfo:
@mc_config(ef2_prod_pp, load_now=True)
def config(_):
with Root():
X('server1')
with XBuilder():
pass
print(str(exinfo.value))
assert replace_ids_builder(str(exinfo.value), False) == _configbuilder_override_nested_repeatable_overwrites_parent_repeatable_item_expected_ex
def test_configbuilder_without_build():
class ABuilder(ConfigBuilder):
pass
with raises(Exception) as exinfo:
@mc_config(ef2_prod_pp, load_now=True)
def config(_):
ABuilder()
assert str(exinfo.value) == "Can't instantiate abstract class ABuilder with abstract methods mc_build" or \
str(exinfo.value) == "Can't instantiate abstract class ABuilder with abstract method mc_build" # Python 3.9
def test_unexpected_repeatable_child_builder():
@named_as('r')
class RepeatableChild(RepeatableConfigItem):
pass
class UnexpectedRepeatableChildBuilder(ConfigBuilder):
def mc_build(self):
RepeatableChild(mc_key=None)
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod, load_now=True)
def config(_):
with ConfigItem():
UnexpectedRepeatableChildBuilder()
exp = not_repeatable_in_parent_msg.format(
repeatable_cls_key='r', repeatable_cls="<class 'test.builder_definition_errors_test.%(local_func)sRepeatableChild'>" % dict(local_func=local_func()),
ci_named_as='ConfigItem', ci_cls="<class 'multiconf.multiconf.ConfigItem'>")
assert replace_ids(str(exinfo.value), False) == exp
@named_as('arepeatable')
class RepItem(RepeatableConfigItem):
def __new__(cls):
        return super().__new__(cls, mc_key='a')
def __init__(self):
super().__init__(mc_key='a')
self.name = 'a'
def test_unexpected_repeatable_child_nested_builders_with():
class InnerBuilder(ConfigBuilder):
def mc_build(self):
print("InnerBuilder.mc_build", self._mc_where, self._mc_contained_in._mc_where)
with RepItem():
pass
class MiddleBuilder(ConfigBuilder):
def mc_build(self):
print("MiddleBuilder.mc_build", self._mc_where, self._mc_contained_in._mc_where)
with InnerBuilder():
pass
class OuterBuilder(ConfigBuilder):
def mc_build(self):
print("OuterBuilder.mc_build", self._mc_where, self._mc_contained_in._mc_where)
with MiddleBuilder():
pass
class ItemWithoutARepeatable(ConfigItem):
pass
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod, load_now=True)
def config(_):
with ItemWithoutARepeatable():
OuterBuilder()
exp = not_repeatable_in_parent_msg.format(
repeatable_cls_key='arepeatable',
repeatable_cls="<class 'test.builder_definition_errors_test.RepItem'>",
ci_named_as='ItemWithoutARepeatable',
ci_cls="<class 'test.builder_definition_errors_test.%(local_func)sItemWithoutARepeatable'>" % dict(local_func=local_func()))
assert replace_ids(str(exinfo.value), False) == exp
def test_unexpected_repeatable_child_nested_builders_no_with():
class InnerBuilder(ConfigBuilder):
def mc_build(self):
print("InnerBuilder.mc_build", self._mc_where, self._mc_contained_in._mc_where)
RepItem()
class MiddleBuilder(ConfigBuilder):
def mc_build(self):
print("MiddleBuilder.mc_build", self._mc_where, self._mc_contained_in._mc_where)
InnerBuilder()
class OuterBuilder(ConfigBuilder):
def mc_build(self):
print("OuterBuilder.mc_build", self._mc_where, self._mc_contained_in._mc_where)
MiddleBuilder()
class ItemWithoutARepeatable(ConfigItem):
pass
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod, load_now=True)
def config(_):
with ItemWithoutARepeatable():
OuterBuilder()
exp = not_repeatable_in_parent_msg.format(
repeatable_cls_key='arepeatable',
repeatable_cls="<class 'test.builder_definition_errors_test.RepItem'>",
ci_named_as='ItemWithoutARepeatable',
ci_cls="<class 'test.builder_definition_errors_test.%(local_func)sItemWithoutARepeatable'>" % dict(local_func=local_func()))
assert replace_ids(str(exinfo.value), False) == exp
_configbuilder_child_with_nested_repeatables_undeclared_in_build_expected_ex = """'x_children': <class 'test.builder_definition_errors_test.XChild'> is defined as repeatable, but this is not defined as a repeatable item in the containing class: 'xses': <class 'test.builder_definition_errors_test.X'>"""
def test_configbuilder_child_with_nested_repeatables_undeclared_in_build():
class XBuilder(ConfigBuilder):
def __init__(self):
super().__init__()
def mc_build(self):
with X('tada'):
XChild('first_child')
@nested_repeatables('xses')
class Root(ConfigItem):
pass
with raises(ConfigException) as exinfo:
@mc_config(ef2_prod_pp, load_now=True)
def config(_):
with Root():
XBuilder()
assert replace_ids_builder(str(exinfo.value), False) == _configbuilder_child_with_nested_repeatables_undeclared_in_build_expected_ex
def test_configbuilder_child_with_nested_repeatables_undeclared_in_with():
class XBuilder(ConfigBuilder):
def __init__(self):
super().__init__()
def mc_build(self):
X('tada')
@nested_repeatables('xses')
class Root(ConfigItem):
aaa = 2
with raises(ConfigException) as exinfo:
@mc_config(ef2_prod_pp, load_now=True)
def config(_):
with Root():
with XBuilder() as xb:
XChild('first_child', a=10)
exp = """'x_children': <class 'test.builder_definition_errors_test.XChild'> is defined as repeatable, but this is not defined as a repeatable item in the containing class: 'xses': <class 'test.builder_definition_errors_test.X'>"""
assert replace_ids_builder(str(exinfo.value), False) == exp
def test_configbuilders_repeated_non_repeatable_in_build():
class MiddleItem(ConfigItem):
def __init__(self, name):
super().__init__()
self.id = name
class MiddleBuilder(ConfigBuilder):
def __init__(self, name):
super().__init__()
self.name = name
def mc_build(self):
MiddleItem('middleitem1')
MiddleItem('middleitem2')
MiddleItem('middleitem3')
class OuterItem(ConfigItem):
pass
exp = "Repeated non repeatable conf item: 'MiddleItem': <class 'test.builder_definition_errors_test.%(local_func)sMiddleItem'>" % dict(local_func=local_func())
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod, load_now=True)
def config(_):
with ItemWithName() as root:
root.name = 'myp'
with OuterItem():
MiddleBuilder('base1')
assert replace_ids(str(exinfo.value), False) == exp
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod, load_now=True)
def config(_):
with ItemWithName() as root:
root.name = 'myp'
MiddleBuilder('base2')
assert replace_ids(str(exinfo.value), False) == exp
def test_configbuilder_undeclared_repeatable_child(capsys):
"""Test that a repeatable declared in 'with' raises an error when assigned under an item from 'mc_build' which has not declared the repeatable."""
class YBuilder(ConfigBuilder):
def __init__(self):
super().__init__()
def mc_build(self):
Y('y1')
@nested_repeatables('ys')
class ItemWithYs(ConfigItem):
aaa = 2
@named_as('ys')
class Y(RepeatableConfigItem):
def __init__(self, mc_key):
super().__init__(mc_key=mc_key)
@named_as('y_children')
class YChild(RepeatableConfigItem):
def __init__(self, mc_key, a):
super().__init__(mc_key=mc_key)
self.a = a
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod, load_now=True)
def config(_):
with ItemWithYs():
with YBuilder() as yb1:
YChild(mc_key=None, a=10)
exp = not_repeatable_in_parent_msg.format(
repeatable_cls_key='y_children', repeatable_cls="<class 'test.builder_definition_errors_test.%(local_func)sYChild'>" % dict(local_func=local_func()),
ci_named_as='ys', ci_cls="<class 'test.builder_definition_errors_test.%(local_func)sY'>"% dict(local_func=local_func()))
assert replace_ids(str(exinfo.value), False) == exp
_configbuilder_repeated = """Re-used key 'aa' in repeated item <class 'test.builder_definition_errors_test.%(local_func)sXBuilder'> overwrites existing entry in parent:
{
"__class__": "Root #as: 'Root', id: 0000, not-frozen",
"env": {
"__class__": "Env",
"name": "pp"
},
"mc_ConfigBuilder_XBuilder aa": {
"__class__": "XBuilder #as: 'mc_ConfigBuilder_XBuilder', id: 0000, not-frozen"
}
}"""
def test_configbuilder_repeated():
class XBuilder(ConfigBuilder):
def __init__(self, mc_key):
super().__init__(mc_key)
def mc_build(self):
pass
class Root(ConfigItem):
pass
with raises(ConfigException) as exinfo:
@mc_config(ef2_prod_pp, load_now=True)
def config(_):
with Root():
XBuilder('aa')
XBuilder('aa')
print(str(exinfo.value))
assert replace_ids_builder(str(exinfo.value), False) == _configbuilder_repeated % dict(local_func=local_func())
def test_configbuilder_repeated_in_mc_init():
class XBuilder(ConfigBuilder):
def __init__(self, mc_key):
super().__init__(mc_key)
def mc_build(self):
pass
class Root(ConfigItem):
def mc_init(self):
            # This redefinition is ignored as it is interpreted as a default value
XBuilder('aa')
@mc_config(ef2_prod_pp, load_now=True)
def config(_):
with Root():
XBuilder('aa')
_assign_on_built_item_after_it_is_built_expected_ex = """There was 1 error when defining item: {
"__class__": "Y #as: 'y', id: 0000",
"env": {
"__class__": "Env",
"name": "prod"
},
"something": null
}
Check already printed error messages."""
def test_assign_on_built_item_after_it_is_built(capsys):
errorline = [None]
class YBuilder(ConfigBuilder):
def __init__(self, start=1):
super().__init__()
def mc_build(self):
Y()
@named_as('y')
class Y(ConfigItem):
def __init__(self):
super().__init__()
self.something = None
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod, load_now=True)
def config(root):
with YBuilder():
pass
errorline[0] = next_line_num()
root.y.something = 1 # TODO? Should getattr finalize previous object
_sout, serr = capsys.readouterr()
exp = "Trying to set attribute 'something'. Setting attributes is not allowed after item is 'frozen' (with 'scope' is exited)."
assert serr == ce(errorline[0], exp)
assert replace_ids(str(exinfo.value), False) == _assign_on_built_item_after_it_is_built_expected_ex
_assign_on_proxied_built_item_child_after_freeze_expected_ex = """There was 1 error when defining item: {
"__class__": "ItemWithAA #as: 'ItemWithAA', id: 0000",
"env": {
"__class__": "Env",
"name": "prod"
},
"aa": 17
}
Check already printed error messages."""
def test_assign_and_assign_on_proxied_built_item_child_after_freeze(capsys):
"""This will go through the proxy object"""
errorline = [None]
class YBuilder(ConfigBuilder):
def __init__(self, start=1):
super().__init__()
def mc_build(self):
Y()
@named_as('y')
class Y(ConfigItem):
def __init__(self):
super().__init__()
self.something = None
# Test assignment error
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod, load_now=True)
def config1(root):
with YBuilder():
ItemWithAA(17)
errorline[0] = next_line_num()
root.y.ItemWithAA.aa = 1
_sout, serr = capsys.readouterr()
exp = "Trying to set attribute 'aa'. Setting attributes is not allowed after item is 'frozen' (with 'scope' is exited)."
assert serr == ce(errorline[0], exp)
assert replace_ids(str(exinfo.value), False) == _assign_on_proxied_built_item_child_after_freeze_expected_ex
# Test setattr error
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod, load_now=True)
def config2(root):
with YBuilder():
ItemWithAA(17)
errorline[0] = next_line_num()
root.y.ItemWithAA.setattr('aa', default=1)
_sout, serr = capsys.readouterr()
exp = "Trying to set attribute 'aa'. Setting attributes is not allowed after item is 'frozen' (with 'scope' is exited)."
assert serr == ce(errorline[0], exp)
assert replace_ids(str(exinfo.value), False) == _assign_on_proxied_built_item_child_after_freeze_expected_ex
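# Minimal non-failing sketch, for contrast with the error cases above. It only reuses
# constructs already defined in this file (X, a Root with nested_repeatables, a builder
# whose mc_build creates distinct keys) and is never invoked by the test run.
def _example_builder_without_key_clash():
    @nested_repeatables('xses')
    class Root(ConfigItem):
        pass

    class XBuilder(ConfigBuilder):
        def mc_build(self):
            # Distinct mc_keys, so nothing clashes with items defined in the 'with' block.
            for server_num in range(1, 3):
                with X('server%d' % server_num) as c:
                    c.server_num = server_num
                    c.setattr('something', prod=1, pp=2)

    @mc_config(ef2_prod_pp, load_now=True)
    def config(_):
        with Root():
            XBuilder()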
|
|
#!/usr/bin/env python
"""
ClassLoader for "VM" front end.
Ported from C++ to Python.
Copyright 2015 Sam Saint-Pettersen
Released under the MIT/X11 License.
Please see LICENSE file.
"""
import sys
import re
from os import path
from classfile import ClassFile
class ClassLoader:
    def __init__(self):
        self.classContents = []
        self.the_class = ''
        self.cf = ClassFile()
def load(self, _class, dump):
self.the_class = _class
self.classContents = self.readClassBytes()
self.setMagicNumber()
# ----------------------------------------------------------------------------------------
        magic = self.cf.getMagicNumber()
print('\nMagic number (hex) = {0} (dec: {1})'.format(magic, format(magic, '02x')))
# ----------------------------------------------------------------------------------------
        if self.cf.checkMagicNumber():
print('')
self.setMinorVersion()
self.setMajorVersion()
# ------------------------------------------------------------------------------------
            minorVer = self.cf.getMinorVersion()
print('Minor version = {0} (hex: {1})'.format(minorVer, format(minorVer, '02x')))
            majorVer = self.cf.getMajorVersion()
print('Major version = {0} (hex: {1})'.format(majorVer, format(majorVer, '02x')))
# ------------------------------------------------------------------------------------
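    # Usage sketch (assumption: 'HelloWorld.class' is some compiled Java classfile on disk;
    # the dump flag is simply passed through here):
    #   loader = ClassLoader()
    #   loader.load('HelloWorld.class', dump=False)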
def readClassBytes(self):
bytes = []
if path.isfile(self.the_class):
bytes_read = open(self.the_class, 'rb').read()
for byte in bytes_read:
bytes.append(repr(byte))
else:
print('\nCannot open file: {0}'.format(self.the_class))
            print('It may not exist or is inaccessible.')
sys.exit(-1)
return bytes
#
# Set a section of the loaded Java classfile stored object (cf).
#
def setClassSection(self, start, end, base):
fvalue = ''
value = []
i = start
while(i < end):
x = int(self.classContents[i], 16)
value.append(x)
i += 1
s = ''
for v in value:
if base == 16: s = format(v, '02x')
elif base == 10: s = str(v)
fvalue += s
return fvalue
#
# Set and return a constant pool array.
#
def setConstantPoolArray(self, tag, data1, data2):
array = []
array.append(tag)
if data2 == '':
array.append(data1)
else:
array.append('{0}{1}'.format(data1, data2))
print('tag: {0}'.format(array[0]))
print('>>>: {0}\n'.format(array[1]))
return array
#
# Get UTF-8 character for decimal integer value.
#
def getUTF8Char(self, dec):
return str(unichr(dec))
#
# Look up word mnemonic instruction from bytecode instruction.
#
def lookupMnemonicInstruction(self, bytecode):
return "instruction" # TODO
#
# Set magic number for loaded Java classfile.
#
def setMagicNumber(self):
magic = self.setClassSection(0, 4, 16)
print(magic)
        self.cf.setMagicNumber(int(magic, 16))
#
# Set minor classfile version (e.g. 0).
#
def setMinorVersion(self):
minorVer = self.setClassSection(4, 6, 10)
        self.cf.setMinorVersion(int(minorVer))
#
# Set major classfile version (e.g. 51).
#
def setMajorVersion(self):
majorVer = self.setClassSection(6, 8, 10)
        self.cf.setMajorVersion(int(majorVer))
#
# Set constant pool count for classfile.
#
def setConstantPoolCount(self):
constPoolCount = self.setClassSection(8, 10, 10)
        self.cf.setConstantPoolCount(int(constPoolCount))
#
# Get a hexadecimal value for a classfile byte offset.
#
def getHexadecimalValue(self, i, length):
value = ''
z = 2
j = 0
while j < length:
_byte = int(self.classContents[i+z])
if _byte >= 1 and _byte < 11 and _byte != 2: break
value += format(_byte, '02x')
            self.classContents[i+z] = 0
            z += 1
            j += 1
return value
#
    # Get hexadecimal values for a classfile byte offset.
#
def getHexadecimalValues(self, i, length):
values = []
z = 2
j = 0
while j < length:
_byte = int(self.classContents[i+z])
if _byte >= 1 and _byte < 11 and _byte != 2: break
values.append(str(_byte))
            self.classContents[i+z] = 0
            z += 1
            j += 1
return values
#
# Set constant pool table for classfile.
#
def setConstantPoolTable(self):
constPoolTable = []
n = 10
x = 1
        y = self.cf.getCPCOUNT() * 9
i = n
while i < y:
            tag = self.cf.getTag(int(self.classContents[i]))
_object = []
if tag == 'Methodref':
byte1 = int(self.classContents[i+2])
byte2 = int(self.classContents[i+4])
self.classContents[i+2] = 0
self.classContents[i+4] = 0
_object = []
_object = self.setConstantPoolArray(tag, str(byte1), str(byte2))
                self.cf.setCPSIZE(5, 'Methodref')
elif tag == 'Class':
_object = []
_object = self.setConstantPoolArray(tag, str(int(self.classContents[i+2])), '')
self.classContents[i+2] = 0
                self.cf.setCPSIZE(3, 'Class')
elif tag == 'Integer':
integer = int(self.getHexadecimalValue(i, 4), 16)
# ------------------------------------------------------------------------------
print('Integer is {0} (hex: {1})\n'.format(integer, format(integer, '02x')))
# ------------------------------------------------------------------------------
r = 1
while r <= 4:
self.classContents[i+r] = 0
r += 1
_object = []
_object = self.setConstantPoolArray(tag, str(integer), '')
                self.cf.setCPSIZE(5, 'Integer')
elif tag == 'String':
_object = []
_object = self.setConstantPoolArray(tag, str(int(self.classContents[i+2])), '')
                self.cf.setCPSIZE(3, 'String')
elif tag == 'NameAndType':
                byte1 = int(self.classContents[i+2])
                byte2 = int(self.classContents[i+4])
_object = []
_object = self.setConstantPoolArray(tag, str(byte1), str(byte2))
                self.cf.setCPSIZE(5, 'NameAndType')
elif tag == 'Utf8':
utf8ByteLength = 3
                size = int(self.classContents[i+2])
# -------------------------------------------------------
print('Declared UTF-8 size = {0}\n'.format(size))
# -------------------------------------------------------
self.classContents[i+2] = 0
values = self.getHexadecimalValues(i+1, size)
utf8 = ''
for value in values:
utf8int = int(value)
                    utf8 += self.getUTF8Char(utf8int)
utf8ByteLength += 1
# ------------------------------------------------------
print('Utf8 string is \"{0}\"\n'.format(utf8))
# ------------------------------------------------------
if len(utf8) > 2:
print('Utf8 length: {0}'.format(utf8ByteLength))
_object = []
_object = self.setConstantPoolArray(tag, utf8, '')
                self.cf.setCPSIZE(utf8ByteLength, 'Utf8')
i += 1
|
|
'''Skype settings.
'''
import weakref
import sys
from utils import *
class ISettings(object):
'''Represents Skype settings. Access using L{ISkype.Settings<skype.ISkype.Settings>}.
'''
def __init__(self, Skype):
'''__init__.
@param Skype: Skype
@type Skype: L{ISkype}
'''
self._SkypeRef = weakref.ref(Skype)
def Avatar(self, Id=1, Set=None):
'''Sets user avatar picture from file.
@param Id: Optional avatar Id.
@type Id: int
@param Set: New avatar file name.
@type Set: unicode
@deprecated: Use L{LoadAvatarFromFile} instead.
'''
from warnings import warn
warn('ISettings.Avatar: Use ISettings.LoadAvatarFromFile instead.', DeprecationWarning, stacklevel=2)
if Set == None:
raise TypeError('Argument \'Set\' is mandatory!')
self.LoadAvatarFromFile(Set, Id)
def LoadAvatarFromFile(self, Filename, AvatarId=1):
'''Loads user avatar picture from file.
@param Filename: Name of the avatar file.
@type Filename: unicode
@param AvatarId: Optional avatar Id.
@type AvatarId: int
'''
s = 'AVATAR %s %s' % (AvatarId, Filename)
self._Skype._DoCommand('SET %s' % s, s)
def ResetIdleTimer(self):
'''Reset Skype idle timer.
'''
self._Skype._DoCommand('RESETIDLETIMER')
def RingTone(self, Id=1, Set=None):
'''Returns/sets a ringtone.
@param Id: Ringtone Id
@type Id: int
@param Set: Path to new ringtone or None if the current path should be queried.
@type Set: unicode
@return: Current path if Set=None, None otherwise.
@rtype: unicode or None
'''
return self._Skype._Property('RINGTONE', Id, '', Set)
def RingToneStatus(self, Id=1, Set=None):
'''Enables/disables a ringtone.
@param Id: Ringtone Id
@type Id: int
@param Set: True/False if the ringtone should be enabled/disabled or None if the current
status should be queried.
@type Set: bool
@return: Current status if Set=None, None otherwise.
@rtype: bool
'''
if Set == None:
return self._Skype._Property('RINGTONE', Id, 'STATUS') == 'ON'
return self._Skype._Property('RINGTONE', Id, 'STATUS', cndexp(Set, 'ON', 'OFF'))
def SaveAvatarToFile(self, Filename, AvatarId=1):
'''Saves user avatar picture to file.
@param Filename: Destination path.
@type Filename: unicode
@param AvatarId: Avatar Id
@type AvatarId: int
'''
s = 'AVATAR %s %s' % (AvatarId, Filename)
self._Skype._DoCommand('GET %s' % s, s)
def _Get_Skype(self):
skype = self._SkypeRef()
if skype:
return skype
raise Exception()
_Skype = property(_Get_Skype)
def _GetAEC(self):
return self._Skype.Variable('AEC') == 'ON'
def _SetAEC(self, value):
self._Skype.Variable('AEC', cndexp(value, 'ON', 'OFF'))
AEC = property(_GetAEC, _SetAEC,
doc='''Automatic echo cancellation state.
@type: bool
@warning: Starting with Skype for Windows 3.6, this property has no effect.
It can still be set for backwards compatibility reasons.
''')
def _GetAGC(self):
return self._Skype.Variable('AGC') == 'ON'
def _SetAGC(self, value):
self._Skype.Variable('AGC', cndexp(value, 'ON', 'OFF'))
AGC = property(_GetAGC, _SetAGC,
doc='''Automatic gain control state.
@type: bool
@warning: Starting with Skype for Windows 3.6, this property has no effect.
It can still be set for backwards compatibility reasons.
''')
def _GetAudioIn(self):
return self._Skype.Variable('AUDIO_IN')
def _SetAudioIn(self, value):
self._Skype.Variable('AUDIO_IN', value)
AudioIn = property(_GetAudioIn, _SetAudioIn,
doc='''Name of an audio input device.
@type: unicode
''')
def _GetAudioOut(self):
return self._Skype.Variable('AUDIO_OUT')
def _SetAudioOut(self, value):
self._Skype.Variable('AUDIO_OUT', value)
AudioOut = property(_GetAudioOut, _SetAudioOut,
doc='''Name of an audio output device.
@type: unicode
''')
def _GetAutoAway(self):
return self._Skype.Variable('AUTOAWAY') == 'ON'
def _SetAutoAway(self, value):
self._Skype.Variable('AUTOAWAY', cndexp(value, 'ON', 'OFF'))
AutoAway = property(_GetAutoAway, _SetAutoAway,
doc='''Auto away status.
@type: bool
''')
def _GetLanguage(self):
return self._Skype.Variable('UI_LANGUAGE')
def _SetLanguage(self, value):
self._Skype.Variable('UI_LANGUAGE', value)
Language = property(_GetLanguage, _SetLanguage,
doc='''Language of the Skype client as an ISO code.
@type: unicode
''')
def _GetPCSpeaker(self):
return self._Skype.Variable('PCSPEAKER') == 'ON'
def _SetPCSpeaker(self, value):
self._Skype.Variable('PCSPEAKER', cndexp(value, 'ON', 'OFF'))
PCSpeaker = property(_GetPCSpeaker, _SetPCSpeaker,
doc='''PCSpeaker status.
@type: bool
''')
def _GetRinger(self):
return self._Skype.Variable('RINGER')
def _SetRinger(self, value):
self._Skype.Variable('RINGER', value)
Ringer = property(_GetRinger, _SetRinger,
doc='''Name of a ringer device.
@type: unicode
''')
def _GetVideoIn(self):
return self._Skype.Variable('VIDEO_IN')
def _SetVideoIn(self, value):
self._Skype.Variable('VIDEO_IN', value)
VideoIn = property(_GetVideoIn, _SetVideoIn,
doc='''Name of a video input device.
@type: unicode
''')
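# Minimal usage sketch (assumption: `skype` is an attached ISkype instance; per the class
# docstring, this object is reached through skype.Settings):
#   settings = skype.Settings
#   settings.RingToneStatus(1, True)        # enable ringtone 1
#   settings.AudioIn = u'Microphone (USB)'  # select an audio input device by name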
|
|
"""
ClientInfo is a central plugin for recording data about the client,
e.g. health, position, and some auxiliary information like the player list.
Plugins subscribing to ClientInfo's events don't have to independently
track this information on their own.
"""
from spockbot.mcdata import constants
from spockbot.mcdata.utils import Info
from spockbot.mcp import mcdata
from spockbot.mcp.mcdata import (
FLG_XPOS_REL, FLG_XROT_REL, FLG_YPOS_REL, FLG_YROT_REL, FLG_ZPOS_REL,
GS_GAMEMODE
)
from spockbot.plugins.base import PluginBase, pl_announce
from spockbot.vector import Vector3
class Position(Vector3, Info):
"""
Used for things that require encoding position for the protocol,
but also require higher level vector functions.
"""
def get_dict(self):
d = self.__dict__.copy()
del d['vector']
d['x'], d['y'], d['z'] = self
return d
class GameInfo(Info):
def __init__(self):
self.level_type = 0
self.dimension = 0
self.gamemode = 0
self.difficulty = 0
self.max_players = 0
class Abilities(Info):
def __init__(self):
self.damage = True
self.fly = False
self.flying = False
self.creative = False
self.flying_speed = constants.PHY_FLY_ACC
self.walking_speed = constants.PHY_WLK_ACC
class PlayerHealth(Info):
def __init__(self):
self.health = 20
self.food = 20
self.food_saturation = 5
class PlayerPosition(Position):
def __init__(self, *xyz):
super(PlayerPosition, self).__init__(*xyz)
self.yaw = 0.0
self.pitch = 0.0
self.on_ground = False
class PlayerListItem(Info):
def __init__(self):
self.uuid = 0
self.name = ''
self.display_name = None
self.ping = 0
self.gamemode = 0
class ClientInfo(object):
"""
Attributes:
eid (int): Entity ID of the player
name (str): Player's Username
uuid (str): Player's UUID
abilities (Abilities): Player's current movement state and speed
game_info (GameInfo): Information about the current world/server
        spawn_position (Position): Player's initial position
        health (PlayerHealth): Player's health, food and saturation
        position (PlayerPosition): Player's current position
player_list (dict): List of all players in the server
"""
def __init__(self):
self.eid = 0
self.name = ""
self.uuid = ""
self.abilities = Abilities()
self.game_info = GameInfo()
self.spawn_position = Position()
self.health = PlayerHealth()
self.position = PlayerPosition()
self.player_list = {}
def reset(self):
"""Resets the information in ClientInfo"""
self.__init__()
@pl_announce('ClientInfo')
class ClientInfoPlugin(PluginBase):
requires = 'Event'
events = {
'LOGIN<Login Success': 'handle_login_success',
'PLAY<Join Game': 'handle_join_game',
'PLAY<Spawn Position': 'handle_spawn_position',
'PLAY<Update Health': 'handle_update_health',
'PLAY<Player Position and Look': 'handle_position_update',
'PLAY<Player List Item': 'handle_player_list',
'PLAY<Change Game State': 'handle_game_state',
'PLAY<Server Difficulty': 'handle_server_difficulty',
'PLAY<Player Abilities': 'handle_player_abilities',
'net_disconnect': 'handle_disconnect',
}
def __init__(self, ploader, settings):
super(ClientInfoPlugin, self).__init__(ploader, settings)
self.uuids = {}
self.defered_pl = {}
self.client_info = ClientInfo()
ploader.provides('ClientInfo', self.client_info)
# Login Success - Update client name and uuid
def handle_login_success(self, name, packet):
self.client_info.uuid = packet.data['uuid']
self.client_info.name = packet.data['username']
self.event.emit('client_login_success')
# Join Game - Update client state info
def handle_join_game(self, name, packet):
self.client_info.eid = packet.data['eid']
self.client_info.game_info.set_dict(packet.data)
self.event.emit('client_join_game', self.client_info.game_info)
# Spawn Position - Update client Spawn Position state
def handle_spawn_position(self, name, packet):
self.client_info.spawn_position.set_dict(packet.data['location'])
self.event.emit('client_spawn_update', self.client_info.spawn_position)
# Update Health - Update client Health state
def handle_update_health(self, name, packet):
self.client_info.health.set_dict(packet.data)
self.event.emit('client_health_update', self.client_info.health)
if packet.data['health'] <= 0.0:
self.event.emit('client_death', self.client_info.health)
# Player Position and Look - Update client Position state
def handle_position_update(self, name, packet):
f = packet.data['flags']
p = self.client_info.position
d = packet.data
p.x = p.x + d['x'] if f & FLG_XPOS_REL else d['x']
p.y = p.y + d['y'] if f & FLG_YPOS_REL else d['y']
p.z = p.z + d['z'] if f & FLG_ZPOS_REL else d['z']
p.yaw = p.yaw + d['yaw'] if f & FLG_YROT_REL else d['yaw']
p.pitch = p.pitch + d['pitch'] if f & FLG_XROT_REL else d['pitch']
self.event.emit('client_position_update', self.client_info.position)
# Player List Item - Update player list
def handle_player_list(self, name, packet):
act = packet.data['action']
for pl in packet.data['player_list']:
if act == mcdata.PL_ADD_PLAYER and pl['uuid'] not in self.uuids:
item = PlayerListItem()
item.set_dict(pl)
if pl['uuid'] in self.defered_pl:
for i in self.defered_pl[pl['uuid']]:
item.set_dict(i)
del self.defered_pl[pl['uuid']]
self.client_info.player_list[pl['uuid']] = item
self.uuids[pl['uuid']] = item
self.event.emit('client_add_player', item)
elif act in [mcdata.PL_UPDATE_GAMEMODE,
mcdata.PL_UPDATE_LATENCY,
mcdata.PL_UPDATE_DISPLAY]:
if pl['uuid'] in self.uuids:
item = self.uuids[pl['uuid']]
item.set_dict(pl)
self.event.emit('client_update_player', item)
                # Sometimes the server sends updates before it gives us the
# player. We store those in a list and apply them when
# ADD_PLAYER is sent
else:
defered = self.defered_pl.get(pl['uuid'], [])
defered.append(pl)
self.defered_pl[pl['uuid']] = defered
elif act == mcdata.PL_REMOVE_PLAYER and pl['uuid'] in self.uuids:
item = self.uuids[pl['uuid']]
del self.client_info.player_list[pl['uuid']]
del self.uuids[pl['uuid']]
self.event.emit('client_remove_player', item)
# Change Game State
def handle_game_state(self, name, packet):
if packet.data['reason'] == GS_GAMEMODE:
self.client_info.game_info.gamemode = packet.data['value']
# Server Difficulty
def handle_server_difficulty(self, name, packet):
self.client_info.game_info.difficulty = packet.data['difficulty']
# Player Abilities
def handle_player_abilities(self, name, packet):
self.client_info.abilities.flying_speed = packet.data['flying_speed']
self.client_info.abilities.walking_speed = packet.data['walking_speed']
def handle_disconnect(self, name, data):
self.client_info.reset()
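# Consumption sketch (assumptions: PluginBase exposes each required plugin as a lowercased
# attribute, as it does for 'Event' above via self.event, and custom events registered in
# `events` are dispatched with (name, data) just like the handlers in this plugin):
#   class HealthWatcherPlugin(PluginBase):
#       requires = ('Event', 'ClientInfo')
#       events = {'client_health_update': 'handle_health'}
#
#       def handle_health(self, name, health):
#           print(health.health, health.food)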
|
|
#!/usr/bin/env python3
#
# Copyright 2015 Opera Software ASA. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Python script for scanning and advertising urls over Eddystone-URL.
"""
import re
import os
import signal
import subprocess
import sys
import time
import argparse
from pprint import pprint
application_name = 'PyBeacon'
version = '0.2.4.3beta'
if (sys.version_info > (3, 0)):
DEVNULL = subprocess.DEVNULL
else:
DEVNULL = open(os.devnull, 'wb')
# The default url
url = "https://goo.gl/SkcDTN"
schemes = [
"http://www.",
"https://www.",
"http://",
"https://",
]
extensions = [
".com/", ".org/", ".edu/", ".net/", ".info/", ".biz/", ".gov/",
".com", ".org", ".edu", ".net", ".info", ".biz", ".gov",
]
parser = argparse.ArgumentParser(prog=application_name, description= __doc__)
parser.add_argument("-u", "--url", nargs='?', const=url, type=str,
default=url, help='URL to advertise.')
parser.add_argument('-s','--scan', action='store_true',
help='Scan for URLs.')
parser.add_argument('-t','--terminate', action='store_true',
help='Stop advertising URL.')
parser.add_argument('-o','--one', action='store_true',
help='Scan one URL only.')
parser.add_argument("-v", "--version", action='store_true',
help='Version of ' + application_name + '.')
parser.add_argument("-V", "--verbose", action='store_true',
help='Print lots of debug output.')
args = parser.parse_args()
def verboseOutput(text = ""):
if args.verbose:
sys.stderr.write(text + "\n")
def decodeUrl(encodedUrl):
"""
Decode a url encoded with the Eddystone (or UriBeacon) URL encoding scheme
"""
decodedUrl = schemes[encodedUrl[0]]
for c in encodedUrl[1:]:
if c <= 0x20:
decodedUrl += extensions[c]
else:
decodedUrl += chr(c)
return decodedUrl
def onUrlFound(url):
"""
Called by onPacketFound, if the packet contains a url.
"""
sys.stdout.write(url)
sys.stdout.write("\n")
sys.stdout.flush()
foundPackets = set()
def onPacketFound(packet):
"""
    Called by the scan function for each beacon packet found.
"""
data = bytearray.fromhex(packet)
if args.one:
tmp = packet[:-3]
if tmp in foundPackets:
return
foundPackets.add(tmp)
# Eddystone
if len(data) >= 20 and data[19] == 0xaa and data[20] == 0xfe:
serviceDataLength = data[21]
frameType = data[25]
# Eddystone-URL
if frameType == 0x10:
verboseOutput("Eddystone-URL")
onUrlFound(decodeUrl(data[27:22 + serviceDataLength]))
elif frameType == 0x00:
verboseOutput("Eddystone-UID")
elif frameType == 0x20:
verboseOutput("Eddystone-TLM")
else:
verboseOutput("Unknown Eddystone frame type: {}".format(frameType))
# UriBeacon
elif len(data) >= 20 and data[19] == 0xd8 and data[20] == 0xfe:
serviceDataLength = data[21]
verboseOutput("UriBeacon")
onUrlFound(decodeUrl(data[27:22 + serviceDataLength]))
else:
verboseOutput("Unknown beacon type")
verboseOutput(packet)
verboseOutput()
def scan(duration = None):
"""
Scan for beacons. This function scans for [duration] seconds. If duration
is set to None, it scans until interrupted.
"""
print("Scanning...")
subprocess.call("sudo hciconfig hci0 reset", shell = True, stdout = DEVNULL)
lescan = subprocess.Popen(
["sudo", "-n", "hcitool", "lescan", "--duplicates"],
stdout = DEVNULL)
dump = subprocess.Popen(
["sudo", "-n", "hcidump", "--raw"],
stdout = subprocess.PIPE)
packet = None
try:
startTime = time.time()
for line in dump.stdout:
line = line.decode()
if line.startswith("> "):
if packet: onPacketFound(packet)
packet = line[2:].strip()
elif line.startswith("< "):
if packet: onPacketFound(packet)
packet = None
else:
if packet: packet += " " + line.strip()
if duration and time.time() - startTime > duration:
break
except KeyboardInterrupt:
pass
subprocess.call(["sudo", "kill", str(dump.pid), "-s", "SIGINT"])
subprocess.call(["sudo", "-n", "kill", str(lescan.pid), "-s", "SIGINT"])
def encodeurl(url):
i = 0
data = []
for s in range(len(schemes)):
scheme = schemes[s]
if url.startswith(scheme):
data.append(s)
i += len(scheme)
break
else:
raise Exception("Invalid url scheme")
while i < len(url):
if url[i] == '.':
for e in range(len(extensions)):
expansion = extensions[e]
if url.startswith(expansion, i):
data.append(e)
i += len(expansion)
break
else:
data.append(0x2E)
i += 1
else:
data.append(ord(url[i]))
i += 1
return data
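# Round-trip sketch of the Eddystone URL encoding implemented above; the URL is the
# module's default advertising URL and the helper name is purely illustrative.
def _encoding_roundtrip_demo():
    encoded = encodeurl("https://goo.gl/SkcDTN")  # e.g. [3, ord('g'), ord('o'), ord('o'), 0x2E, ...]
    assert decodeUrl(encoded) == "https://goo.gl/SkcDTN"
    return encoded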
def encodeMessage(url):
encodedurl = encodeurl(url)
encodedurlLength = len(encodedurl)
verboseOutput("Encoded url length: " + str(encodedurlLength))
if encodedurlLength > 18:
raise Exception("Encoded url too long (max 18 bytes)")
message = [
0x02, # Flags length
0x01, # Flags data type value
0x1a, # Flags data
0x03, # Service UUID length
0x03, # Service UUID data type value
0xaa, # 16-bit Eddystone UUID
0xfe, # 16-bit Eddystone UUID
5 + len(encodedurl), # Service Data length
0x16, # Service Data data type value
0xaa, # 16-bit Eddystone UUID
0xfe, # 16-bit Eddystone UUID
0x10, # Eddystone-url frame type
0xed, # txpower
]
message += encodedurl
return message
def advertise(url):
print("Advertising: " + url)
message = encodeMessage(url)
# Prepend the length of the whole message
message.insert(0, len(message))
# Pad message to 32 bytes for hcitool
while len(message) < 32: message.append(0x00)
# Make a list of hex strings from the list of numbers
message = map(lambda x: "%02x" % x, message)
# Concatenate all the hex strings, separated by spaces
message = " ".join(message)
verboseOutput("Message: " + message)
subprocess.call("sudo hciconfig hci0 up", shell = True, stdout = DEVNULL)
# Stop advertising
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x000a 00", shell = True, stdout = DEVNULL)
# Set message
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x0008 " + message, shell = True, stdout = DEVNULL)
# Resume advertising
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x000a 01", shell = True, stdout = DEVNULL)
def stopAdvertising():
print("Stopping advertising")
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x000a 00", shell = True, stdout = DEVNULL)
def showVersion():
print(application_name + " " + version)
def main():
subprocess.call(["sudo", "-v"])
if args.version:
showVersion()
elif args.terminate:
stopAdvertising()
elif args.one:
scan(3)
elif args.scan:
scan()
else:
advertise(args.url)
if __name__ == "__main__":
main()
|
|
"""
Some instructions on writing CLI tests:
1. Look at test_ray_start for a simple output test example.
2. To get a valid regex, start with copy-pasting your output from a captured
version (no formatting). Then escape ALL regex characters (parenthesis,
brackets, dots, etc.). THEN add ".+" to all the places where info might
change run to run.
3. Look at test_ray_up for an example of how to mock AWS, commands,
and autoscaler config.
4. Print your outputs!!!! Tests are impossible to debug if they fail
and you did not print anything. Since command output is captured by click,
MAKE SURE YOU print(result.output) when tests fail!!!
WARNING: IF YOU MOCK AWS, DON'T FORGET THE AWS CREDENTIALS FIXTURE (configure_aws in this file).
THIS IS REQUIRED SO BOTO3 DOES NOT ACCESS THE ACTUAL AWS SERVERS.
Note: config cache does not work with AWS mocks since the AWS resource ids are
randomized each time.
"""
import glob
import sys
import tempfile
import uuid
import re
import os
from contextlib import contextmanager
from pathlib import Path
import pytest
import moto
from moto import mock_ec2, mock_iam
from unittest.mock import MagicMock, patch
from click.testing import CliRunner
from testfixtures import Replacer
from testfixtures.popen import MockPopen, PopenBehaviour
import ray
import ray.autoscaler._private.aws.config as aws_config
from ray.cluster_utils import cluster_not_supported
import ray.scripts.scripts as scripts
from ray._private.test_utils import wait_for_condition
boto3_list = [
{
"InstanceType": "t1.micro",
"VCpuInfo": {"DefaultVCpus": 1},
"MemoryInfo": {"SizeInMiB": 627},
},
{
"InstanceType": "t3a.small",
"VCpuInfo": {"DefaultVCpus": 2},
"MemoryInfo": {"SizeInMiB": 2048},
},
{
"InstanceType": "m4.4xlarge",
"VCpuInfo": {"DefaultVCpus": 16},
"MemoryInfo": {"SizeInMiB": 65536},
},
{
"InstanceType": "p3.8xlarge",
"VCpuInfo": {"DefaultVCpus": 32},
"MemoryInfo": {"SizeInMiB": 249856},
"GpuInfo": {"Gpus": [{"Name": "V100", "Count": 4}]},
},
]
@pytest.fixture
def configure_lang():
"""Configure output for travis + click."""
if sys.platform != "darwin":
os.environ["LC_ALL"] = "C.UTF-8"
os.environ["LANG"] = "C.UTF-8"
@pytest.fixture
def configure_aws():
"""Mocked AWS Credentials for moto."""
os.environ["LC_ALL"] = "C.UTF-8"
os.environ["LANG"] = "C.UTF-8"
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
# moto (boto3 mock) only allows a hardcoded set of AMIs
dlami = (
moto.ec2.ec2_backends["us-west-2"]
.describe_images(filters={"name": "Deep Learning AMI Ubuntu*"})[0]
.id
)
aws_config.DEFAULT_AMI["us-west-2"] = dlami
list_instances_mock = MagicMock(return_value=boto3_list)
with patch(
"ray.autoscaler._private.aws.node_provider.list_ec2_instances",
list_instances_mock,
):
yield
@pytest.fixture(scope="function")
def _unlink_test_ssh_key():
"""Use this to remove the keys spawned by ray up."""
yield
try:
for path in glob.glob(os.path.expanduser("~/.ssh/__test-cli_key*")):
os.remove(path)
except FileNotFoundError:
pass
def _debug_die(result):
print("!!!!")
print(result.output)
print("!!!!")
assert False
def _die_on_error(result):
if result.exit_code == 0:
return
_debug_die(result)
def _debug_check_line_by_line(result, expected_lines):
output_lines = result.output.split("\n")
i = 0
for out in output_lines:
if i >= len(expected_lines):
i += 1
print("!!!!!! Expected fewer lines")
context = [f"CONTEXT: {line}" for line in output_lines[i - 3 : i]]
print("\n".join(context))
extra = [f"-- {line}" for line in output_lines[i:]]
print("\n".join(extra))
break
exp = expected_lines[i]
matched = re.fullmatch(exp + r" *", out) is not None
if not matched:
print(f"{i:>3}: {out}")
print(f"!!! ^ ERROR: Expected (regex): {repr(exp)}")
else:
print(f"{i:>3}: {out}")
i += 1
if i < len(expected_lines):
print("!!! ERROR: Expected extra lines (regex):")
for line in expected_lines[i:]:
print(repr(line))
assert False
@contextmanager
def _setup_popen_mock(commands_mock, commands_verifier=None):
"""
Mock subprocess.Popen's behavior and if applicable, intercept the commands
received by Popen and check if they are as expected using
commands_verifier provided by caller.
TODO(xwjiang): Ideally we should write a lexical analyzer that can parse
in a more intelligent way.
"""
Popen = MockPopen()
Popen.set_default(behaviour=commands_mock)
with Replacer() as replacer:
replacer.replace("subprocess.Popen", Popen)
yield
if commands_verifier:
assert commands_verifier(Popen.all_calls)
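# Minimal usage sketch for the helper above (the mocked output and the verifier
# predicate are illustrative only):
#
#     def commands_mock(command, stdin):
#         return PopenBehaviour(stdout=b"MOCKED")
#
#     def commands_verifier(calls):
#         # `calls` is the list of Popen invocations recorded by MockPopen.
#         return len(calls) > 0
#
#     with _setup_popen_mock(commands_mock, commands_verifier):
#         ...  # invoke CLI commands that shell out via subprocess.Popen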
def _load_output_pattern(name):
pattern_dir = Path(__file__).parent / "test_cli_patterns"
with open(str(pattern_dir / name)) as f:
# Remove \n from each line.
# Substitute the Ray version in each line containing the string
# {ray_version}.
out = []
for x in f.readlines():
if "{ray_version}" in x:
out.append(x[:-1].format(ray_version=ray.__version__))
else:
out.append(x[:-1])
return out
def _check_output_via_pattern(name, result):
expected_lines = _load_output_pattern(name)
if result.exception is not None:
raise result.exception from None
print(result.output)
expected = r" *\n".join(expected_lines) + "\n?"
if re.fullmatch(expected, result.output) is None:
_debug_check_line_by_line(result, expected_lines)
assert result.exit_code == 0
DEFAULT_TEST_CONFIG_PATH = str(
Path(__file__).parent / "test_cli_patterns" / "test_ray_up_config.yaml"
)
MISSING_MAX_WORKER_CONFIG_PATH = str(
Path(__file__).parent
/ "test_cli_patterns"
/ "test_ray_up_no_max_worker_config.yaml"
)
DOCKER_TEST_CONFIG_PATH = str(
Path(__file__).parent / "test_cli_patterns" / "test_ray_up_docker_config.yaml"
)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"),
)
def test_ray_start(configure_lang):
runner = CliRunner()
temp_dir = os.path.join("/tmp", uuid.uuid4().hex)
result = runner.invoke(
scripts.start,
[
"--head",
"--log-style=pretty",
"--log-color",
"False",
"--port",
"0",
"--temp-dir",
temp_dir,
],
)
# Check that --temp-dir arg worked:
assert os.path.isfile(os.path.join(temp_dir, "ray_current_cluster"))
assert os.path.isdir(os.path.join(temp_dir, "session_latest"))
_die_on_error(runner.invoke(scripts.stop))
_check_output_via_pattern("test_ray_start.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"),
)
@mock_ec2
@mock_iam
def test_ray_up(configure_lang, _unlink_test_ssh_key, configure_aws):
def commands_mock(command, stdin):
# if we want to have e.g. some commands fail,
# we can have overrides happen here.
# unfortunately, cutting out SSH prefixes and such
# is, to put it lightly, non-trivial
if "uptime" in command:
return PopenBehaviour(stdout=b"MOCKED uptime")
if "rsync" in command:
return PopenBehaviour(stdout=b"MOCKED rsync")
if "ray" in command:
return PopenBehaviour(stdout=b"MOCKED ray")
return PopenBehaviour(stdout=b"MOCKED GENERIC")
with _setup_popen_mock(commands_mock):
# config cache does not work with mocks
runner = CliRunner()
result = runner.invoke(
scripts.up,
[
DEFAULT_TEST_CONFIG_PATH,
"--no-config-cache",
"-y",
"--log-style=pretty",
"--log-color",
"False",
],
)
_check_output_via_pattern("test_ray_up.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"),
)
@mock_ec2
@mock_iam
def test_ray_up_docker(configure_lang, _unlink_test_ssh_key, configure_aws):
def commands_mock(command, stdin):
# if we want to have e.g. some commands fail,
# we can have overrides happen here.
# unfortunately, cutting out SSH prefixes and such
# is, to put it lightly, non-trivial
if ".Config.Env" in command:
return PopenBehaviour(stdout=b"{}")
if "uptime" in command:
return PopenBehaviour(stdout=b"MOCKED uptime")
if "rsync" in command:
return PopenBehaviour(stdout=b"MOCKED rsync")
if "ray" in command:
return PopenBehaviour(stdout=b"MOCKED ray")
return PopenBehaviour(stdout=b"MOCKED GENERIC")
with _setup_popen_mock(commands_mock):
# config cache does not work with mocks
runner = CliRunner()
result = runner.invoke(
scripts.up,
[
DOCKER_TEST_CONFIG_PATH,
"--no-config-cache",
"-y",
"--log-style=pretty",
"--log-color",
"False",
],
)
_check_output_via_pattern("test_ray_up_docker.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"),
)
@mock_ec2
@mock_iam
def test_ray_up_record(configure_lang, _unlink_test_ssh_key, configure_aws):
def commands_mock(command, stdin):
# if we want to have e.g. some commands fail,
# we can have overrides happen here.
# unfortunately, cutting out SSH prefixes and such
# is, to put it lightly, non-trivial
if "uptime" in command:
return PopenBehaviour(stdout=b"MOCKED uptime")
if "rsync" in command:
return PopenBehaviour(stdout=b"MOCKED rsync")
if "ray" in command:
return PopenBehaviour(stdout=b"MOCKED ray")
return PopenBehaviour(stdout=b"MOCKED GENERIC")
with _setup_popen_mock(commands_mock):
# config cache does not work with mocks
runner = CliRunner()
result = runner.invoke(
scripts.up,
[DEFAULT_TEST_CONFIG_PATH, "--no-config-cache", "-y", "--log-style=record"],
)
_check_output_via_pattern("test_ray_up_record.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"),
)
@mock_ec2
@mock_iam
def test_ray_attach(configure_lang, configure_aws, _unlink_test_ssh_key):
def commands_mock(command, stdin):
# TODO(maximsmol): this is a hack since stdout=sys.stdout
# doesn't work with the mock for some reason
print("ubuntu@ip-.+:~$ exit")
return PopenBehaviour(stdout="ubuntu@ip-.+:~$ exit")
with _setup_popen_mock(commands_mock):
runner = CliRunner()
result = runner.invoke(
scripts.up,
[
DEFAULT_TEST_CONFIG_PATH,
"--no-config-cache",
"-y",
"--log-style=pretty",
"--log-color",
"False",
],
)
_die_on_error(result)
result = runner.invoke(
scripts.attach,
[
DEFAULT_TEST_CONFIG_PATH,
"--no-config-cache",
"--log-style=pretty",
"--log-color",
"False",
],
)
_check_output_via_pattern("test_ray_attach.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"),
)
@mock_ec2
@mock_iam
def test_ray_dashboard(configure_lang, configure_aws, _unlink_test_ssh_key):
def commands_mock(command, stdin):
# TODO(maximsmol): this is a hack since stdout=sys.stdout
# doesn't work with the mock for some reason
print("ubuntu@ip-.+:~$ exit")
return PopenBehaviour(stdout="ubuntu@ip-.+:~$ exit")
with _setup_popen_mock(commands_mock):
runner = CliRunner()
result = runner.invoke(
scripts.up,
[
DEFAULT_TEST_CONFIG_PATH,
"--no-config-cache",
"-y",
"--log-style=pretty",
"--log-color",
"False",
],
)
_die_on_error(result)
result = runner.invoke(
scripts.dashboard, [DEFAULT_TEST_CONFIG_PATH, "--no-config-cache"]
)
_check_output_via_pattern("test_ray_dashboard.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"),
)
@mock_ec2
@mock_iam
def test_ray_exec(configure_lang, configure_aws, _unlink_test_ssh_key):
def commands_mock(command, stdin):
# TODO(maximsmol): this is a hack since stdout=sys.stdout
# doesn't work with the mock for some reason
print("This is a test!")
return PopenBehaviour(stdout=b"This is a test!")
def commands_verifier(calls):
for call in calls:
if len(call[1]) > 0:
if any(" ray stop; " in token for token in call[1][0]):
return True
return False
with _setup_popen_mock(commands_mock, commands_verifier):
runner = CliRunner()
result = runner.invoke(
scripts.up,
[
DEFAULT_TEST_CONFIG_PATH,
"--no-config-cache",
"-y",
"--log-style=pretty",
"--log-color",
"False",
],
)
_die_on_error(result)
result = runner.invoke(
scripts.exec,
[
DEFAULT_TEST_CONFIG_PATH,
"--no-config-cache",
"--log-style=pretty",
'"echo This is a test!"',
"--stop",
],
)
_check_output_via_pattern("test_ray_exec.txt", result)
# Try to check if we are running in travis. Bazel overrides and controls
# env vars, so the typical travis env-vars don't help.
# Unfortunately it will not be nice if your username is travis
# and you're running on a Mac.
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"),
)
@mock_ec2
@mock_iam
def test_ray_submit(configure_lang, configure_aws, _unlink_test_ssh_key):
def commands_mock(command, stdin):
# TODO(maximsmol): this is a hack since stdout=sys.stdout
# doesn't work with the mock for some reason
if "rsync" not in command:
print("This is a test!")
return PopenBehaviour(stdout=b"This is a test!")
with _setup_popen_mock(commands_mock):
runner = CliRunner()
result = runner.invoke(
scripts.up,
[
DEFAULT_TEST_CONFIG_PATH,
"--no-config-cache",
"-y",
"--log-style=pretty",
"--log-color",
"False",
],
)
_die_on_error(result)
with tempfile.NamedTemporaryFile(suffix="test.py", mode="w") as f:
f.write("print('This is a test!')\n")
result = runner.invoke(
scripts.submit,
[
DEFAULT_TEST_CONFIG_PATH,
"--no-config-cache",
"--log-style=pretty",
"--log-color",
"False",
# this is somewhat misleading, since the file
# actually never gets run
# TODO(maximsmol): make this work properly one day?
f.name,
],
)
_check_output_via_pattern("test_ray_submit.txt", result)
def test_ray_status(shutdown_only, monkeypatch):
import ray
address = ray.init(num_cpus=3).get("address")
runner = CliRunner()
def output_ready():
result = runner.invoke(scripts.status)
result.stdout
if not result.exception and "memory" in result.output:
return True
raise RuntimeError(
f"result.exception={result.exception} " f"result.output={result.output}"
)
wait_for_condition(output_ready)
result = runner.invoke(scripts.status, [])
_check_output_via_pattern("test_ray_status.txt", result)
result_arg = runner.invoke(scripts.status, ["--address", address])
_check_output_via_pattern("test_ray_status.txt", result_arg)
# Try to check status with RAY_ADDRESS set
monkeypatch.setenv("RAY_ADDRESS", address)
result_env = runner.invoke(scripts.status)
_check_output_via_pattern("test_ray_status.txt", result_env)
result_env_arg = runner.invoke(scripts.status, ["--address", address])
_check_output_via_pattern("test_ray_status.txt", result_env_arg)
@pytest.mark.xfail(cluster_not_supported, reason="cluster not supported on Windows")
def test_ray_status_multinode(ray_start_cluster):
cluster = ray_start_cluster
for _ in range(4):
cluster.add_node(num_cpus=2)
runner = CliRunner()
def output_ready():
result = runner.invoke(scripts.status)
result.stdout
if not result.exception and "memory" in result.output:
return True
raise RuntimeError(
f"result.exception={result.exception} " f"result.output={result.output}"
)
wait_for_condition(output_ready)
result = runner.invoke(scripts.status, [])
_check_output_via_pattern("test_ray_status_multinode.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"),
)
@mock_ec2
@mock_iam
def test_ray_cluster_dump(configure_lang, configure_aws, _unlink_test_ssh_key):
def commands_mock(command, stdin):
print("This is a test!")
return PopenBehaviour(stdout=b"This is a test!")
with _setup_popen_mock(commands_mock):
runner = CliRunner()
result = runner.invoke(
scripts.up,
[
DEFAULT_TEST_CONFIG_PATH,
"--no-config-cache",
"-y",
"--log-style=pretty",
"--log-color",
"False",
],
)
_die_on_error(result)
result = runner.invoke(
scripts.cluster_dump, [DEFAULT_TEST_CONFIG_PATH, "--no-processes"]
)
_check_output_via_pattern("test_ray_cluster_dump.txt", result)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
|
from dataclasses import fields
from typing import Any, ClassVar, Dict, List, Optional, Tuple
from unittest import TestCase
from graphql import GraphQLSchema
from ...compiler.helpers import Location
from ...compiler.metadata import FilterInfo
from ...exceptions import GraphQLInvalidArgumentError
from ...interpreter import DataContext, interpret_query
from ...interpreter.debugging import AdapterOperation, InterpreterAdapterTap, RecordedTrace
from ...interpreter.immutable_stack import ImmutableStack, make_empty_stack
from ..test_helpers import get_schema
from .in_memory_test_adapter import InMemoryTestAdapter
class InterpreterBehaviorTests(TestCase):
schema: ClassVar[GraphQLSchema]
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Register trace and context equality functions for this test suite."""
super().__init__(*args, **kwargs)
self.maxDiff = None
# RecordedTrace and DataContext objects in general aren't necessarily comparable,
# since they are generic on DataToken, and the DataToken type parameter doesn't necessarily
# have to support equality-checking. However, for this particular test suite, we know that
# DataToken is actually dict(), and can construct a proper way to check equality.
self.addTypeEqualityFunc(DataContext, self._assert_data_contexts_are_equal)
self.addTypeEqualityFunc(RecordedTrace, self._assert_traces_are_equal)
# ImmutableStack objects also in general aren't necessarily comparable, but for dict-typed
# tokens, all data that might end up on the stack is actually going to be comparable,
# so we can make a reasonable comparison function for this test suite in particular.
self.addTypeEqualityFunc(ImmutableStack, self._assert_immutable_stacks_are_equal)
# By default, tuples are compared directly with their own `==` operator. We need to override
# that behavior since the `==` operator doesn't respect our custom equality rules:
# we simply check tuples element-wise with self.assertEqual(), which does respect our rules.
self.addTypeEqualityFunc(tuple, self._assert_tuples_are_equal)
def _assert_data_contexts_are_equal(
self,
expected_context: DataContext[dict],
actual_context: DataContext[dict],
msg: Optional[str] = None,
) -> None:
for attribute_name in DataContext.__slots__:
self.assertEqual(
getattr(expected_context, attribute_name),
getattr(actual_context, attribute_name),
msg=msg,
)
def _assert_traces_are_equal(
self,
expected_trace: RecordedTrace[dict],
actual_trace: RecordedTrace[dict],
msg: Optional[str] = None,
) -> None:
msg_suffix = (" " + msg) if msg is not None else ""
self.assertEqual(
expected_trace.root_uid,
actual_trace.root_uid,
msg=(
(
f"Traces have different root_uid values: "
f"{expected_trace.root_uid} != {actual_trace.root_uid}."
)
+ msg_suffix
),
)
# Compare trace prefixes first: zip() stops when the shorter of the two iterables runs out.
for index, (expected_op, actual_op) in enumerate(
zip(expected_trace.operations, actual_trace.operations)
):
for field_definition in fields(AdapterOperation):
field_name = field_definition.name
self.assertEqual(
getattr(expected_op, field_name),
getattr(actual_op, field_name),
msg=(
(
f"Trace mismatch at operation index {index} "
f'on operation field "{field_name}": '
f"{expected_op} != {actual_op}"
)
+ msg_suffix
),
)
        # Then, check that both traces contain the same number of operations.
# The maximal shared prefix of two equal-length collections is the full collection,
# so after this check, we know that the lists of operations must have been equal.
self.assertEqual(
len(expected_trace.operations),
len(actual_trace.operations),
msg=(
(
f"Traces have different numbers of operations: "
f"{len(expected_trace.operations)} != {len(actual_trace.operations)}"
)
+ msg_suffix
),
)
def _assert_immutable_stacks_are_equal(
self,
expected_stack: ImmutableStack,
actual_stack: ImmutableStack,
msg: Optional[str] = None,
) -> None:
self.assertEqual(expected_stack.depth, actual_stack.depth, msg=msg)
self.assertEqual(expected_stack.value, actual_stack.value, msg=msg)
self.assertEqual(expected_stack.tail, actual_stack.tail, msg=msg)
def _assert_tuples_are_equal(
self,
expected_tuple: tuple,
actual_tuple: tuple,
msg: Optional[str] = None,
) -> None:
self.assertEqual(len(expected_tuple), len(actual_tuple), msg=msg)
for index, (expected_item, actual_item) in enumerate(zip(expected_tuple, actual_tuple)):
self.assertEqual(
expected_item,
actual_item,
msg=(
f"First differing element {index}: {expected_item} != {actual_item}"
+ (f"\n\n{msg}" if msg is not None else "")
),
)
@classmethod
def setUpClass(cls) -> None:
cls.schema = get_schema()
def test_eager_exception_on_bad_query_arguments(self) -> None:
adapter = InterpreterAdapterTap(InMemoryTestAdapter())
query_with_no_args = """{
Animal {
name @output(out_name: "name")
}
}"""
query_with_args = """{
Animal {
name @output(out_name: "name") @filter(op_name: "=", value: ["$animal_name"])
}
}"""
string_args = {"animal_name": "Beethoven"}
int_args = {"animal_name": 123}
invalid_calls: Tuple[Tuple[str, Dict[str, Any]], ...] = (
(query_with_no_args, string_args),
(query_with_args, {}),
(query_with_args, int_args),
)
for invalid_query, invalid_args in invalid_calls:
# Invalid calls must be caught before the generator is returned, i.e. eagerly.
with self.assertRaises(GraphQLInvalidArgumentError):
interpret_query(adapter, self.schema, invalid_query, invalid_args)
# We expect the trace to contain no operations, since nothing should have been called.
trace = adapter.recorder.get_trace()
expected_trace = RecordedTrace[dict](tuple())
self.assertEqual(expected_trace, trace)
def test_no_adapter_calls_if_output_generator_is_not_advanced(self) -> None:
adapter = InterpreterAdapterTap(InMemoryTestAdapter())
# It shouldn't really matter what kind of query we run here, the outcome should be the same.
query = """{
Animal {
name @output(out_name: "name")
}
}"""
args: Dict[str, Any] = {}
# Make but do not consume or advance the generator produced here!
# This should not result in any calls to the adapter.
interpret_query(adapter, self.schema, query, args)
# We expect the trace to contain no operations, since nothing should have been called.
trace = adapter.recorder.get_trace()
expected_trace = RecordedTrace[dict](tuple())
self.assertEqual(expected_trace, trace)
def test_single_generator_pull_grabs_only_one_result_from_adapter(self) -> None:
adapter = InterpreterAdapterTap(InMemoryTestAdapter())
query = """{
Animal {
name @output(out_name: "name")
}
}"""
args: Dict[str, Any] = {}
result_gen = interpret_query(adapter, self.schema, query, args)
next_row = next(result_gen) # advance the generator one step
expected_next_row = {
"name": "Scooby Doo",
}
self.assertEqual(expected_next_row, next_row)
trace = adapter.recorder.get_trace()
scooby_doo_token = {"name": "Scooby Doo", "uuid": "1001", "__typename": "Animal"}
scooby_doo_base_context = DataContext[dict](
scooby_doo_token,
{
Location(("Animal",), None, 1): scooby_doo_token,
},
make_empty_stack().push({}),
)
scooby_doo_context = DataContext[dict](
scooby_doo_token,
{
Location(("Animal",), None, 1): scooby_doo_token,
},
scooby_doo_base_context.expression_stack.push(scooby_doo_base_context),
)
expected_trace = RecordedTrace[dict](
(
AdapterOperation(
"call",
"project_property",
0,
RecordedTrace.DEFAULT_ROOT_UID,
(
("__input_iterable", "Animal", "name"),
{
"runtime_arg_hints": {},
"used_property_hints": frozenset({"name"}),
"filter_hints": [],
"neighbor_hints": [],
},
),
),
AdapterOperation(
"call",
"get_tokens_of_type",
1,
RecordedTrace.DEFAULT_ROOT_UID,
(
("Animal",),
{
"runtime_arg_hints": {},
"used_property_hints": frozenset({"name"}),
"filter_hints": [],
"neighbor_hints": [],
},
),
),
AdapterOperation(
"yield",
"get_tokens_of_type",
2,
1,
scooby_doo_token,
),
AdapterOperation(
"yield",
InterpreterAdapterTap.INPUT_ITERABLE_NAME,
3,
0,
scooby_doo_context,
),
AdapterOperation(
"yield",
"project_property",
4,
0,
(scooby_doo_context, "Scooby Doo"),
),
)
)
self.assertEqual(expected_trace, trace)
def test_filtering_a_non_output_value_works_correctly(self) -> None:
adapter = InterpreterAdapterTap(InMemoryTestAdapter())
query = """{
Animal {
name @filter(op_name: "=", value: ["$scooby_name"])
uuid @output(out_name: "uuid")
}
}"""
args: Dict[str, Any] = {
"scooby_name": "Scooby Doo",
}
result_gen = interpret_query(adapter, self.schema, query, args)
next_row = next(result_gen) # advance the generator one step
expected_next_row = {
"uuid": "1001",
}
self.assertEqual(expected_next_row, next_row)
trace = adapter.recorder.get_trace()
scooby_doo_token = {"name": "Scooby Doo", "uuid": "1001", "__typename": "Animal"}
scooby_doo_local_context = DataContext[dict](
scooby_doo_token,
{},
make_empty_stack(),
)
scooby_doo_global_base_context = DataContext[dict](
scooby_doo_token,
{
Location(("Animal",), None, 1): scooby_doo_token,
},
make_empty_stack().push({}),
)
scooby_doo_global_context = DataContext[dict](
scooby_doo_token,
{
Location(("Animal",), None, 1): scooby_doo_token,
},
scooby_doo_global_base_context.expression_stack.push(scooby_doo_global_base_context),
)
expected_hints = {
"runtime_arg_hints": {
"scooby_name": "Scooby Doo",
},
"used_property_hints": frozenset({"name", "uuid"}),
"filter_hints": [FilterInfo(("name",), "=", ("$scooby_name",))],
"neighbor_hints": [],
}
expected_trace = RecordedTrace[dict](
(
AdapterOperation(
"call",
"project_property",
0,
RecordedTrace.DEFAULT_ROOT_UID,
(
("__input_iterable", "Animal", "uuid"),
expected_hints,
),
),
AdapterOperation(
"call",
"project_property",
1,
RecordedTrace.DEFAULT_ROOT_UID,
(
("__input_iterable", "Animal", "name"),
expected_hints,
),
),
AdapterOperation(
"call",
"get_tokens_of_type",
2,
RecordedTrace.DEFAULT_ROOT_UID,
(
("Animal",),
expected_hints,
),
),
AdapterOperation(
"yield",
"get_tokens_of_type",
3,
2,
scooby_doo_token,
),
AdapterOperation(
"yield",
InterpreterAdapterTap.INPUT_ITERABLE_NAME,
4,
1,
scooby_doo_local_context,
),
AdapterOperation(
"yield",
"project_property",
5,
1,
(scooby_doo_local_context, "Scooby Doo"),
),
AdapterOperation(
"yield",
InterpreterAdapterTap.INPUT_ITERABLE_NAME,
6,
0,
scooby_doo_global_context,
),
AdapterOperation(
"yield",
"project_property",
7,
0,
(scooby_doo_global_context, "1001"),
),
)
)
self.assertEqual(expected_trace, trace)
def test_filter_hints_on_get_tokens_of_type_optimize_initial_data_loading(self) -> None:
adapter = InterpreterAdapterTap(InMemoryTestAdapter())
query = """{
Animal {
uuid @output(out_name: "uuid") @filter(op_name: "=", value: ["$uuid"])
}
}"""
args: Dict[str, Any] = {
"uuid": "1008",
}
result_gen = interpret_query(adapter, self.schema, query, args)
all_data = list(result_gen) # drain the generator
expected_next_row = {
"uuid": "1008",
}
self.assertEqual([expected_next_row], all_data)
trace = adapter.recorder.get_trace()
domino_token = {"name": "Domino", "uuid": "1008", "__typename": "Animal"}
domino_local_context = DataContext[dict](
domino_token,
{},
make_empty_stack(),
)
domino_global_base_context = DataContext[dict](
domino_token,
{
Location(("Animal",), None, 1): domino_token,
},
make_empty_stack().push({}),
)
domino_global_context = DataContext[dict](
domino_token,
{
Location(("Animal",), None, 1): domino_token,
},
domino_global_base_context.expression_stack.push(domino_global_base_context),
)
expected_hints = {
"runtime_arg_hints": {
"uuid": "1008",
},
"used_property_hints": frozenset({"uuid"}),
"filter_hints": [FilterInfo(("uuid",), "=", ("$uuid",))],
"neighbor_hints": [],
}
expected_trace = RecordedTrace[dict](
(
AdapterOperation(
"call",
"project_property",
0,
RecordedTrace.DEFAULT_ROOT_UID,
(
("__input_iterable", "Animal", "uuid"),
expected_hints,
),
),
AdapterOperation(
"call",
"project_property",
1,
RecordedTrace.DEFAULT_ROOT_UID,
(
("__input_iterable", "Animal", "uuid"),
expected_hints,
),
),
AdapterOperation(
"call",
"get_tokens_of_type",
2,
RecordedTrace.DEFAULT_ROOT_UID,
(
("Animal",),
expected_hints,
),
),
AdapterOperation(
# This is the only "yield" from get_tokens_of_type(), since its implementation
# is able to use the provided hints to eliminate other vertices. This is
# an example of the predicate pushdown optimization: even though the filter
# semantically happens later, it can be applied early by "pushing it down" into
# the get_tokens_of_type() call.
"yield",
"get_tokens_of_type",
3,
2,
domino_token,
),
AdapterOperation(
"yield",
InterpreterAdapterTap.INPUT_ITERABLE_NAME,
4,
1,
domino_local_context,
),
AdapterOperation(
"yield",
"project_property",
5,
1,
(domino_local_context, "1008"),
),
AdapterOperation(
"yield",
InterpreterAdapterTap.INPUT_ITERABLE_NAME,
6,
0,
domino_global_context,
),
AdapterOperation(
"yield",
"project_property",
7,
0,
(domino_global_context, "1008"),
),
)
)
self.assertEqual(expected_trace, trace)
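    # The predicate pushdown described in the comment above could be implemented in
    # an adapter roughly as follows (a hedged sketch: the `_all_tokens_of_type`
    # helper is hypothetical and InMemoryTestAdapter's real code may differ):
    #
    #     def get_tokens_of_type(self, type_name, *, filter_hints, runtime_arg_hints, **hints):
    #         tokens = self._all_tokens_of_type(type_name)
    #         for info in filter_hints:
    #             if info.op_name == "=" and len(info.fields) == 1 and info.args[0].startswith("$"):
    #                 field = info.fields[0]
    #                 value = runtime_arg_hints[info.args[0][1:]]
    #                 tokens = [token for token in tokens if token.get(field) == value]
    #         return iter(tokens)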
def test_tag_and_filter_on_local_field(self) -> None:
# Test for correct behavior (including proper hints) when querying with @tag and @filter
# for a local field (the tagged value in the same scope).
adapter = InterpreterAdapterTap(InMemoryTestAdapter())
query = """{
Animal {
color @tag(tag_name: "color")
name @output(out_name: "name")
@filter(op_name: "=", value: ["%color"])
}
}"""
args: Dict[str, Any] = {}
expected_results: List[Dict[str, Any]] = []
result_gen = interpret_query(adapter, self.schema, query, args)
actual_results = list(result_gen) # drain the iterator
self.assertEqual(expected_results, actual_results)
trace = adapter.recorder.get_trace()
        # Exactly the first four operations of the trace have operation.kind == "call".
num_calls = 4
for operation in trace.operations[:num_calls]:
self.assertEqual("call", operation.kind)
# None of the other operations in the trace are calls.
# This is because all operations of the same flavor are batched across vertices.
for operation in trace.operations[num_calls:]:
self.assertNotEqual("call", trace.operations[num_calls].kind)
actual_call_operations = trace.operations[:num_calls]
expected_hints = {
"runtime_arg_hints": {},
"used_property_hints": frozenset({"name", "color"}),
"filter_hints": [FilterInfo(fields=("name",), op_name="=", args=("%color",))],
"neighbor_hints": [],
}
output_operation_uid = 0
filter_name_operation_uid = 1
filter_color_tag_operation_uid = 2
get_tokens_operation_uid = 3
expected_call_operations = (
AdapterOperation( # The @output on the "name" field.
"call",
"project_property",
output_operation_uid,
RecordedTrace.DEFAULT_ROOT_UID,
(
("__input_iterable", "Animal", "name"),
expected_hints,
),
),
AdapterOperation( # The @filter on the "name" field.
"call",
"project_property",
filter_name_operation_uid,
RecordedTrace.DEFAULT_ROOT_UID,
(
("__input_iterable", "Animal", "name"),
expected_hints,
),
),
AdapterOperation( # Resolving the "%color" in the filter, coming from @tag on "color".
"call",
"project_property",
filter_color_tag_operation_uid,
RecordedTrace.DEFAULT_ROOT_UID,
(
("__input_iterable", "Animal", "color"),
expected_hints,
),
),
            AdapterOperation(  # The get_tokens_of_type() call that produces the Animal vertices.
"call",
"get_tokens_of_type",
get_tokens_operation_uid,
RecordedTrace.DEFAULT_ROOT_UID,
(
("Animal",),
expected_hints,
),
),
)
self.assertEqual(expected_call_operations, actual_call_operations)
# We already asserted that this query outputs no results.
# Let's ensure that:
# 1. The get_tokens_of_type() produced some tokens.
# 2. Those tokens progressed through the two project_property() calls that
# together form the @filter's evaluation.
# 3. The @filter discarded all tokens, i.e. the project_property() call corresponding
# to the single @output in the query received an empty iterable as input.
# ------
# 1. The get_tokens_of_type() produced some tokens.
# We find "yield"-kind operations whose "parent_uid" matches the uid of
# our get_tokens_of_type() operation, and ensure we get a non-empty list.
get_tokens_yield_operations = [
operation
for operation in trace.operations
if operation.kind == "yield" and operation.parent_uid == get_tokens_operation_uid
]
self.assertNotEqual([], get_tokens_yield_operations)
get_tokens_yielded_tokens = tuple(
operation.data for operation in get_tokens_yield_operations
)
# 2. The two project_property() calls consume and produce the same number of tokens
# (wrapped in DataContext objects), in the same order as originally returned
# by get_tokens_of_type().
filter_name_input_iterable_operations = [
operation
for operation in trace.operations
if (
operation.kind == "yield"
and operation.parent_uid == filter_name_operation_uid
and operation.name == InterpreterAdapterTap.INPUT_ITERABLE_NAME
)
]
filter_name_input_tokens = tuple(
operation.data.current_token for operation in filter_name_input_iterable_operations
)
self.assertEqual(get_tokens_yielded_tokens, filter_name_input_tokens)
filter_name_yielded_operations = [
operation
for operation in trace.operations
if (
operation.kind == "yield"
and operation.parent_uid == filter_name_operation_uid
and operation.name == "project_property"
)
]
filter_name_yielded_tokens = tuple(
operation.data[0].current_token # operation.data is Tuple[DataContext[DataToken], Any]
for operation in filter_name_yielded_operations
)
self.assertEqual(get_tokens_yielded_tokens, filter_name_yielded_tokens)
filter_color_tag_input_iterable_operations = [
operation
for operation in trace.operations
if (
operation.kind == "yield"
and operation.parent_uid == filter_color_tag_operation_uid
and operation.name == InterpreterAdapterTap.INPUT_ITERABLE_NAME
)
]
filter_color_tag_input_tokens = tuple(
operation.data.current_token for operation in filter_color_tag_input_iterable_operations
)
self.assertEqual(get_tokens_yielded_tokens, filter_color_tag_input_tokens)
filter_color_tag_yielded_operations = [
operation
for operation in trace.operations
if (
operation.kind == "yield"
and operation.parent_uid == filter_color_tag_operation_uid
and operation.name == "project_property"
)
]
filter_color_tag_yielded_tokens = tuple(
operation.data[0].current_token # operation.data is Tuple[DataContext[DataToken], Any]
for operation in filter_color_tag_yielded_operations
)
self.assertEqual(get_tokens_yielded_tokens, filter_color_tag_yielded_tokens)
# 3. The @filter discarded all tokens, i.e. the project_property() call corresponding
# to the single @output in the query received an empty iterable as input.
output_operation_input_iterable_operations = [
operation
for operation in trace.operations
if (
operation.kind == "yield"
and operation.parent_uid == output_operation_uid
and operation.name == InterpreterAdapterTap.INPUT_ITERABLE_NAME
)
]
self.assertEqual([], output_operation_input_iterable_operations)
|
|
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The mock module allows easy mocking of apitools clients.
This module allows you to mock out the constructor of a particular apitools
client, for a specific API and version. Then, when the client is created, it
will be run against an expected session that you define. This way code that is
not aware of the testing framework can construct new clients as normal, as long
as it's all done within the context of a mock.
"""
import difflib
import sys
import six
from apitools.base.protorpclite import messages
from apitools.base.py import base_api
from apitools.base.py import encoding
from apitools.base.py import exceptions
class Error(Exception):
"""Exceptions for this module."""
def _MessagesEqual(msg1, msg2):
"""Compare two protorpc messages for equality.
Using python's == operator does not work in all cases, specifically when
there is a list involved.
Args:
msg1: protorpc.messages.Message or [protorpc.messages.Message] or number
or string, One of the messages to compare.
msg2: protorpc.messages.Message or [protorpc.messages.Message] or number
or string, One of the messages to compare.
Returns:
If the messages are isomorphic.
"""
if isinstance(msg1, list) and isinstance(msg2, list):
if len(msg1) != len(msg2):
return False
return all(_MessagesEqual(x, y) for x, y in zip(msg1, msg2))
if (not isinstance(msg1, messages.Message) or
not isinstance(msg2, messages.Message)):
return msg1 == msg2
for field in msg1.all_fields():
field1 = getattr(msg1, field.name)
field2 = getattr(msg2, field.name)
if not _MessagesEqual(field1, field2):
return False
return True
class UnexpectedRequestException(Error):
def __init__(self, received_call, expected_call):
expected_key, expected_request = expected_call
received_key, received_request = received_call
expected_repr = encoding.MessageToRepr(
expected_request, multiline=True)
received_repr = encoding.MessageToRepr(
received_request, multiline=True)
expected_lines = expected_repr.splitlines()
received_lines = received_repr.splitlines()
diff_lines = difflib.unified_diff(expected_lines, received_lines)
diff = '\n'.join(diff_lines)
if expected_key != received_key:
msg = '\n'.join((
'expected: {expected_key}({expected_request})',
'received: {received_key}({received_request})',
'',
)).format(
expected_key=expected_key,
expected_request=expected_repr,
received_key=received_key,
received_request=received_repr)
super(UnexpectedRequestException, self).__init__(msg)
else:
msg = '\n'.join((
'for request to {key},',
'expected: {expected_request}',
'received: {received_request}',
'diff: {diff}',
'',
)).format(
key=expected_key,
expected_request=expected_repr,
received_request=received_repr,
diff=diff)
super(UnexpectedRequestException, self).__init__(msg)
class ExpectedRequestsException(Error):
def __init__(self, expected_calls):
msg = 'expected:\n'
for (key, request) in expected_calls:
msg += '{key}({request})\n'.format(
key=key,
request=encoding.MessageToRepr(request, multiline=True))
super(ExpectedRequestsException, self).__init__(msg)
class _ExpectedRequestResponse(object):
"""Encapsulation of an expected request and corresponding response."""
def __init__(self, key, request, response=None, exception=None):
self.__key = key
self.__request = request
if response and exception:
raise exceptions.ConfigurationValueError(
'Should specify at most one of response and exception')
if response and isinstance(response, exceptions.Error):
raise exceptions.ConfigurationValueError(
'Responses should not be an instance of Error')
if exception and not isinstance(exception, exceptions.Error):
raise exceptions.ConfigurationValueError(
'Exceptions must be instances of Error')
self.__response = response
self.__exception = exception
@property
def key(self):
return self.__key
@property
def request(self):
return self.__request
def ValidateAndRespond(self, key, request):
"""Validate that key and request match expectations, and respond if so.
Args:
key: str, Actual key to compare against expectations.
request: protorpc.messages.Message or [protorpc.messages.Message]
              or number or string, Actual request to compare against expectations
Raises:
            UnexpectedRequestException: If key or request don't match
expectations.
apitools_base.Error: If a non-None exception is specified to
be thrown.
Returns:
The response that was specified to be returned.
"""
if key != self.__key or not (self.__request == request or
_MessagesEqual(request, self.__request)):
raise UnexpectedRequestException((key, request),
(self.__key, self.__request))
if self.__exception:
# Can only throw apitools_base.Error.
raise self.__exception # pylint: disable=raising-bad-type
return self.__response
class _MockedMethod(object):
"""A mocked API service method."""
def __init__(self, key, mocked_client, real_method):
self.__name__ = real_method.__name__
self.__key = key
self.__mocked_client = mocked_client
self.__real_method = real_method
self.method_config = real_method.method_config
config = self.method_config()
self.__request_type = getattr(self.__mocked_client.MESSAGES_MODULE,
config.request_type_name)
self.__response_type = getattr(self.__mocked_client.MESSAGES_MODULE,
config.response_type_name)
def _TypeCheck(self, msg, is_request):
"""Ensure the given message is of the expected type of this method.
Args:
msg: The message instance to check.
is_request: True to validate against the expected request type,
False to validate against the expected response type.
Raises:
exceptions.ConfigurationValueError: If the type of the message was
not correct.
"""
if is_request:
mode = 'request'
real_type = self.__request_type
else:
mode = 'response'
real_type = self.__response_type
if not isinstance(msg, real_type):
raise exceptions.ConfigurationValueError(
'Expected {} is not of the correct type for method [{}].\n'
' Required: [{}]\n'
' Given: [{}]'.format(
mode, self.__key, real_type, type(msg)))
def Expect(self, request, response=None, exception=None,
enable_type_checking=True, **unused_kwargs):
"""Add an expectation on the mocked method.
Exactly one of response and exception should be specified.
Args:
request: The request that should be expected
response: The response that should be returned or None if
exception is provided.
exception: An exception that should be thrown, or None.
enable_type_checking: When true, the message type of the request
and response (if provided) will be checked against the types
required by this method.
"""
# TODO(jasmuth): the unused_kwargs provides a placeholder for
# future things that can be passed to Expect(), like special
# params to the method call.
# Ensure that the registered request and response mocks actually
# match what this method accepts and returns.
if enable_type_checking:
self._TypeCheck(request, is_request=True)
if response:
self._TypeCheck(response, is_request=False)
# pylint: disable=protected-access
# Class in same module.
self.__mocked_client._request_responses.append(
_ExpectedRequestResponse(self.__key,
request,
response=response,
exception=exception))
# pylint: enable=protected-access
def __call__(self, request, **unused_kwargs):
# TODO(jasmuth): allow the testing code to expect certain
# values in these currently unused_kwargs, especially the
# upload parameter used by media-heavy services like bigquery
# or bigstore.
# pylint: disable=protected-access
# Class in same module.
if self.__mocked_client._request_responses:
request_response = self.__mocked_client._request_responses.pop(0)
else:
raise UnexpectedRequestException(
(self.__key, request), (None, None))
# pylint: enable=protected-access
response = request_response.ValidateAndRespond(self.__key, request)
if response is None and self.__real_method:
response = self.__real_method(request)
print(encoding.MessageToRepr(
response, multiline=True, shortstrings=True))
return response
return response
def _MakeMockedService(api_name, collection_name,
mock_client, service, real_service):
class MockedService(base_api.BaseApiService):
pass
for method in service.GetMethodsList():
real_method = None
if real_service:
real_method = getattr(real_service, method)
setattr(MockedService,
method,
_MockedMethod(api_name + '.' + collection_name + '.' + method,
mock_client,
real_method))
return MockedService
class Client(object):
"""Mock an apitools client."""
def __init__(self, client_class, real_client=None):
"""Mock an apitools API, given its class.
Args:
            client_class: The class for the API. E.g., if you
from apis.sqladmin import v1beta3
then you can pass v1beta3.SqladminV1beta3 to this class
and anything within its context will use your mocked
version.
real_client: apitools Client, The client to make requests
against when the expected response is None.
"""
if not real_client:
real_client = client_class(get_credentials=False)
self.__orig_class = self.__class__
self.__client_class = client_class
self.__real_service_classes = {}
self.__real_client = real_client
self._request_responses = []
self.__real_include_fields = None
def __enter__(self):
return self.Mock()
def Mock(self):
"""Stub out the client class with mocked services."""
client = self.__real_client or self.__client_class(
get_credentials=False)
class Patched(self.__class__, self.__client_class):
pass
self.__class__ = Patched
for name in dir(self.__client_class):
service_class = getattr(self.__client_class, name)
if not isinstance(service_class, type):
continue
if not issubclass(service_class, base_api.BaseApiService):
continue
self.__real_service_classes[name] = service_class
# pylint: disable=protected-access
collection_name = service_class._NAME
# pylint: enable=protected-access
api_name = '%s_%s' % (self.__client_class._PACKAGE,
self.__client_class._URL_VERSION)
mocked_service_class = _MakeMockedService(
api_name, collection_name, self,
service_class,
service_class(client) if self.__real_client else None)
setattr(self.__client_class, name, mocked_service_class)
setattr(self, collection_name, mocked_service_class(self))
self.__real_include_fields = self.__client_class.IncludeFields
self.__client_class.IncludeFields = self.IncludeFields
# pylint: disable=attribute-defined-outside-init
self._url = client._url
self._http = client._http
return self
def __exit__(self, exc_type, value, traceback):
is_active_exception = value is not None
self.Unmock(suppress=is_active_exception)
if is_active_exception:
six.reraise(exc_type, value, traceback)
return True
def Unmock(self, suppress=False):
self.__class__ = self.__orig_class
for name, service_class in self.__real_service_classes.items():
setattr(self.__client_class, name, service_class)
delattr(self, service_class._NAME)
self.__real_service_classes = {}
del self._url
del self._http
self.__client_class.IncludeFields = self.__real_include_fields
self.__real_include_fields = None
requests = [(rq_rs.key, rq_rs.request)
for rq_rs in self._request_responses]
self._request_responses = []
if requests and not suppress and sys.exc_info()[1] is None:
raise ExpectedRequestsException(requests)
def IncludeFields(self, include_fields):
if self.__real_client:
return self.__real_include_fields(self.__real_client,
include_fields)
|
|
import pytest
from delphin.codecs import simplemrs
from delphin import dmrs
@pytest.fixture
def dogs_bark():
return {
'top': 10000,
'index': 10000,
'nodes': [dmrs.Node(10000, '_bark_v_1_rel', type='e'),
dmrs.Node(10001, 'udef_q_rel'),
dmrs.Node(10002, '_dog_n_1_rel', type='x')],
'links': [dmrs.Link(10000, 10002, 'ARG1', 'NEQ'),
dmrs.Link(10001, 10002, 'RSTR', 'H')]}
class TestNode():
def test_init(self):
with pytest.raises(TypeError):
dmrs.Node()
with pytest.raises(TypeError):
dmrs.Node(1)
dmrs.Node(1, '_dog_n_1')
dmrs.Node(1, '_dog_n_1', type='x')
dmrs.Node(1, '_dog_n_1', type='x', properties={'NUM': 'sg'})
dmrs.Node(1, '_dog_n_1', type='x', properties={'NUM': 'sg'}, carg='Dog')
dmrs.Node('1', '_dog_n_1')
def test__eq__(self):
n = dmrs.Node(1, '_dog_n_1', type='x', properties={'NUM': 'sg'})
assert n == dmrs.Node(2, '_dog_n_1', type='x', properties={'NUM': 'sg'})
assert n != dmrs.Node(1, '_dog_n_2', type='x', properties={'NUM': 'sg'})
assert n != dmrs.Node(2, '_dog_n_1', type='e', properties={'NUM': 'sg'})
assert n != dmrs.Node(2, '_dog_n_1', type='x', properties={'NUM': 'pl'})
def test_sortinfo(self):
n = dmrs.Node(1, '_dog_n_1')
assert n.sortinfo == {}
n = dmrs.Node(1, '_dog_n_1', type='x')
assert n.sortinfo == {'cvarsort': 'x'}
n = dmrs.Node(1, '_dog_n_1', properties={'NUM': 'sg'})
assert n.sortinfo == {'NUM': 'sg'}
n = dmrs.Node(1, '_dog_n_1', type='x', properties={'NUM': 'sg'})
assert n.sortinfo == {'cvarsort': 'x', 'NUM': 'sg'}
class TestLink():
def test_init(self):
with pytest.raises(TypeError):
dmrs.Link()
with pytest.raises(TypeError):
dmrs.Link(1)
with pytest.raises(TypeError):
dmrs.Link(1, 2)
with pytest.raises(TypeError):
dmrs.Link(1, 2, 'ARG1')
dmrs.Link(1, 2, 'ARG1', 'EQ')
dmrs.Link('1', 2, 'ARG1', 'EQ')
dmrs.Link(1, '2', 'ARG1', 'EQ')
def test__eq__(self):
link1 = dmrs.Link(1, 2, 'ARG1', 'EQ')
assert link1 == dmrs.Link(1, 2, 'ARG1', 'EQ')
assert link1 != dmrs.Link(2, 1, 'ARG1', 'EQ')
assert link1 != dmrs.Link(1, 2, 'ARG2', 'EQ')
assert link1 != dmrs.Link(1, 2, 'ARG1', 'NEQ')
class TestDMRS():
def test__init__(self, dogs_bark):
d = dmrs.DMRS()
assert d.top is None
assert d.index is None
assert d.nodes == []
assert d.links == []
d = dmrs.DMRS(**dogs_bark)
assert d.top == 10000
assert d.index == 10000
assert len(d.nodes) == 3
assert d.nodes[0].predicate == '_bark_v_1_rel'
assert d.nodes[1].predicate == 'udef_q_rel'
assert d.nodes[2].predicate == '_dog_n_1_rel'
assert len(d.links) == 2
assert d.links[0].role == 'ARG1'
assert d.links[1].role == 'RSTR'
# make sure the old way of marking top still works
dogs_bark2 = dict(dogs_bark)
dogs_bark2['links'].append(dmrs.Link(0, dogs_bark['top'], None, 'H'))
del dogs_bark2['top']
d2 = dmrs.DMRS(**dogs_bark2)
assert d.top == d2.top
def test_arguments(self, dogs_bark):
d = dmrs.DMRS()
assert d.arguments() == {}
assert d.arguments('h') == {}
d = dmrs.DMRS(**dogs_bark)
assert d.arguments() == {
10000: [('ARG1', 10002)],
10001: [('RSTR', 10002)],
10002: []
}
assert d.arguments('h') == {
10000: [],
10001: [('RSTR', 10002)],
10002: []
}
assert d.arguments('xei') == {
10000: [('ARG1', 10002)],
10001: [],
10002: []
}
def test_scopal_arguments(self, dogs_bark):
d = dmrs.DMRS()
assert d.scopal_arguments() == {}
d = dmrs.DMRS(**dogs_bark)
assert d.scopal_arguments() == {
10000: [],
10001: [('RSTR', 'qeq', 10002)],
10002: []
}
_, scopes = d.scopes()
scopemap = {}
for lbl, nodes in scopes.items():
for node in nodes:
scopemap[node.id] = lbl
assert d.scopal_arguments(scopes=scopes) == {
10000: [],
10001: [('RSTR', 'qeq', scopemap[10002])],
10002: []
}
def test_from_mrs_it_rains():
m = simplemrs.decode('''
[ TOP: h0 INDEX: e2 [e TENSE: pres]
RELS: < [ _rain_v_1<3:8> LBL: h1 ARG0: e2 ] >
HCONS: < h0 qeq h1 > ]''')
d = dmrs.from_mrs(m)
assert len(d.nodes) == 1
assert d.nodes[0].predicate == '_rain_v_1'
assert d.nodes[0].type == 'e'
assert d.nodes[0].properties == {'TENSE': 'pres'}
def test_from_mrs_nearly_all_cats_were_chased_by_dogs():
m = simplemrs.decode('''
[ LTOP: h0
INDEX: e2 [ e SF: prop TENSE: past MOOD: indicative PROG: - PERF: - ]
RELS: < [ _nearly_x_deg<0:6> LBL: h4 ARG0: e5 [ e SF: prop TENSE: untensed MOOD: indicative PROG: - PERF: - ] ARG1: u6 ]
[ _all_q<7:10> LBL: h4 ARG0: x3 [ x PERS: 3 NUM: pl IND: + ] RSTR: h7 BODY: h8 ]
[ _cat_n_1<11:15> LBL: h9 ARG0: x3 ]
[ _chase_v_1<21:27> LBL: h1 ARG0: e2 ARG1: x10 [ x PERS: 3 NUM: pl IND: + ] ARG2: x3 ]
[ udef_q<31:36> LBL: h11 ARG0: x10 RSTR: h12 BODY: h13 ]
[ _dog_n_1<31:36> LBL: h14 ARG0: x10 ] >
HCONS: < h0 qeq h1 h7 qeq h9 h12 qeq h14 >
ICONS: < e2 topic x3 > ]''')
d = dmrs.from_mrs(m)
assert len(d.nodes) == 6
n1 = d.nodes[0]
n2 = d.nodes[1]
assert n1.predicate == '_nearly_x_deg'
assert n2.predicate == '_all_q'
assert any((l.start, l.end, l.role, l.post) == (n1.id, n2.id, 'MOD', 'EQ')
for l in d.links)
def test_from_mrs_issue_303():
# https://github.com/delph-in/pydelphin/issues/303
m = simplemrs.decode('''
[ TOP: h0 INDEX: e2 [e TENSE: pres]
RELS: < [ _rain_v_1<3:8> LBL: h1 ARG0: e2 ] >
HCONS: < > ]''')
with pytest.warns(dmrs.DMRSWarning):
d = dmrs.from_mrs(m)
assert d.top is None
m = simplemrs.decode('''
[ TOP: h0 INDEX: e2 [e TENSE: pres]
RELS: < [ _rain_v_1<3:8> LBL: h1 ARG0: e2 ] >
HCONS: < h0 qeq h3 > ]''')
with pytest.warns(dmrs.DMRSWarning):
d = dmrs.from_mrs(m)
assert d.top is None
m = simplemrs.decode('''
[ LTOP: h0
INDEX: e2 [ e SF: prop TENSE: past MOOD: indicative PROG: - PERF: - ]
RELS: < [ neg<7:10> LBL: h1 ARG0: e4 [ e SF: prop TENSE: untensed MOOD: indicative PROG: - PERF: - ] ARG1: h5 ]
[ _rain_v_1<11:16> LBL: h6 ARG0: e2 ] >
HCONS: < h0 qeq h1 h5 qeq h7 >
ICONS: < > ]''')
with pytest.warns(dmrs.DMRSWarning):
d = dmrs.from_mrs(m)
n = d.nodes[0]
assert n.predicate == 'neg'
assert 'ARG1' not in d.scopal_arguments()[n.id]
|
|
# coding=utf-8
# Copyright 2018 The Batfish Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines Batfish questions and logic for loading them from disk or Batfish."""
from __future__ import absolute_import, print_function
import json
import logging
import os
import re
import sys
from copy import deepcopy
from inspect import getmembers
from typing import ( # noqa: F401
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
)
import attr
from pybatfish.client.internal import _bf_answer_obj, _bf_get_question_templates
from pybatfish.datamodel import ( # noqa: F401
Assertion,
AssertionType,
BgpRoute,
VariableType,
)
from pybatfish.datamodel.answer import Answer # noqa: F401
from pybatfish.exception import QuestionValidationException
from pybatfish.question import bfq
from pybatfish.util import BfJsonEncoder, get_uuid, validate_question_name
if TYPE_CHECKING:
from pybatfish.client.session import Session # noqa: F401
# A set of tags across all questions
_tags = set() # type: Set[str]
_VALID_VARIABLE_NAME_REGEX = re.compile(r"^\w+$")
__all__ = ["list_questions", "list_tags", "load_dir_questions", "load_questions"]
@attr.s(frozen=True)
class AllowedValue(object):
"""Describes a whitelisted value for a question parameter."""
name = attr.ib(type=str)
description = attr.ib(type=Optional[str], default=None)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> AllowedValue
return AllowedValue(json_dict["name"], json_dict.get("description"))
def __str__(self):
if self.description is not None:
return "{}: {}".format(self.name, self.description)
return self.name
class QuestionMeta(type):
"""A meta class for all Question classes."""
def __new__(cls, name, base, dct):
"""Creates a new class for a specific question."""
new_cls = super(QuestionMeta, cls).__new__(cls, name, base, dct)
additional_kwargs = {"question_name"}
def constructor(self, *args, **kwargs):
"""Create a new question."""
# Reject positional args; this way is PY2-compliant
if args:
raise TypeError("Please use keyword arguments")
# Call super (i.e., QuestionBase)
super(new_cls, self).__init__(new_cls.template, new_cls.session)
# Update well-known params, if passed in
if "exclusions" in kwargs:
self._dict["exclusions"] = kwargs.get("exclusions")
if "question_name" in kwargs:
self._dict["instance"]["instanceName"] = kwargs.get("question_name")
else:
self._dict["instance"]["instanceName"] = "__{}_{}".format(
self._dict["instance"]["instanceName"], get_uuid()
)
# Validate that we are not accepting invalid kwargs/variables
instance_vars = self._dict["instance"].get("variables", {})
allowed_kwargs = set(instance_vars)
allowed_kwargs.update(additional_kwargs)
var_difference = set(kwargs.keys()).difference(allowed_kwargs)
if var_difference:
raise QuestionValidationException(
"Received unsupported parameters/variables: {}".format(
var_difference
)
)
# Set question-specific parameters
for var_name, var_value in kwargs.items():
if var_name not in additional_kwargs:
instance_vars[var_name]["value"] = var_value
# Define signature. Helps with tab completion. Python3 centric
from inspect import Parameter, Signature
# Merge constructor params with question variables
params = [
Parameter(name=param, kind=Parameter.KEYWORD_ONLY)
for param in dct.get("variables", [])
+ [p for p in additional_kwargs if p not in ("kwargs", "self")]
]
setattr(constructor, "__signature__", Signature(parameters=params))
setattr(new_cls, "__init__", constructor)
setattr(new_cls, "__doc__", dct.get("docstring", ""))
new_cls.description = dct.get("description", "")
new_cls.tags = dct.get("tags", [])
new_cls.template = dct.get("template", {})
new_cls.session = dct.get("session")
return new_cls
def __dir__(self):
return ["description", "tags", "template"] + list(reversed(dir(QuestionBase)))
class QuestionBase(object):
"""All questions inherit functionality from this class."""
def __init__(self, dictionary, session):
self._dict = deepcopy(dictionary)
self._session = session
def answer(
self,
snapshot=None,
reference_snapshot=None,
include_one_table_keys=None,
background=False,
extra_args=None,
):
# type: (Optional[str], Optional[str], Optional[bool], bool, Optional[Dict[str, Any]]) -> Union[str, Answer]
"""
Ask and return the answer for this question.
:param snapshot: the snapshot on which to answer the question. If not
provided, the latest snapshot initialized will be used.
:type snapshot: str
:param reference_snapshot: for differential questions only, the snapshot
against which to compare.
:type reference_snapshot: str
:param include_one_table_keys: if differential is True, include keys only
from one table and not both.
:type include_one_table_keys: bool
:param background: run this question in background, return immediately
:type background: bool
:param extra_args: extra arguments to be passed with the question.
:type extra_args: dict
:rtype: :py:class:`~pybatfish.datamodel.answer.base.Answer` or
:py:class:`~pybatfish.datamodel.answer.table.TableAnswer`
:raises QuestionValidationException: if the question is malformed
"""
session = self._session
real_snapshot = session.get_snapshot(snapshot)
if reference_snapshot is None and self.get_differential():
raise ValueError(
"reference_snapshot argument is required to answer a differential question"
)
_validate(self.dict())
if include_one_table_keys is not None:
self._set_include_one_table_keys(include_one_table_keys)
return _bf_answer_obj(
session=session,
question_str=self.json(),
parameters_str="{}",
question_name=self.get_name(),
background=background,
snapshot=real_snapshot,
reference_snapshot=reference_snapshot,
extra_args=extra_args,
)
def dict(self):
"""Return the dictionary representing this question."""
return self._dict
def json(self, **kwargs):
"""Return the json string representing this question.
Keyword arguments passed to json.dumps with default assignments of
sort_keys=True and indent=2
.. deprecated:: 0.36.0
"""
return json.dumps(
self._dict, sort_keys=True, indent=2, cls=BfJsonEncoder, **kwargs
)
def get_description(self):
"""Return the short description of this question."""
return self._dict["instance"]["description"]
def get_long_description(self):
"""Return the long description of this question."""
return self._dict["instance"]["longDescription"]
def get_differential(self):
"""Return whether this question is to be asked differentially."""
return self._dict.get("differential", False)
def get_include_one_table_keys(self):
"""Return whether keys present in only one table should be included when computing answer table diffs."""
return self._dict.get("includeOneTableKeys", False)
def get_name(self):
"""Return the name of this question."""
return self._dict["instance"]["instanceName"]
def _set_include_one_table_keys(self, include_one_table_keys):
"""Set if keys present in only table should be included when computing table diffs."""
self._dict["includeOneTableKeys"] = include_one_table_keys
def set_assertion(self, assertion):
# type: (Assertion) -> QuestionBase
"""Set an assertion for a given question.
Overwrites any previous assertions.
"""
self._dict["assertion"] = assertion.dict()
return self
def make_check(self):
# type: () -> QuestionBase
"""Make this question a check which asserts that there are no results."""
self.set_assertion(Assertion(AssertionType.COUNT_EQUALS, 0))
return self
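# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how a generated question class is typically
# instantiated and answered. The host, the question name `nodeProperties`,
# the node `as1border1`, and the snapshot `snap1` are assumptions for
# illustration only; real names depend on the loaded templates and snapshots.
def _example_answer_usage():  # pragma: no cover - illustrative sketch only
    from pybatfish.client.session import Session
    session = Session(host="localhost")  # assumes a reachable Batfish service
    session.q.load()  # installs question classes as attributes of session.q
    # Generated classes take keyword-only arguments (see QuestionMeta).
    question = session.q.nodeProperties(nodes="as1border1", question_name="my_node_props")
    return question.answer(snapshot="snap1")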
class Questions(object):
"""Class to hold and manage (e.g. load, list) Batfish questions."""
def __init__(self, session):
self._session = session
def list_tags(self):
# type: () -> Set[str]
"""
Get the tags for available questions.
:return: tags for available questions
:rtype: set
"""
return {t for q in self.list() for t in q.get("tags", [])}
def list(self, tags=None):
# type: (Optional[Iterable[str]]) -> List[Dict[str, Union[str, Set]]]
"""
List available questions.
:param tags: if not `None`, only list questions with specified tags
See :py:func:`list_tags` for a list of tags for available questions.
:type tags: Iterable[str]
:return: list of question dict, containing "name", "description", and "tags"
"""
return _list_questions(tags, self)
def load(self, directory=None):
# type: (Optional[str]) -> None
"""
Load questions from Batfish service or local directory.
:param directory: optional directory to load questions from, if none is specified, questions are loaded from the Batfish service instead
:type directory: str
"""
if directory:
_install_questions(
_load_questions_from_dir(directory, self._session).items(), self
)
else:
_install_questions(_load_remote_questions_templates(self._session), self)
def list_questions(tags=None, question_module="pybatfish.question.bfq"):
# type: (Optional[Iterable[str]], str) -> List[Dict[str, Union[str, Set]]]
"""List available questions.
:param tags: if not `None`, only list questions with given tags.
See :py:func:`list_tags` for a list of tags given currently loaded questions.
:param question_module: which module to load the questions from. By default,
:py:mod:`pybatfish.question.bfq` is used.
:returns: a list of questions, where each question is represented as a dict
containing "name", "description", and "tags".
"""
return _list_questions(tags, sys.modules[question_module])
def _list_questions(tags, obj):
# type: (Optional[Iterable[str]], object) -> List[Dict[str, Union[str, Set]]]
"""List questions in the specified object, optionally filtering on supplied tags."""
# Members of the module are (name,value) pairs so
# x[1] in the lambda represents the value part.
# Want members with value of type QuestionMeta
predicate = lambda x: isinstance(x[1], QuestionMeta)
question_functions = filter(predicate, getmembers(obj))
matching_questions = []
desired_tags = set(map(str.lower, tags)) if tags else set() # type: Set[str]
for name, question_func in question_functions:
if desired_tags and not desired_tags.intersection(
map(str.lower, question_func.tags)
):
# skip questions that don't have any desired tags
continue
matching_questions.append(
{
"name": name,
"description": question_func.description,
"tags": question_func.tags,
}
)
return matching_questions
def list_tags():
# type: () -> Set[str]
"""List tags across all available questions."""
return _tags
def _install_questions_in_module(
questions: Iterable[Tuple[str, QuestionMeta]], module_name: str
) -> None:
"""Install the given questions in the specified module."""
module = sys.modules[module_name]
for (name, question_class) in questions:
setattr(question_class, "__module__", module_name)
setattr(module, name, question_class)
def _install_questions(questions: Iterable[Tuple[str, QuestionMeta]], obj: Any) -> None:
"""Install the given questions in the specified object."""
for (name, question_class) in questions:
setattr(obj, name, question_class)
def _load_questions_from_dir(question_dir, session):
# type: (str, Session) -> Dict[str, QuestionMeta]
logger = logging.getLogger(__name__)
question_files = []
for dirpath, dirnames, filenames in os.walk(question_dir):
for filename in filenames:
if filename.endswith(".json"):
question_files.append(os.path.join(dirpath, filename))
if len(question_files) == 0:
logger.warning(
"WARNING: no .json files found in supplied question directory: {questionDir}".format(
questionDir=question_dir
)
)
return {}
questions = {}
for questionFile in question_files:
try:
(qname, qclass) = _load_question_disk(questionFile, session)
questions[qname] = qclass
except Exception as err:
logger.error(
"Could not load question from {questionFile}:{err}".format(
questionFile=questionFile, err=err
)
)
logger.info(
"Successfully loaded {numQuestions}/{numQuestionFiles} question(s) from local directory".format(
numQuestions=len(questions), numQuestionFiles=len(question_files)
)
)
return questions
def load_dir_questions(questionDir, session, moduleName=bfq.__name__):
# type: (str, Session, str) -> Iterable[str]
"""Load question templates from a directory on disk and install them in the given module."""
# Find all files with questions in them.
questions = _load_questions_from_dir(questionDir, session)
_install_questions_in_module(questions.items(), moduleName)
return questions.keys()
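# Illustrative sketch (assumed host and path): loading templates from a local
# directory installs one generated class per .json template into the target
# module and returns the loaded question names.
def _example_load_dir_questions():  # pragma: no cover - illustrative sketch only
    from pybatfish.client.session import Session
    session = Session(host="localhost")  # assumes a reachable Batfish service
    names = load_dir_questions("questions/", session=session)  # hypothetical directory
    return sorted(names)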
def _load_question_disk(question_path, session):
# type: (str, Session) -> Tuple[str, QuestionMeta]
"""Load a question template from disk and instantiate a new `:py:class:Question`."""
with open(question_path, "r") as question_file:
question_dict = json.load(question_file)
try:
return _load_question_dict(question_dict, session)
except QuestionValidationException as e:
raise QuestionValidationException(
"Error loading question from {}".format(question_path), e
)
def _load_question_dict(question, session):
# type: (Dict[str, Any], Session) -> Tuple[str, QuestionMeta]
"""Create a question from a dictionary which contains a template.
:return: the name of the question and the generated question class
"""
# Perform series of validations on the question.
# Try to have meaningful error messages.
# Check has instance data
instance_data = question.get("instance")
if not instance_data:
raise QuestionValidationException("Missing instance data")
# name validation
given_question_name = instance_data.get("instanceName")
if not given_question_name or not validate_question_name(given_question_name):
raise QuestionValidationException(
"Invalid question name: {}".format(given_question_name)
)
question_name = str(given_question_name) # type: str
# description validation
question_description = instance_data.get("description", "").strip() # type: str
if not question_description:
raise QuestionValidationException(
"Missing description for question '{}'".format(question_name)
)
if not question_description.endswith("."):
question_description += "."
# Extend description if we can
long_description = instance_data.get("longDescription", "").strip() # type: str
if long_description:
if not long_description.endswith("."):
long_description += "."
question_description = "\n\n".join([question_description, long_description])
# Extract question tags
tags = sorted(map(str, instance_data.get("tags", [])))
_tags.update(tags)
# Validate question variables
ivars = instance_data.get("variables", {})
ordered_variable_names = instance_data.get("orderedVariableNames", [])
variables = _process_variables(question_name, ivars, ordered_variable_names)
# Compute docstring
docstring = _compute_docstring(question_description, variables, ivars)
# Make new Question class
question_class = QuestionMeta(
question_name,
(QuestionBase,),
{
"docstring": docstring,
"description": question_description,
"session": session,
"tags": tags,
"template": deepcopy(question),
"variables": variables,
},
)
return question_name, question_class
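# Illustrative sketch (hypothetical template): _load_question_dict expects a
# dict with an "instance" section shaped like the example below; variable
# metadata is validated by _process_variables/_validate_variable_data and
# folded into the generated class docstring via _compute_docstring.
def _example_question_template():  # pragma: no cover - illustrative sketch only
    return {
        "class": "org.batfish.question.ExampleQuestion",  # hypothetical class name
        "instance": {
            "instanceName": "exampleQuestion",
            "description": "An example question",
            "tags": ["example"],
            "variables": {
                "nodes": {
                    "type": "nodeSpec",
                    "description": "Nodes to include",
                    "optional": True,
                },
            },
        },
    }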
def _process_variables(question_name, variables, ordered_variable_names):
# type: (str, Dict[str, Dict[str, Any]], List[str]) -> List[str]
"""Perform validation on question variables.
:returns: an ordered list of variable names
"""
if not variables:
return []
for var_name, var_data in variables.items():
_validate_variable_name(question_name, var_name)
_validate_variable_data(question_name, var_name, var_data)
if _has_valid_ordered_variable_names(ordered_variable_names, variables):
return ordered_variable_names
def __var_key(name):
"""Orders required [!optional] vars first, then by name."""
return variables[name].get("optional", False), name
return sorted(variables.keys(), key=__var_key)
def _validate_variable_data(question_name, var_name, var_data):
# type: (str, str, Dict[str, Any]) -> bool
"""Perform validation on variable metadata and fix style if necessary.
:raises QuestionValidationException if metadata is invalid.
"""
var_type = var_data.get("type", "").strip()
if not var_type:
raise QuestionValidationException(
"Question {} is missing type for variable {}".format(
question_name, var_name
)
)
var_data["type"] = var_type
var_desc = var_data.get("description", "").strip()
if not var_desc:
raise QuestionValidationException(
"Question {} is missing description for variable {}".format(
question_name, var_name
)
)
if not var_desc.endswith("."):
var_desc += "."
var_data["description"] = var_desc
return True
def _validate_variable_name(question_name, var_name):
# type: (str, str) -> bool
"""Check if the variable name is valid."""
if not re.match(_VALID_VARIABLE_NAME_REGEX, var_name):
raise QuestionValidationException(
"Question {} has invalid variable name: {}. Only alphanumeric characters are allowed".format(
question_name, var_name
)
)
return True
def _has_valid_ordered_variable_names(ordered_variable_names, variables):
# type: (List[str], Dict[str, Dict[str, Any]]) -> bool
"""Check if ordered_variable_names is present and that it includes all instance variables."""
if not ordered_variable_names:
return False
return len(ordered_variable_names) == len(variables) and set(
ordered_variable_names
) == set(variables.keys())
def _compute_docstring(base_docstring, var_names, variables):
# type: (str, List[str], Dict[str, Any]) -> str
"""Compute a docstring for a question, based on the variables."""
if not variables:
return base_docstring
return "\n".join(
[base_docstring, "\n"]
+ [_compute_var_help(var, variables[var]) for var in var_names]
)
def _compute_var_help(var_name, var_data):
# type: (str, Dict[str, Any]) -> str
"""Create explanation of a single question variable."""
# Variable help has 2 sections: param and type. Param section may include
# optionally: required (inline), and allowed_values and/or default_value on
# their own lines with a leading blank.
param_line = ":param {name}: {opt_req}{desc}\n".format(
name=var_name,
opt_req="*Required.* " if not var_data.get("optional", False) else "",
desc=var_data["description"],
)
allowed_values = _build_allowed_values(var_data)
if allowed_values:
param_line += " Allowed values:\n\n * {}\n".format(
"\n * ".join([str(v) for v in allowed_values])
)
default_value = var_data.get("value")
if default_value is not None:
param_line += "\n Default value: ``{}``\n".format(default_value)
type_line = ":type {name}: {type}".format(name=var_name, type=var_data["type"])
return param_line + type_line
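# Worked example (assumed metadata): given
#     var_data = {"description": "Nodes to include.", "type": "nodeSpec", "optional": False}
# _compute_var_help("nodes", var_data) yields roughly
#     :param nodes: *Required.* Nodes to include.
#     :type nodes: nodeSpec
# with "Allowed values" and "Default value" lines appended only when the
# metadata supplies "values"/"allowedValues" or a default "value".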
def _build_allowed_values(var_data):
values_dict = var_data.get("values")
if values_dict:
return [AllowedValue.from_dict(v) for v in values_dict]
old_values_dict = var_data.get("allowedValues")
if old_values_dict:
return [AllowedValue(v) for v in old_values_dict]
return None
def load_questions(
question_dir=None, from_server=False, module_name=bfq.__name__, session=None
):
# type: (Optional[str], bool, str, Optional[Session]) -> None
"""Load questions from directory or batfish service.
:param question_dir: Load questions from this local directory instead of
remote questions from the batfish service.
:type question_dir: str
:param from_server: if true or `question_dir` is None, load questions from
service.
:type from_server: bool
:param module_name: the name of the module where questions should be loaded.
Default is :py:mod:`pybatfish.question.bfq`
:param session: Batfish session to load questions from
:type session: :class:`~pybatfish.client.session.Session`
"""
if not session:
from pybatfish.client.commands import bf_session
s = bf_session
else:
s = session
s.q.load(directory=question_dir)
new_names = set() # type: Set[str]
if not question_dir or from_server:
remote_questions = _load_remote_questions_templates(s)
_install_questions_in_module(remote_questions, module_name)
new_names |= set(name for name, q in remote_questions)
if question_dir:
local_questions = load_dir_questions(
question_dir, session=s, moduleName=module_name
)
over_written_questions = len(set(local_questions) & new_names)
if over_written_questions > 0:
logging.getLogger(__name__).info(
"Overwrote {over_written_questions} remote question(s) with local question(s)".format(
over_written_questions=over_written_questions
)
)
def _load_remote_questions_templates(session):
# type: (Session) -> Set[Tuple[str, QuestionMeta]]
logger = logging.getLogger(__name__)
num_questions = 0
remote_questions = set()
questions_dict = _bf_get_question_templates(session)
for (key, value) in questions_dict.items():
try:
remote_questions.add(_load_question_dict(json.loads(value), session))
num_questions += 1
except Exception as err:
logger.error(
"Could not load question {name} : {err}".format(name=key, err=err)
)
logger.info(
"Successfully loaded {numQuestions} questions from remote".format(
numQuestions=num_questions
)
)
return remote_questions
def _validate(questionJson):
valid = True
errorMessage = "\n"
instanceData = questionJson["instance"]
if "variables" in instanceData:
variables = instanceData["variables"]
for variableName, variable in variables.items():
# First check for missing mandatory parameters
optional = False
if "optional" in variable:
optional = variable["optional"]
if not optional:
if "value" not in variable:
valid = False
errorMessage += (
" Missing value for mandatory parameter: '"
+ variableName
+ "'\n"
)
# Now do some dynamic type-checking
allowed_values = _build_allowed_values(variable)
if "value" in variable:
value = variable["value"]
variableType = variable["type"]
minLength = None
if "minLength" in variable:
minLength = variable["minLength"]
isArray = "minElements" in variable
if isArray:
if not isinstance(value, list):
valid = False
errorMessage += (
" Expected a list for parameter: '" + variableName + "'\n"
)
else:
minElements = variable["minElements"]
if len(value) < minElements:
valid = False
errorMessage += (
" Number of elements provided for parameter: '"
+ variableName
+ "' less than the minimum: "
+ str(minElements)
+ "\n"
)
else:
for i in range(0, len(value)):
valueElement = value[i]
typeValid, _ = _validateType(valueElement, variableType)
if not typeValid:
valid = False
errorMessage += (
" Expected type: '"
+ variableType
+ "' for element: "
+ str(i)
+ " of parameter: "
" + variableName + "
"\n"
)
elif minLength and len(valueElement) < minLength:
valid = False
errorMessage += (
" Length of value: '"
+ valueElement
+ "' for element : "
+ str(i)
+ " of parameter: '"
+ variableName
+ "' below minimum length: "
+ str(minLength)
+ "\n"
)
elif (
allowed_values is not None
and valueElement
not in [v.name for v in allowed_values]
):
valid = False
errorMessage += " Value: '{}' is not among allowed values {} of parameter: '{}'\n".format(
valueElement,
[v.name for v in allowed_values],
variableName,
)
else:
typeValid, typeValidErrorMessage = _validateType(
value, variableType
)
if not typeValid:
valid = False
if typeValidErrorMessage:
errorMessage += (
" Expected type: '"
+ variableType
+ "' for parameter: '"
+ variableName
+ "'. Got error: '"
+ typeValidErrorMessage
+ "'\n"
)
else:
errorMessage += (
" Expected type: '"
+ variableType
+ "' for parameter: '"
+ variableName
+ "'\n"
)
elif minLength and len(value) < minLength:
valid = False
errorMessage += (
" Length of value: '"
+ value
+ "' for parameter: '"
+ variableName
+ "' below minimum length: "
+ str(minLength)
+ "\n"
)
elif allowed_values is not None and value not in [
v.name for v in allowed_values
]:
valid = False
errorMessage += " Value: '{}' is not among allowed values {} of parameter: '{}'\n".format(
value, [v.name for v in allowed_values], variableName
)
if not valid:
raise QuestionValidationException(errorMessage)
return True
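# Illustrative sketch (assumed question dicts): _validate raises
# QuestionValidationException when a mandatory variable has no value, and
# returns True for a well-formed question.
def _example_validate_usage():  # pragma: no cover - illustrative sketch only
    passing = {"instance": {"variables": {
        "nodes": {"type": "nodeSpec", "optional": False, "value": "as1border1"}}}}
    failing = {"instance": {"variables": {
        "nodes": {"type": "nodeSpec", "optional": False}}}}
    assert _validate(passing) is True
    try:
        _validate(failing)
    except QuestionValidationException as err:
        return err  # message lists the missing mandatory parameter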
def _validateType(value, expectedType):
"""
Check if the input `value` has contents that match the requirements specified by `expectedType`.
Return a tuple: the first element is a boolean indicating whether validation succeeded, and
the second element contains the error message, if there is one.
:raises QuestionValidationException
"""
if expectedType == VariableType.BOOLEAN:
return isinstance(value, bool), None
elif expectedType == VariableType.COMPARATOR:
validComparators = ["<", "<=", "==", ">=", ">", "!="]
if value not in validComparators:
return (
False,
"'{}' is not a known comparator. Valid options are: '{}'".format(
value, ", ".join(validComparators)
),
)
return True, None
elif expectedType == VariableType.INTEGER:
INT32_MIN = -(2 ** 31)
INT32_MAX = 2 ** 31 - 1
valid = isinstance(value, int) and INT32_MIN <= value <= INT32_MAX
return valid, None
elif expectedType == VariableType.FLOAT:
return isinstance(value, float), None
elif expectedType == VariableType.DOUBLE:
return isinstance(value, float), None
elif expectedType in [
VariableType.ADDRESS_GROUP_NAME,
VariableType.APPLICATION_SPEC,
VariableType.BGP_PEER_PROPERTY_SPEC,
VariableType.BGP_PROCESS_PROPERTY_SPEC,
VariableType.BGP_ROUTE_STATUS_SPEC,
VariableType.BGP_SESSION_COMPAT_STATUS_SPEC,
VariableType.BGP_SESSION_STATUS_SPEC,
VariableType.BGP_SESSION_TYPE_SPEC,
VariableType.DISPOSITION_SPEC,
VariableType.FILTER,
VariableType.FILTER_SPEC,
VariableType.INTEGER_SPACE,
VariableType.INTERFACE,
VariableType.INTERFACE_GROUP_NAME,
VariableType.INTERFACE_PROPERTY_SPEC,
VariableType.INTERFACES_SPEC,
VariableType.IP_PROTOCOL_SPEC,
VariableType.IP_SPACE_SPEC,
VariableType.IPSEC_SESSION_STATUS_SPEC,
VariableType.JAVA_REGEX,
VariableType.JSON_PATH_REGEX,
VariableType.LOCATION_SPEC,
VariableType.MLAG_ID,
VariableType.MLAG_ID_SPEC,
VariableType.NAMED_STRUCTURE_SPEC,
VariableType.NODE_PROPERTY_SPEC,
VariableType.NODE_ROLE_DIMENSION_NAME,
VariableType.NODE_ROLE_NAME,
VariableType.NODE_SPEC,
VariableType.OSPF_INTERFACE_PROPERTY_SPEC,
VariableType.OSPF_PROCESS_PROPERTY_SPEC,
VariableType.OSPF_SESSION_STATUS_SPEC,
VariableType.REFERENCE_BOOK_NAME,
VariableType.ROUTING_POLICY_SPEC,
VariableType.ROUTING_PROTOCOL_SPEC,
VariableType.STRUCTURE_NAME,
VariableType.VRF,
VariableType.VXLAN_VNI_PROPERTY_SPEC,
VariableType.ZONE,
]:
if not isinstance(value, str):
return False, "A Batfish {} must be a string".format(expectedType)
return True, None
elif expectedType == VariableType.IP:
if not isinstance(value, str):
return False, "A Batfish {} must be a string".format(expectedType)
else:
return _isIp(value)
elif expectedType == VariableType.IP_WILDCARD:
if not isinstance(value, str):
return False, "A Batfish {} must be a string".format(expectedType)
else:
return _isIpWildcard(value)
elif expectedType == VariableType.JSON_PATH:
return _isJsonPath(value)
elif expectedType == VariableType.LONG:
INT64_MIN = -(2 ** 63)
INT64_MAX = 2 ** 63 - 1
valid = isinstance(value, int) and INT64_MIN <= value <= INT64_MAX
return valid, None
elif expectedType == VariableType.PREFIX:
if not isinstance(value, str):
return False, "A Batfish {} must be a string".format(expectedType)
else:
return _isPrefix(value)
elif expectedType == VariableType.PREFIX_RANGE:
if not isinstance(value, str):
return False, "A Batfish {} must be a string".format(expectedType)
else:
return _isPrefixRange(value)
elif expectedType == VariableType.QUESTION:
return isinstance(value, QuestionBase), None
elif expectedType == VariableType.BGP_ROUTES:
if not isinstance(value, list) or not all(
isinstance(r, BgpRoute) for r in value
):
return False, "A Batfish {} must be a list of BgpRoute".format(expectedType)
return True, None
elif expectedType == VariableType.STRING:
return isinstance(value, str), None
elif expectedType == VariableType.SUBRANGE:
if isinstance(value, int):
return True, None
elif isinstance(value, str):
return _isSubRange(value)
else:
return (
False,
"A Batfish {} must either be a string or an integer".format(
expectedType
),
)
elif expectedType == VariableType.PROTOCOL:
if not isinstance(value, str):
return False, "A Batfish {} must be a string".format(expectedType)
else:
validProtocols = ["dns", "ssh", "tcp", "udp"]
if not value.lower() in validProtocols:
return (
False,
"'{}' is not a valid protocols. Valid options are: '{}'".format(
value, ", ".join(validProtocols)
),
)
return True, None
elif expectedType == VariableType.IP_PROTOCOL:
if not isinstance(value, str):
return False, "A Batfish {} must be a string".format(expectedType)
else:
try:
intValue = int(value)
if not 0 <= intValue < 256:
return (
False,
"'{}' is not in valid ipProtocol range: 0-255".format(intValue),
)
return True, None
except ValueError:
# TODO: Should be validated at server side
return True, None
elif expectedType in [
VariableType.ANSWER_ELEMENT,
VariableType.BGP_ROUTE_CONSTRAINTS,
VariableType.HEADER_CONSTRAINT,
VariableType.PATH_CONSTRAINT,
]:
return True, None
else:
logging.getLogger(__name__).warning(
"WARNING: skipping validation for unknown argument type {}".format(
expectedType
)
)
return True, None
def _isJsonPath(value):
"""
Check if the input represents a valid jsonPath specification (a dictionary with a 'path' string and an optional 'suffix' boolean).
Return a tuple: the first element is a boolean indicating whether validation succeeded, and
the second element contains the error message, if there is one.
"""
if not isinstance(value, dict):
return (
False,
"Expected a jsonPath dictionary with elements 'path' (string) and optional 'suffix' (boolean)",
)
elif "path" not in value:
return False, "Missing 'path' element of jsonPath"
else:
path = value["path"]
if not isinstance(path, str):
return False, "'path' element of jsonPath dictionary should be a string"
if "suffix" in value:
suffix = value["suffix"]
if not isinstance(suffix, bool):
return (
False,
"'suffix' element of jsonPath dictionary should be a boolean",
)
return True, None
def _isIp(value):
"""
Check if the input string represents a valid IP address.
A valid IP can take one of two forms:
1. A string containing three '.' characters that separate it into
four segments, each of which is an integer between 0 and 255.
2. A string of the form "INVALID_IP(XXXl)" or "AUTO/NONE(XXXl)",
where XXX is a long value.
Return a tuple: the first element is a boolean indicating whether validation succeeded, and
the second element contains the error message, if there is one.
"""
addrArray = value.split(".")
if not len(addrArray) == 4:
if value.startswith("INVALID_IP") or value.startswith("AUTO/NONE"):
tail = value.split("(")
if len(tail) == 2:
longStrParts = tail[1].split("l")
if len(longStrParts) == 2:
try:
int(longStrParts[0])
return True, None
except ValueError:
return False, "Invalid ip string: '{}'".format(value)
return False, "Invalid ip string: '{}'".format(value)
else:
for segments in addrArray:
try:
segmentVal = int(segments)
except ValueError:
return (
False,
"Ip segment is not a number: '{}' in ip string: '{}'".format(
segments, value
),
)
if not 0 <= segmentVal <= 255:
return (
False,
"Ip segment is out of range 0-255: '{}' in ip string: '{}'".format(
segments, value
),
)
return True, None
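# Illustrative sketch: the two accepted forms of _isIp, plus a rejected value.
def _example_is_ip_usage():  # pragma: no cover - illustrative sketch only
    assert _isIp("10.0.0.1") == (True, None)            # dotted-quad form
    assert _isIp("INVALID_IP(12345l)") == (True, None)  # special long form
    assert _isIp("10.0.0.256")[0] is False              # segment out of range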
def _isSubRange(value):
"""
Check if the input string represents a valid subRange.
Return a tuple: the first element is a boolean indicating whether validation succeeded, and
the second element contains the error message, if there is one.
"""
contents = value.split("-")
if len(contents) != 2:
return False, "Invalid subRange: {}".format(value)
try:
int(contents[0])
except ValueError:
return False, "Invalid subRange start: {}".format(contents[0])
try:
int(contents[1])
except ValueError:
return False, "Invalid subRange end: {}".format(contents[1])
return True, None
def _isPrefix(value):
"""
Check if the input string represents a valid prefix.
A prefix contains two parts separated by '/'. The first part represents a
valid IP address, the second part is an integer value.
Return a tuple: the first element is a boolean indicating whether validation succeeded,
and the second element contains the error message, if there is one.
"""
contents = value.split("/")
if not len(contents) == 2:
return False, "Invalid prefix string: '{}'".format(value)
try:
int(contents[1])
except ValueError:
return False, "Prefix length must be an integer"
return _isIp(contents[0])
def _isPrefixRange(value):
"""
Check if the input string represents a valid prefix range.
A prefix range contains a valid prefix, optionally followed by ":" and a subrange.
Return a tuple: the first element is a boolean indicating whether validation succeeded,
and the second element contains the error message, if there is one.
"""
contents = value.split(":")
if len(contents) < 1 or len(contents) > 2:
return False, "Invalid PrefixRange string: '{}'".format(value)
if not _isPrefix(contents[0])[0]:
return (
False,
"Invalid prefix string: '{}' in prefix range string: '{}'".format(
contents[0], value
),
)
if len(contents) == 2:
return _isSubRange(contents[1])
return True, None
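# Illustrative sketch: a prefix range is a prefix optionally followed by ':'
# and a length subrange.
def _example_is_prefix_range_usage():  # pragma: no cover - illustrative sketch only
    assert _isPrefixRange("10.0.0.0/8") == (True, None)        # bare prefix
    assert _isPrefixRange("10.0.0.0/8:16-24") == (True, None)  # with subrange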
def _isIpWildcard(value):
"""
Check if the input string represents a valid ipWildCard.
A valid ipWildcard can take one of three forms:
1. A normal IP address (_isIp() returns true)
2. A string containing a ':', where each side of the colon is a valid IP address
3. A string containing a '/', where the left side of the slash is a valid IP address
and the right side is an integer
Return a tuple: the first element is a boolean indicating whether validation succeeded,
and the second element contains the error message, if there is one.
"""
if ":" in value:
contents = value.split(":")
if not len(contents) == 2:
return False, "Invalid IpWildcard string: '{}'".format(value)
if not _isIp(contents[0])[0]:
return False, "Invalid ip string: '{}'".format(contents[0])
else:
return _isIp(contents[1])
elif "/" in value:
contents = value.split("/")
if not len(contents) == 2:
return False, "Invalid IpWildcard string: '{}'".format(value)
if not _isIp(contents[0])[0]:
return False, "Invalid ip string: '{}'".format(contents[0])
else:
try:
int(contents[1])
return True, None
except ValueError:
return (
False,
"Invalid prefix length: '{}' in IpWildcard string: '{}'".format(
contents[1], value
),
)
else:
return _isIp(value)
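# Illustrative sketch: the three wildcard forms accepted by _isIpWildcard.
def _example_is_ip_wildcard_usage():  # pragma: no cover - illustrative sketch only
    assert _isIpWildcard("10.0.0.1") == (True, None)            # plain IP
    assert _isIpWildcard("10.0.0.0:0.0.0.255") == (True, None)  # ip:mask form
    assert _isIpWildcard("10.0.0.0/24") == (True, None)         # prefix form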
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model, HASH_SESSION_KEY
from django.core import mail
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.test.utils import override_settings
from djconfig.utils import override_djconfig
from ..core.tests import utils
from .forms import UserProfileForm, EmailChangeForm, UserForm, EmailCheckForm
from ..comment.like.models import CommentLike
from ..topic.models import Topic
from ..comment.models import Comment
from ..comment.bookmark.models import CommentBookmark
from .utils.tokens import UserActivationTokenGenerator, UserEmailChangeTokenGenerator
from .utils.email import send_activation_email, send_email_change_email, sender
from .utils import email
from . import middleware
User = get_user_model()
class UserViewTest(TestCase):
def setUp(self):
utils.cache_clear()
self.user = utils.create_user()
self.user2 = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(self.category, user=self.user2)
self.topic2 = utils.create_topic(self.category)
def test_user_views_denied_to_non_logged_users(self):
"""
profile user's topics, comments and likes are public; account views (update, password change, email change) redirect guests to login
"""
pk = self.user.pk
slug = self.user.st.slug
response = self.client.get(reverse('spirit:user:topics', kwargs={'pk': pk, 'slug': slug}))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('spirit:user:detail', kwargs={'pk': pk, 'slug': slug}))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('spirit:user:likes', kwargs={'pk': pk, 'slug': slug}))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('spirit:user:update'))
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse('spirit:user:password-change'))
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse('spirit:user:email-change'))
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse('spirit:user:email-change-confirm', kwargs={'token': "foo"}))
self.assertEqual(response.status_code, 302)
def test_profile_topics(self):
"""
profile user's topics
"""
utils.login(self)
response = self.client.get(reverse("spirit:user:topics", kwargs={'pk': self.user2.pk,
'slug': self.user2.st.slug}))
self.assertEqual(response.status_code, 200)
self.assertEqual(list(response.context['topics']), [self.topic, ])
self.assertEqual(response.context['p_user'], self.user2)
def test_profile_topics_order(self):
"""
topics ordered by date
"""
Topic.objects.all().delete()
category = utils.create_category()
topic_a = utils.create_topic(category=category, user=self.user2)
topic_b = utils.create_topic(category=category, user=self.user2)
topic_c = utils.create_topic(category=category, user=self.user2)
Topic.objects.filter(pk=topic_a.pk).update(date=timezone.now() - datetime.timedelta(days=10))
Topic.objects.filter(pk=topic_c.pk).update(date=timezone.now() - datetime.timedelta(days=5))
utils.login(self)
response = self.client.get(reverse("spirit:user:topics", kwargs={'pk': self.user2.pk,
'slug': self.user2.st.slug}))
self.assertEqual(list(response.context['topics']), [topic_b, topic_c, topic_a])
def test_profile_topics_bookmarks(self):
"""
profile user's topics with bookmarks
"""
bookmark = CommentBookmark.objects.create(topic=self.topic, user=self.user)
utils.login(self)
response = self.client.get(reverse("spirit:user:topics",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(response.status_code, 200)
self.assertEqual(list(response.context['topics']), [self.topic, ])
self.assertEqual(response.context['topics'][0].bookmark, bookmark)
@override_djconfig(topics_per_page=1)
def test_profile_topics_paginate(self):
"""
profile user's topics paginated
"""
topic = utils.create_topic(self.category, user=self.user2)
utils.login(self)
response = self.client.get(reverse(
"spirit:user:topics",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(response.status_code, 200)
self.assertEqual(list(response.context['topics']), [topic, ])
def test_profile_topics_dont_show_removed_or_private(self):
"""
don't show private or removed topics
"""
Topic.objects.all().delete()
category = utils.create_category()
category_removed = utils.create_category(is_removed=True)
subcategory = utils.create_category(parent=category_removed)
subcategory_removed = utils.create_category(parent=category, is_removed=True)
utils.create_private_topic(user=self.user2)
utils.create_topic(category=category, user=self.user2, is_removed=True)
utils.create_topic(category=category_removed, user=self.user2)
utils.create_topic(category=subcategory, user=self.user2)
utils.create_topic(category=subcategory_removed, user=self.user2)
utils.login(self)
response = self.client.get(reverse(
"spirit:user:topics",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(list(response.context['topics']), [])
def test_profile_topics_invalid_slug(self):
"""
profile user's topics, invalid user slug
"""
utils.login(self)
response = self.client.get(reverse(
"spirit:user:topics",
kwargs={'pk': self.user2.pk, 'slug': "invalid"}))
expected_url = reverse(
"spirit:user:topics",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug})
self.assertRedirects(response, expected_url, status_code=301)
def test_profile_comments(self):
"""
profile user's comments
"""
utils.login(self)
comment = utils.create_comment(user=self.user2, topic=self.topic)
utils.create_comment(user=self.user, topic=self.topic)
response = self.client.get(reverse(
"spirit:user:detail",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(response.status_code, 200)
self.assertEqual(list(response.context['comments']), [comment, ])
self.assertEqual(response.context['p_user'], self.user2)
def test_profile_comments_order(self):
"""
comments ordered by date
"""
comment_a = utils.create_comment(user=self.user2, topic=self.topic)
comment_b = utils.create_comment(user=self.user2, topic=self.topic)
comment_c = utils.create_comment(user=self.user2, topic=self.topic)
Comment.objects.filter(pk=comment_a.pk).update(date=timezone.now() - datetime.timedelta(days=10))
Comment.objects.filter(pk=comment_c.pk).update(date=timezone.now() - datetime.timedelta(days=5))
utils.login(self)
response = self.client.get(reverse(
"spirit:user:detail",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(list(response.context['comments']), [comment_b, comment_c, comment_a])
@override_djconfig(comments_per_page=1)
def test_profile_comments_paginate(self):
"""
profile user's comments paginated
"""
utils.create_comment(user=self.user2, topic=self.topic)
comment = utils.create_comment(user=self.user2, topic=self.topic)
utils.login(self)
response = self.client.get(reverse(
"spirit:user:detail",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(response.status_code, 200)
self.assertEqual(list(response.context['comments']), [comment, ])
def test_profile_comments_dont_show_removed_or_private(self):
"""
don't show private or removed topics
"""
category = utils.create_category()
category_removed = utils.create_category(is_removed=True)
subcategory = utils.create_category(parent=category_removed)
subcategory_removed = utils.create_category(parent=category, is_removed=True)
topic_a = utils.create_private_topic(user=self.user2)
topic_b = utils.create_topic(category=category, is_removed=True)
topic_c = utils.create_topic(category=category_removed)
topic_d = utils.create_topic(category=subcategory)
topic_e = utils.create_topic(category=subcategory_removed)
utils.create_comment(user=self.user2, topic=topic_a.topic)
utils.create_comment(user=self.user2, topic=topic_b)
utils.create_comment(user=self.user2, topic=topic_c)
utils.create_comment(user=self.user2, topic=topic_d)
utils.create_comment(user=self.user2, topic=topic_e)
utils.login(self)
response = self.client.get(reverse(
"spirit:user:detail",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(list(response.context['comments']), [])
def test_profile_comments_invalid_slug(self):
"""
profile user's comments, invalid user slug
"""
utils.login(self)
response = self.client.get(reverse(
"spirit:user:detail",
kwargs={'pk': self.user2.pk, 'slug': "invalid"}))
expected_url = reverse(
"spirit:user:detail",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug})
self.assertRedirects(response, expected_url, status_code=301)
def test_profile_likes(self):
"""
profile user's likes
"""
utils.login(self)
comment = utils.create_comment(user=self.user, topic=self.topic)
comment2 = utils.create_comment(user=self.user2, topic=self.topic)
like = CommentLike.objects.create(user=self.user2, comment=comment)
CommentLike.objects.create(user=self.user, comment=comment2)
response = self.client.get(reverse(
"spirit:user:likes",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(response.status_code, 200)
self.assertEqual(list(response.context['comments']), [like.comment, ])
self.assertEqual(response.context['p_user'], self.user2)
def test_profile_likes_order(self):
"""
comments ordered by date
"""
comment_a = utils.create_comment(user=self.user, topic=self.topic)
comment_b = utils.create_comment(user=self.user, topic=self.topic)
comment_c = utils.create_comment(user=self.user, topic=self.topic)
like_a = CommentLike.objects.create(user=self.user2, comment=comment_a)
CommentLike.objects.create(user=self.user2, comment=comment_b)
like_c = CommentLike.objects.create(user=self.user2, comment=comment_c)
CommentLike.objects.filter(pk=like_a.pk).update(date=timezone.now() - datetime.timedelta(days=10))
CommentLike.objects.filter(pk=like_c.pk).update(date=timezone.now() - datetime.timedelta(days=5))
utils.login(self)
response = self.client.get(reverse(
"spirit:user:likes",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(list(response.context['comments']), [comment_b, comment_c, comment_a])
def test_profile_likes_dont_show_removed_or_private(self):
"""
don't show private or removed topics
"""
category = utils.create_category()
category_removed = utils.create_category(is_removed=True)
subcategory = utils.create_category(parent=category_removed)
subcategory_removed = utils.create_category(parent=category, is_removed=True)
topic_a = utils.create_private_topic(user=self.user2)
topic_b = utils.create_topic(category=category, is_removed=True)
topic_c = utils.create_topic(category=category_removed)
topic_d = utils.create_topic(category=subcategory)
topic_e = utils.create_topic(category=subcategory_removed)
comment_a = utils.create_comment(user=self.user2, topic=topic_a.topic)
comment_b = utils.create_comment(user=self.user, topic=topic_b)
comment_c = utils.create_comment(user=self.user, topic=topic_c)
comment_d = utils.create_comment(user=self.user, topic=topic_d)
comment_e = utils.create_comment(user=self.user, topic=topic_e)
CommentLike.objects.create(user=self.user2, comment=comment_a)
CommentLike.objects.create(user=self.user2, comment=comment_b)
CommentLike.objects.create(user=self.user2, comment=comment_c)
CommentLike.objects.create(user=self.user2, comment=comment_d)
CommentLike.objects.create(user=self.user2, comment=comment_e)
utils.login(self)
response = self.client.get(reverse(
"spirit:user:likes",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(list(response.context['comments']), [])
def test_profile_likes_invalid_slug(self):
"""
profile user's likes, invalid user slug
"""
utils.login(self)
response = self.client.get(reverse(
"spirit:user:likes",
kwargs={'pk': self.user2.pk, 'slug': "invalid"}))
expected_url = reverse(
"spirit:user:likes",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug})
self.assertRedirects(response, expected_url, status_code=301)
@override_djconfig(comments_per_page=1)
def test_profile_likes_paginate(self):
"""
profile user's likes paginated
"""
comment = utils.create_comment(user=self.user2, topic=self.topic)
comment2 = utils.create_comment(user=self.user2, topic=self.topic)
CommentLike.objects.create(user=self.user2, comment=comment)
like = CommentLike.objects.create(user=self.user2, comment=comment2)
utils.login(self)
response = self.client.get(reverse(
"spirit:user:likes",
kwargs={'pk': self.user2.pk, 'slug': self.user2.st.slug}))
self.assertEqual(response.status_code, 200)
self.assertEqual(list(response.context['comments']), [like.comment, ])
def test_profile_update(self):
"""
profile update
"""
utils.login(self)
# get
response = self.client.get(reverse('spirit:user:update'))
self.assertEqual(response.status_code, 200)
# post
form_data = {'first_name': 'foo', 'last_name': 'bar',
'location': 'spirit', 'timezone': self.user.st.timezone}
response = self.client.post(reverse('spirit:user:update'),
form_data)
expected_url = reverse('spirit:user:update')
self.assertRedirects(response, expected_url, status_code=302)
def test_profile_password_change(self):
"""
profile password change
"""
user = utils.create_user(password="foo")
utils.login(self, user=user, password="foo")
form_data = {'old_password': 'foo',
'new_password1': 'bar',
'new_password2': 'bar'}
response = self.client.post(reverse('spirit:user:password-change'),
form_data)
expected_url = reverse("spirit:user:update")
self.assertRedirects(response, expected_url, status_code=302)
utils.login(self, user=user, password="bar")
# get
response = self.client.get(reverse('spirit:user:password-change'))
self.assertEqual(response.status_code, 200)
def test_profile_password_change_re_login(self):
"""
Changing the password should invalidate the session
"""
user = utils.create_user(password="foo")
utils.login(self, user=user, password="foo")
old_hash = self.client.session[HASH_SESSION_KEY]
form_data = {'old_password': 'foo',
'new_password1': 'bar',
'new_password2': 'bar'}
response = self.client.post(reverse('spirit:user:password-change'), form_data)
expected_url = reverse("spirit:user:update")
self.assertRedirects(response, expected_url, status_code=302)
self.assertNotEqual(old_hash, self.client.session[HASH_SESSION_KEY])
def test_email_change_confirm(self):
"""
email change confirmation
"""
utils.login(self)
new_email = "[email protected]"
token = UserEmailChangeTokenGenerator().generate(self.user, new_email)
response = self.client.get(reverse('spirit:user:email-change-confirm', kwargs={'token': token}))
expected_url = reverse("spirit:user:update")
self.assertRedirects(response, expected_url, status_code=302)
self.assertEqual(User.objects.get(pk=self.user.pk).email, new_email)
def test_email_change_confirm_invalid(self):
"""
The token should expire after email change
"""
utils.login(self)
old_email = "[email protected]"
token = UserEmailChangeTokenGenerator().generate(self.user, old_email)
new_email = "[email protected]"
User.objects.filter(pk=self.user.pk).update(email=new_email)
response = self.client.get(reverse('spirit:user:email-change-confirm', kwargs={'token': token}))
expected_url = reverse("spirit:user:update")
self.assertRedirects(response, expected_url, status_code=302)
self.assertEqual(User.objects.get(pk=self.user.pk).email, new_email)
def test_email_change_duplicated(self):
"""
email should be unique
"""
utils.login(self)
utils.create_user(email="[email protected]")
new_email = "[email protected]"
old_email = self.user.email
token = UserEmailChangeTokenGenerator().generate(self.user, new_email)
self.client.get(reverse('spirit:user:email-change-confirm', kwargs={'token': token}))
self.assertEqual(User.objects.get(pk=self.user.pk).email, old_email)
@override_settings(ST_UNIQUE_EMAILS=False)
def test_email_change_duplicated_allowed(self):
"""
Duplicated email allowed
"""
utils.login(self)
utils.create_user(email="[email protected]")
new_email = "[email protected]"
token = UserEmailChangeTokenGenerator().generate(self.user, new_email)
self.client.get(reverse('spirit:user:email-change-confirm', kwargs={'token': token}))
self.assertEqual(User.objects.get(pk=self.user.pk).email, new_email)
def test_profile_email_change(self):
"""
email change
"""
user = utils.create_user(password="foo")
utils.login(self, user=user, password="foo")
form_data = {'password': 'foo',
'email': '[email protected]'}
response = self.client.post(reverse('spirit:user:email-change'),
form_data)
expected_url = reverse("spirit:user:update")
self.assertRedirects(response, expected_url, status_code=302)
self.assertEquals(len(mail.outbox), 1)
self.assertIn(_("Email change"), mail.outbox[0].subject)
# get
response = self.client.get(reverse('spirit:user:email-change'))
self.assertEqual(response.status_code, 200)
class UserFormTest(TestCase):
def setUp(self):
utils.cache_clear()
self.user = utils.create_user()
def test_profile(self):
"""
edit user profile
"""
form_data = {'first_name': 'foo', 'last_name': 'bar',
'location': 'spirit', 'timezone': self.user.st.timezone}
form = UserProfileForm(data=form_data, instance=self.user.st)
self.assertEqual(form.is_valid(), True)
form = UserForm(data=form_data, instance=self.user)
self.assertEqual(form.is_valid(), True)
def test_profile_timezone_field(self):
form_data = {
'first_name': 'foo', 'last_name': 'bar',
'location': 'spirit', 'timezone': 'UTC'}
form = UserProfileForm(data=form_data, instance=self.user.st)
self.assertEqual(form.is_valid(), True)
form_data['timezone'] = 'badtimezone'
form = UserProfileForm(data=form_data, instance=self.user.st)
self.assertEqual(form.is_valid(), False)
self.assertTrue('timezone' in form.errors)
def test_email_change(self):
"""
email change
"""
user = utils.create_user(password="foo")
form_data = {'email': '[email protected]', 'password': 'foo'}
form = EmailChangeForm(data=form_data, user=user)
self.assertEqual(form.is_valid(), True)
def test_email_change_invalid(self):
"""
email change invalid
"""
user = utils.create_user(password="foo", email="[email protected]")
form_data = {'email': '[email protected]', 'password': 'bad-password'}
form = EmailChangeForm(data=form_data, user=user)
self.assertEqual(form.is_valid(), False)
self.assertNotIn('email', form.cleaned_data)
self.assertNotIn('password', form.cleaned_data)
def test_email_change_email_duplication(self):
"""
email change, don't allow email duplication
"""
utils.create_user(email="[email protected]")
user = utils.create_user(password="foo")
form_data = {'email': '[email protected]', 'password': 'foo'}
form = EmailChangeForm(data=form_data, user=user)
self.assertEqual(form.is_valid(), False)
self.assertNotIn('email', form.cleaned_data)
@override_settings(ST_UNIQUE_EMAILS=False)
def test_email_change_email_duplication_allowed(self):
"""
Duplicated email allowed
"""
utils.create_user(email="[email protected]")
user = utils.create_user(password="foo")
form_data = {'email': '[email protected]', 'password': 'foo'}
form = EmailChangeForm(data=form_data, user=user)
self.assertEqual(form.is_valid(), True)
@override_settings(ST_CASE_INSENSITIVE_EMAILS=True)
def test_email_change_email_case_insensitive(self):
"""
Should lower case the email before validating it
"""
utils.create_user(email="[email protected]")
user = utils.create_user(password="foo")
form_data = {'email': '[email protected]', 'password': 'foo'}
form = EmailChangeForm(data=form_data, user=user)
self.assertEqual(form.is_valid(), False)
@override_settings(ST_CASE_INSENSITIVE_EMAILS=False)
def test_email_change_email_case_sensitive(self):
"""
Should not lower case the email before validating it
"""
utils.create_user(email="[email protected]")
user = utils.create_user(password="foo")
form_data = {'email': '[email protected]', 'password': 'foo'}
form = EmailChangeForm(data=form_data, user=user)
self.assertEqual(form.is_valid(), True)
def test_email_check(self):
"""
Check it's an email
"""
# Unique email
form_data = {'email': '[email protected]', }
form = EmailCheckForm(form_data)
self.assertTrue(form.is_valid())
# Duplicated email
utils.create_user(email="[email protected]")
form_data['email'] = "[email protected]"
form = EmailCheckForm(form_data)
self.assertFalse(form.is_valid())
@override_settings(ST_UNIQUE_EMAILS=False)
def test_email_check_non_unique(self):
"""
Duplicated email allowed
"""
utils.create_user(email="[email protected]")
form_data = {'email': '[email protected]', }
form = EmailCheckForm(form_data)
self.assertTrue(form.is_valid())
@override_settings(ST_CASE_INSENSITIVE_EMAILS=True)
def test_email_check_case_insensitive(self):
"""
Should lower case the email before validating it
"""
utils.create_user(email="[email protected]")
form_data = {'email': '[email protected]', }
form = EmailCheckForm(form_data)
self.assertFalse(form.is_valid())
@override_settings(ST_CASE_INSENSITIVE_EMAILS=False)
def test_email_check_case_sensitive(self):
"""
Should not lower case the email before validating it
"""
utils.create_user(email="[email protected]")
form_data = {'email': '[email protected]', }
form = EmailCheckForm(form_data)
self.assertTrue(form.is_valid())
class UserModelTest(TestCase):
def setUp(self):
utils.cache_clear()
def test_user_superuser(self):
"""
is_superuser should imply is_administrator and is_moderator (model level)
"""
user = User(is_superuser=True)
user.save()
self.assertTrue(user.st.is_administrator)
self.assertTrue(user.st.is_moderator)
def test_user_administrator(self):
"""
is_administrator should imply is_moderator
"""
user = User()
user.save()
user.st.is_administrator = True
user.st.save()
self.assertTrue(user.st.is_moderator)
@override_settings(ST_DOUBLE_POST_THRESHOLD_MINUTES=1)
def test_update_post_hash(self):
"""
Should update the last post hash and date
if the stored hash doesn't match the new one
and/or the stored date is older than the threshold
"""
user = User()
user.save()
self.assertTrue(user.st.update_post_hash('my_hash'))
self.assertFalse(user.st.update_post_hash('my_hash'))
self.assertTrue(user.st.update_post_hash('my_new_hash'))
self.assertFalse(user.st.update_post_hash('my_new_hash'))
@override_settings(ST_DOUBLE_POST_THRESHOLD_MINUTES=10)
def test_update_post_hash_threshold(self):
"""
Should update the last post hash when the time threshold has passed
"""
user = User()
user.save()
self.assertTrue(user.st.update_post_hash('my_hash'))
self.assertFalse(user.st.update_post_hash('my_hash'))
user.st.last_post_on = timezone.now() - datetime.timedelta(minutes=11)
user.st.save()
self.assertTrue(user.st.update_post_hash('my_hash'))
self.assertFalse(user.st.update_post_hash('my_hash'))
@override_settings(ST_DOUBLE_POST_THRESHOLD_MINUTES=1)
def test_update_post_hash_per_user(self):
"""
Should update the last post hash and date for the current user
"""
user = User(username='foo')
user.save()
user_b = User(username='bar')
user_b.save()
self.assertEqual('', User.objects.get(pk=user.pk).st.last_post_hash)
self.assertEqual('', User.objects.get(pk=user_b.pk).st.last_post_hash)
self.assertTrue(user.st.update_post_hash('my_hash'))
self.assertEqual('my_hash', User.objects.get(pk=user.pk).st.last_post_hash)
self.assertEqual('', User.objects.get(pk=user_b.pk).st.last_post_hash)
class UtilsUserTests(TestCase):
def setUp(self):
utils.cache_clear()
self.user = utils.create_user()
def test_user_activation_token_generator(self):
"""
Validate if user can be activated
"""
self.user.st.is_verified = False
activation_token = UserActivationTokenGenerator()
token = activation_token.generate(self.user)
self.assertTrue(activation_token.is_valid(self.user, token))
self.assertFalse(activation_token.is_valid(self.user, "bad token"))
# Invalid after verification
self.user.st.is_verified = True
self.assertFalse(activation_token.is_valid(self.user, token))
# Invalid for different user
user2 = utils.create_user()
self.assertFalse(activation_token.is_valid(user2, token))
def test_user_email_change_token_generator(self):
"""
Email change
"""
new_email = "[email protected]"
email_change_token = UserEmailChangeTokenGenerator()
token = email_change_token.generate(self.user, new_email)
self.assertTrue(email_change_token.is_valid(self.user, token))
self.assertFalse(email_change_token.is_valid(self.user, "bad token"))
# get new email
self.assertTrue(email_change_token.is_valid(self.user, token))
self.assertEqual(email_change_token.get_email(), new_email)
# Invalid for different user
user2 = utils.create_user()
self.assertFalse(email_change_token.is_valid(user2, token))
# Invalid after email change
self.user.email = "[email protected]"
self.assertFalse(email_change_token.is_valid(self.user, token))
def test_user_activation_email(self):
"""
Send activation email
"""
self._monkey_sender_called = False
def monkey_sender(request, subject, template_name, context, email):
self.assertEqual(request, req)
self.assertEqual(email, [self.user.email, ])
self.assertTrue(
UserActivationTokenGenerator().is_valid(
self.user, context['token']))
self.assertEqual(context['user_id'], self.user.pk)
self.assertEqual(subject, _("User activation"))
self.assertEqual(template_name, 'spirit/user/activation_email.html')
self._monkey_sender_called = True
req = RequestFactory().get('/')
org_sender, email.sender = email.sender, monkey_sender
try:
send_activation_email(req, self.user)
self.assertTrue(self._monkey_sender_called)
finally:
email.sender = org_sender
def test_user_activation_email_complete(self):
"""
Integration test
"""
req = RequestFactory().get('/')
send_activation_email(req, self.user)
self.assertEquals(len(mail.outbox), 1)
def test_email_change_email(self):
"""
Send change email
"""
self._monkey_sender_called = False
def monkey_sender(request, subject, template_name, context, email):
self.assertEqual(request, req)
self.assertEqual(email, [self.user.email, ])
change_token = UserEmailChangeTokenGenerator()
token = change_token.generate(self.user, new_email)
self.assertDictEqual(context, {'token': token, })
self.assertEqual(subject, _("Email change"))
self.assertEqual(template_name, 'spirit/user/email_change_email.html')
self._monkey_sender_called = True
req = RequestFactory().get('/')
new_email = "[email protected]"
org_sender, email.sender = email.sender, monkey_sender
try:
send_email_change_email(req, self.user, new_email)
self.assertTrue(self._monkey_sender_called)
finally:
email.sender = org_sender
def test_email_change_email_complete(self):
"""
Integration test
"""
req = RequestFactory().get('/')
send_email_change_email(req, self.user, "[email protected]")
self.assertEquals(len(mail.outbox), 1)
def test_sender(self):
return # TODO: fix for html emails
"""
Base email sender
"""
class SiteMock:
name = "foo"
domain = "bar.com"
def monkey_get_current_site(request):
return SiteMock
def monkey_render_to_string(template, data):
self.assertEquals(template, template_name)
self.assertDictEqual(data, {'user_id': self.user.pk,
'token': token,
'site_name': SiteMock.name,
'domain': SiteMock.domain,
'protocol': 'https' if req.is_secure() else 'http'})
return "email body"
req = RequestFactory().get('/')
token = "token"
subject = SiteMock.name
template_name = "spirit/_base_email.html"
context = {'user_id': self.user.pk, 'token': token}
org_site, email.get_current_site = email.get_current_site, monkey_get_current_site
org_render_to_string, email.render_to_string = email.render_to_string, monkey_render_to_string
try:
sender(req, subject, template_name, context, [self.user.email, ])
finally:
email.get_current_site = org_site
email.render_to_string = org_render_to_string
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].subject, SiteMock.name)
self.assertEquals(mail.outbox[0].body, "email body")
self.assertEquals(mail.outbox[0].from_email, "foo <[email protected]>")
self.assertEquals(mail.outbox[0].to, [self.user.email, ])
@override_settings(DEFAULT_FROM_EMAIL='[email protected]')
def test_sender_from_email(self):
"""
Should use DEFAULT_FROM_EMAIL instead of the default
"""
class SiteMock:
name = "foo"
domain = "bar.com"
def monkey_get_current_site(*args, **kw):
return SiteMock
def monkey_render_to_string(*args, **kw):
return "email body"
req = RequestFactory().get('/')
token = "token"
subject = SiteMock.name
template_name = "template.html"
context = {'user_id': self.user.pk, 'token': token}
org_site, email.get_current_site = email.get_current_site, monkey_get_current_site
org_render_to_string, email.render_to_string = email.render_to_string, monkey_render_to_string
try:
sender(req, subject, template_name, context, [self.user.email, ])
finally:
email.get_current_site = org_site
email.render_to_string = org_render_to_string
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].from_email, "[email protected]")
class UserMiddlewareTest(TestCase):
def setUp(self):
timezone.deactivate()
utils.cache_clear()
self.user = utils.create_user()
@override_settings(TIME_ZONE='UTC')
def test_timezone(self):
"""
Should activate the user timezone
"""
timezone.deactivate()
utils.login(self)
req = RequestFactory().get('/')
req.user = self.user
time_zone = 'America/Argentina/Buenos_Aires'
self.user.st.timezone = time_zone
self.assertEqual(timezone.get_current_timezone().zone, 'UTC')
middleware.TimezoneMiddleware().process_request(req)
self.assertEqual(timezone.get_current_timezone().zone, time_zone)
@override_settings(TIME_ZONE='UTC')
def test_timezone_bad_tz(self):
timezone.deactivate()
utils.login(self)
req = RequestFactory().get('/')
req.user = self.user
self.user.st.timezone = 'badtimezone'
time_zone = 'America/Argentina/Buenos_Aires'
timezone.activate(time_zone)
self.assertEqual(timezone.get_current_timezone().zone, time_zone)
middleware.TimezoneMiddleware().process_request(req)
self.assertEqual(timezone.get_current_timezone().zone, 'UTC')
@override_settings(TIME_ZONE='UTC')
def test_timezone_anonymous_user(self):
class AnonymUserMock(object):
def is_authenticated(self):
return False
timezone.deactivate()
req = RequestFactory().get('/')
req.user = AnonymUserMock()
time_zone = 'America/Argentina/Buenos_Aires'
timezone.activate(time_zone)
self.assertEqual(timezone.get_current_timezone().zone, time_zone)
middleware.TimezoneMiddleware().process_request(req)
self.assertEqual(timezone.get_current_timezone().zone, 'UTC')
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from designate.i18n import _LI
from designate import rpc
LOG = logging.getLogger(__name__)
CENTRAL_API = None
class CentralAPI(object):
"""
Client side of the central RPC API.
API version history:
1.0 - Initial version
1.1 - Add new finder methods
1.2 - Add get_tenant and get_tenants
1.3 - Add get_absolute_limits
2.0 - Renamed most get_resources to find_resources
2.1 - Add quota methods
3.0 - RecordSet Changes
3.1 - Add floating ip ptr methods
3.2 - TLD Api changes
3.3 - Add methods for blacklisted domains
4.0 - Create methods now accept designate objects
4.1 - Add methods for server pools
4.2 - Add methods for pool manager integration
4.3 - Added Zone Transfer Methods
5.0 - Remove dead server code
5.1 - Add xfr_domain
5.2 - Add Zone Import methods
"""
RPC_API_VERSION = '5.2'
def __init__(self, topic=None):
topic = topic if topic else cfg.CONF.central_topic
target = messaging.Target(topic=topic, version=self.RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='5.2')
@classmethod
def get_instance(cls):
"""
        The rpc.get_client() call made during API object initialization raises
        an assertion error if designate.rpc.TRANSPORT has not been set up by
        rpc.init() beforehand.
        This method avoids that by creating the rpcapi lazily, on demand.
"""
global CENTRAL_API
if not CENTRAL_API:
CENTRAL_API = cls()
return CENTRAL_API
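    # Illustrative usage sketch (not part of the original module; names such
    # as `context` and `domain_id` are placeholders): once rpc.init() has set
    # up the transport, callers obtain the shared client and invoke the RPC
    # wrappers defined below, e.g.
    #
    #   central_api = CentralAPI.get_instance()
    #   domain = central_api.get_domain(context, domain_id)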
# Misc Methods
def get_absolute_limits(self, context):
LOG.info(_LI("get_absolute_limits: "
"Calling central's get_absolute_limits."))
return self.client.call(context, 'get_absolute_limits')
# Quota Methods
def get_quotas(self, context, tenant_id):
LOG.info(_LI("get_quotas: Calling central's get_quotas."))
return self.client.call(context, 'get_quotas', tenant_id=tenant_id)
def get_quota(self, context, tenant_id, resource):
LOG.info(_LI("get_quota: Calling central's get_quota."))
return self.client.call(context, 'get_quota', tenant_id=tenant_id,
resource=resource)
def set_quota(self, context, tenant_id, resource, hard_limit):
LOG.info(_LI("set_quota: Calling central's set_quota."))
return self.client.call(context, 'set_quota', tenant_id=tenant_id,
resource=resource, hard_limit=hard_limit)
def reset_quotas(self, context, tenant_id):
LOG.info(_LI("reset_quotas: Calling central's reset_quotas."))
return self.client.call(context, 'reset_quotas', tenant_id=tenant_id)
# TSIG Key Methods
def create_tsigkey(self, context, tsigkey):
LOG.info(_LI("create_tsigkey: Calling central's create_tsigkey."))
return self.client.call(context, 'create_tsigkey', tsigkey=tsigkey)
def find_tsigkeys(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_tsigkeys: Calling central's find_tsigkeys."))
return self.client.call(context, 'find_tsigkeys', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_tsigkey(self, context, tsigkey_id):
LOG.info(_LI("get_tsigkey: Calling central's get_tsigkey."))
return self.client.call(context, 'get_tsigkey', tsigkey_id=tsigkey_id)
def update_tsigkey(self, context, tsigkey):
LOG.info(_LI("update_tsigkey: Calling central's update_tsigkey."))
return self.client.call(context, 'update_tsigkey', tsigkey=tsigkey)
def delete_tsigkey(self, context, tsigkey_id):
LOG.info(_LI("delete_tsigkey: Calling central's delete_tsigkey."))
return self.client.call(context, 'delete_tsigkey',
tsigkey_id=tsigkey_id)
# Tenant Methods
def find_tenants(self, context):
LOG.info(_LI("find_tenants: Calling central's find_tenants."))
return self.client.call(context, 'find_tenants')
def get_tenant(self, context, tenant_id):
LOG.info(_LI("get_tenant: Calling central's get_tenant."))
return self.client.call(context, 'get_tenant', tenant_id=tenant_id)
def count_tenants(self, context):
LOG.info(_LI("count_tenants: Calling central's count_tenants."))
return self.client.call(context, 'count_tenants')
# Domain Methods
def create_domain(self, context, domain):
LOG.info(_LI("create_domain: Calling central's create_domain."))
return self.client.call(context, 'create_domain', domain=domain)
def get_domain(self, context, domain_id):
LOG.info(_LI("get_domain: Calling central's get_domain."))
return self.client.call(context, 'get_domain', domain_id=domain_id)
def get_domain_servers(self, context, domain_id):
LOG.info(_LI("get_domain_servers: "
"Calling central's get_domain_servers."))
return self.client.call(context, 'get_domain_servers',
domain_id=domain_id)
def find_domains(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_domains: Calling central's find_domains."))
return self.client.call(context, 'find_domains', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_domain(self, context, criterion=None):
LOG.info(_LI("find_domain: Calling central's find_domain."))
return self.client.call(context, 'find_domain', criterion=criterion)
def update_domain(self, context, domain, increment_serial=True):
LOG.info(_LI("update_domain: Calling central's update_domain."))
return self.client.call(context, 'update_domain', domain=domain,
increment_serial=increment_serial)
def delete_domain(self, context, domain_id):
LOG.info(_LI("delete_domain: Calling central's delete_domain."))
return self.client.call(context, 'delete_domain', domain_id=domain_id)
def count_domains(self, context, criterion=None):
LOG.info(_LI("count_domains: Calling central's count_domains."))
return self.client.call(context, 'count_domains', criterion=criterion)
def touch_domain(self, context, domain_id):
LOG.info(_LI("touch_domain: Calling central's touch_domain."))
return self.client.call(context, 'touch_domain', domain_id=domain_id)
# TLD Methods
def create_tld(self, context, tld):
LOG.info(_LI("create_tld: Calling central's create_tld."))
return self.client.call(context, 'create_tld', tld=tld)
def find_tlds(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_tlds: Calling central's find_tlds."))
return self.client.call(context, 'find_tlds', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_tld(self, context, tld_id):
LOG.info(_LI("get_tld: Calling central's get_tld."))
return self.client.call(context, 'get_tld', tld_id=tld_id)
def update_tld(self, context, tld):
LOG.info(_LI("update_tld: Calling central's update_tld."))
return self.client.call(context, 'update_tld', tld=tld)
def delete_tld(self, context, tld_id):
LOG.info(_LI("delete_tld: Calling central's delete_tld."))
return self.client.call(context, 'delete_tld', tld_id=tld_id)
# RecordSet Methods
def create_recordset(self, context, domain_id, recordset):
LOG.info(_LI("create_recordset: Calling central's create_recordset."))
return self.client.call(context, 'create_recordset',
domain_id=domain_id, recordset=recordset)
def get_recordset(self, context, domain_id, recordset_id):
LOG.info(_LI("get_recordset: Calling central's get_recordset."))
return self.client.call(context, 'get_recordset', domain_id=domain_id,
recordset_id=recordset_id)
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_recordsets: Calling central's find_recordsets."))
return self.client.call(context, 'find_recordsets',
criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_recordset(self, context, criterion=None):
LOG.info(_LI("find_recordset: Calling central's find_recordset."))
return self.client.call(context, 'find_recordset', criterion=criterion)
def update_recordset(self, context, recordset, increment_serial=True):
LOG.info(_LI("update_recordset: Calling central's update_recordset."))
return self.client.call(context, 'update_recordset',
recordset=recordset,
increment_serial=increment_serial)
def delete_recordset(self, context, domain_id, recordset_id,
increment_serial=True):
LOG.info(_LI("delete_recordset: Calling central's delete_recordset."))
return self.client.call(context, 'delete_recordset',
domain_id=domain_id,
recordset_id=recordset_id,
increment_serial=increment_serial)
def count_recordsets(self, context, criterion=None):
LOG.info(_LI("count_recordsets: Calling central's count_recordsets."))
return self.client.call(context, 'count_recordsets',
criterion=criterion)
# Record Methods
def create_record(self, context, domain_id, recordset_id, record,
increment_serial=True):
LOG.info(_LI("create_record: Calling central's create_record."))
return self.client.call(context, 'create_record',
domain_id=domain_id,
recordset_id=recordset_id,
record=record,
increment_serial=increment_serial)
def get_record(self, context, domain_id, recordset_id, record_id):
LOG.info(_LI("get_record: Calling central's get_record."))
return self.client.call(context, 'get_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id)
def find_records(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_records: Calling central's find_records."))
return self.client.call(context, 'find_records', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_record(self, context, criterion=None):
LOG.info(_LI("find_record: Calling central's find_record."))
return self.client.call(context, 'find_record', criterion=criterion)
def update_record(self, context, record, increment_serial=True):
LOG.info(_LI("update_record: Calling central's update_record."))
return self.client.call(context, 'update_record',
record=record,
increment_serial=increment_serial)
def delete_record(self, context, domain_id, recordset_id, record_id,
increment_serial=True):
LOG.info(_LI("delete_record: Calling central's delete_record."))
return self.client.call(context, 'delete_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id,
increment_serial=increment_serial)
def count_records(self, context, criterion=None):
LOG.info(_LI("count_records: Calling central's count_records."))
return self.client.call(context, 'count_records', criterion=criterion)
# Misc. Report combining counts for tenants, domains and records
def count_report(self, context, criterion=None):
LOG.info(_LI("count_report: Calling central's count_report."))
return self.client.call(context, 'count_report', criterion=criterion)
# Sync Methods
def sync_domains(self, context):
LOG.info(_LI("sync_domains: Calling central's sync_domains."))
return self.client.call(context, 'sync_domains')
def sync_domain(self, context, domain_id):
LOG.info(_LI("sync_domain: Calling central's sync_domains."))
return self.client.call(context, 'sync_domain', domain_id=domain_id)
def sync_record(self, context, domain_id, recordset_id, record_id):
LOG.info(_LI("sync_record: Calling central's sync_record."))
return self.client.call(context, 'sync_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id)
def list_floatingips(self, context):
LOG.info(_LI("list_floatingips: Calling central's list_floatingips."))
return self.client.call(context, 'list_floatingips')
def get_floatingip(self, context, region, floatingip_id):
LOG.info(_LI("get_floatingip: Calling central's get_floatingip."))
return self.client.call(context, 'get_floatingip', region=region,
floatingip_id=floatingip_id)
def update_floatingip(self, context, region, floatingip_id, values):
LOG.info(_LI("update_floatingip: "
"Calling central's update_floatingip."))
return self.client.call(context, 'update_floatingip', region=region,
floatingip_id=floatingip_id, values=values)
# Blacklisted Domain Methods
def create_blacklist(self, context, blacklist):
LOG.info(_LI("create_blacklist: Calling central's create_blacklist"))
return self.client.call(context, 'create_blacklist',
blacklist=blacklist)
def get_blacklist(self, context, blacklist_id):
LOG.info(_LI("get_blacklist: Calling central's get_blacklist."))
return self.client.call(context, 'get_blacklist',
blacklist_id=blacklist_id)
def find_blacklists(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_blacklists: Calling central's find_blacklists."))
return self.client.call(
context, 'find_blacklists', criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def find_blacklist(self, context, criterion):
LOG.info(_LI("find_blacklist: Calling central's find_blacklist."))
return self.client.call(context, 'find_blacklist', criterion=criterion)
def update_blacklist(self, context, blacklist):
LOG.info(_LI("update_blacklist: Calling central's update_blacklist."))
return self.client.call(context, 'update_blacklist',
blacklist=blacklist)
def delete_blacklist(self, context, blacklist_id):
LOG.info(_LI("delete_blacklist: Calling central's delete blacklist."))
return self.client.call(context, 'delete_blacklist',
blacklist_id=blacklist_id)
# Pool Server Methods
def create_pool(self, context, pool):
LOG.info(_LI("create_pool: Calling central's create_pool."))
return self.client.call(context, 'create_pool', pool=pool)
def find_pools(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_pools: Calling central's find_pools."))
return self.client.call(context, 'find_pools', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_pool(self, context, criterion=None):
LOG.info(_LI("find_pool: Calling central's find_pool."))
return self.client.call(context, 'find_pool', criterion=criterion)
def get_pool(self, context, pool_id):
LOG.info(_LI("get_pool: Calling central's get_pool."))
return self.client.call(context, 'get_pool', pool_id=pool_id)
def update_pool(self, context, pool):
LOG.info(_LI("update_pool: Calling central's update_pool."))
return self.client.call(context, 'update_pool', pool=pool)
def delete_pool(self, context, pool_id):
LOG.info(_LI("delete_pool: Calling central's delete_pool."))
return self.client.call(context, 'delete_pool', pool_id=pool_id)
# Pool Manager Integration Methods
def update_status(self, context, domain_id, status, serial):
LOG.info(_LI("update_status: Calling central's update_status "
"for %(domain_id)s : %(status)s : %(serial)s") %
{'domain_id': domain_id, 'status': status, 'serial': serial})
self.client.cast(context, 'update_status', domain_id=domain_id,
status=status, serial=serial)
# Zone Ownership Transfers
def create_zone_transfer_request(self, context, zone_transfer_request):
LOG.info(_LI("create_zone_transfer_request: \
Calling central's create_zone_transfer_request."))
return self.client.call(
context, 'create_zone_transfer_request',
zone_transfer_request=zone_transfer_request)
def get_zone_transfer_request(self, context, zone_transfer_request_id):
LOG.info(_LI("get_zone_transfer_request: \
Calling central's get_zone_transfer_request."))
return self.client.call(
context,
'get_zone_transfer_request',
zone_transfer_request_id=zone_transfer_request_id)
def find_zone_transfer_requests(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
LOG.info(_LI("find_zone_transfer_requests: \
Calling central's find_zone_transfer_requests."))
return self.client.call(
context, 'find_zone_transfer_requests', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def find_zone_transfer_request(self, context, zone_transfer_request):
LOG.info(_LI("find_zone_transfer_request: \
Calling central's find_zone_transfer_request."))
return self.client.call(
context, 'find_zone_transfer_request',
zone_transfer_request=zone_transfer_request)
def update_zone_transfer_request(self, context, zone_transfer_request):
LOG.info(_LI("update_zone_transfer_request: \
Calling central's update_zone_transfer_request."))
return self.client.call(
context, 'update_zone_transfer_request',
zone_transfer_request=zone_transfer_request)
def delete_zone_transfer_request(self, context, zone_transfer_request_id):
LOG.info(_LI("delete_zone_transfer_request: \
Calling central's delete_zone_transfer_request."))
return self.client.call(
context,
'delete_zone_transfer_request',
zone_transfer_request_id=zone_transfer_request_id)
def create_zone_transfer_accept(self, context, zone_transfer_accept):
LOG.info(_LI("create_zone_transfer_accept: \
Calling central's create_zone_transfer_accept."))
return self.client.call(
context, 'create_zone_transfer_accept',
zone_transfer_accept=zone_transfer_accept)
def get_zone_transfer_accept(self, context, zone_transfer_accept_id):
LOG.info(_LI("get_zone_transfer_accept: \
Calling central's get_zone_transfer_accept."))
return self.client.call(
context,
'get_zone_transfer_accept',
zone_transfer_accept_id=zone_transfer_accept_id)
def find_zone_transfer_accepts(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
LOG.info(_LI("find_zone_transfer_accepts: \
Calling central's find_zone_transfer_accepts."))
return self.client.call(
context, 'find_zone_transfer_accepts', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def find_zone_transfer_accept(self, context, zone_transfer_accept):
LOG.info(_LI("find_zone_transfer_accept: \
Calling central's find_zone_transfer_accept."))
return self.client.call(
context, 'find_zone_transfer_accept',
zone_transfer_accept=zone_transfer_accept)
def update_zone_transfer_accept(self, context, zone_transfer_accept):
LOG.info(_LI("update_zone_transfer_accept: \
Calling central's update_zone_transfer_accept."))
return self.client.call(
context, 'update_zone_transfer_accept',
zone_transfer_accept=zone_transfer_accept)
def delete_zone_transfer_accept(self, context, zone_transfer_accept_id):
LOG.info(_LI("delete_zone_transfer_accept: \
Calling central's delete_zone_transfer_accept."))
return self.client.call(
context,
'delete_zone_transfer_accept',
zone_transfer_accept_id=zone_transfer_accept_id)
def xfr_domain(self, context, domain_id):
LOG.info(_LI("xfr_domain: Calling central's xfr_domain"))
cctxt = self.client.prepare(version='5.2')
return cctxt.call(context, 'xfr_domain', domain_id=domain_id)
# Zone Import Methods
def create_zone_import(self, context, request_body):
LOG.info(_LI("create_zone_import: Calling central's "
"create_zone_import."))
return self.client.call(context, 'create_zone_import',
request_body=request_body)
def find_zone_imports(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
LOG.info(_LI("find_zone_imports: Calling central's "
"find_zone_imports."))
return self.client.call(context, 'find_zone_imports',
criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_zone_import(self, context, zone_import_id):
LOG.info(_LI("get_zone_import: Calling central's get_zone_import."))
return self.client.call(context, 'get_zone_import',
zone_import_id=zone_import_id)
def update_zone_import(self, context, zone_import):
LOG.info(_LI("update_zone_import: Calling central's "
"update_zone_import."))
return self.client.call(context, 'update_zone_import',
zone_import=zone_import)
def delete_zone_import(self, context, zone_import_id):
LOG.info(_LI("delete_zone_import: Calling central's "
"delete_zone_import."))
return self.client.call(context, 'delete_zone_import',
zone_import_id=zone_import_id)
|
|
import os
import sys
import json
import re
import traceback
import logging
import webbrowser
import copy
import pickle
import platform
import subprocess
import time
from PyQt4.QtGui import (QMainWindow, QMessageBox,
QApplication, QFileDialog, QInputDialog,
QLineEdit, QMenu, QDrag, QPainter, QPen,
QPalette, QDesktopServices, QFont,
QPixmap, QFileSystemModel, QHeaderView,
QActionGroup, QDockWidget)
from PyQt4.QtCore import (Qt, pyqtSignature, SIGNAL, QMimeData, QTimer,
QSettings, QCoreApplication, QUrl)
from spyderlib.widgets.internalshell import InternalShell
try: # Support for spyder < 3
from spyderlib.widgets.externalshell.namespacebrowser import \
NamespaceBrowser
except ImportError:
from spyderlib.widgets.variableexplorer.namespacebrowser import \
NamespaceBrowser
from spyderlib.widgets.sourcecode.codeeditor import CodeEditor
from spyderlib.utils.misc import get_error_match
import spykeutils
from spykeutils.plugin.data_provider import DataProvider
from spykeutils.plugin.analysis_plugin import AnalysisPlugin
from spykeutils.progress_indicator import CancelException
from spykeutils import SpykeException
from spykeutils.plot.helper import ProgressIndicatorDialog
from .. import api
from main_ui import Ui_MainWindow
from settings import SettingsWindow
from filter_dock import FilterDock
from filter_dialog import FilterDialog
from filter_group_dialog import FilterGroupDialog
from plugin_editor_dock import PluginEditorDock
import ipython_connection as ipy
from plugin_model import PluginModel
from remote_thread import RemoteThread
logger = logging.getLogger('spykeviewer')
# Monkeypatch variable editor to report error in message box
try: # spyder < 3
from spyderlib.widgets.dicteditor import DictDelegate
except ImportError:
from spyderlib.widgets.variableexplorer.collectionseditor import \
CollectionsDelegate as DictDelegate
_orig_createEditor = DictDelegate.createEditor
def _patched_createEditor(*args, **kwargs):
try:
return _orig_createEditor(*args, **kwargs)
except Exception:
QMessageBox.critical(None, 'Edit item',
'Could not create editor for selected data!')
DictDelegate.createEditor = _patched_createEditor
#noinspection PyCallByClass,PyTypeChecker,PyArgumentList
class MainWindow(QMainWindow, Ui_MainWindow):
""" The main window of Spyke Viewer.
"""
def __init__(self, parent=None, splash=None):
QMainWindow.__init__(self, parent)
api.window = self
self.splash = splash
self.update_splash_screen('Creating user interface....')
QCoreApplication.setOrganizationName('SpykeUtils')
QCoreApplication.setApplicationName('Spyke Viewer')
self.data_path = QDesktopServices.storageLocation(
QDesktopServices.DataLocation)
self.startup_script = os.path.join(self.data_path, 'startup.py')
self.setupUi(self)
self.dir = os.getcwd()
# Threads providing output from remotely started plugins
self.process_threads = {}
self.remote_process_counter = 0
QTimer.singleShot(1000, self.clean_finished_process_threads)
# Lazy load mode menu
self.load_actions = QActionGroup(self)
self.load_actions.setExclusive(True)
self.actionFull_Load.setActionGroup(self.load_actions)
self.actionLazy_Load.setActionGroup(self.load_actions)
self.actionCached_Lazy_Load.setActionGroup(self.load_actions)
# Cascading mode menu
self.cascade_actions = QActionGroup(self)
self.cascade_actions.setExclusive(True)
self.actionFull.setActionGroup(self.cascade_actions)
self.actionLazy.setActionGroup(self.cascade_actions)
# Python console
self.console = None
self.progress = ProgressIndicatorDialog(self)
self.provider_factory = DataProvider
self.selections = []
self.provider = None
self.plugin_paths = []
self.init_python()
# IPython menu option
self.ipy_kernel = None
if ipy.ipython_available:
self.ipyDock = QDockWidget()
self.ipyDock.setObjectName('ipythonDock')
self.ipyDock.setWindowTitle('IPython')
self.addDockWidget(Qt.BottomDockWidgetArea, self.ipyDock)
self.ipyDock.setVisible(False)
self.ipyDock.visibilityChanged.connect(self.on_ipyDock_visibilityChanged)
# Drag and Drop for selections menu
self.menuSelections.setAcceptDrops(True)
self.menuSelections.paintEvent =\
self.on_menuSelections_paint
self.menuSelections.mousePressEvent =\
self.on_menuSelections_mousePressed
self.menuSelections.mouseMoveEvent =\
self.on_menuSelections_mouseMoved
self.menuSelections.dragEnterEvent =\
self.on_menuSelections_dragEnter
self.menuSelections.dragMoveEvent =\
self.on_menuSelections_dragMoved
self.menuSelections.dropEvent =\
self.on_menuSelections_drop
self.seldrag_start_pos = None
self.seldrag_selection = None
self.seldrag_target = None
self.seldrag_target_upper = False
# Filters
settings = QSettings()
if not settings.contains('filterPath'):
data_path = QDesktopServices.storageLocation(
QDesktopServices.DataLocation)
self.filter_path = os.path.join(data_path, 'filters')
else:
self.filter_path = settings.value('filterPath')
filter_types = self.get_filter_types()
self.filterDock = FilterDock(self.filter_path, filter_types,
menu=self.menuFilter, parent=self)
self.filterDock.setObjectName('filterDock')
self.filterDock.current_filter_changed.connect(
self.on_current_filter_changed)
self.filterDock.filters_changed.connect(
self.on_filters_changed)
self.addDockWidget(Qt.RightDockWidgetArea, self.filterDock)
self.show_filter_exceptions = True
# Plugins
self.menuPluginsContext = QMenu(self)
self.menuPluginsContext.addAction(self.actionRunPlugin)
self.menuPluginsContext.addAction(self.actionRemotePlugin)
self.menuPluginsContext.addAction(self.actionConfigurePlugin)
self.menuPluginsContext.addAction(self.actionEditPlugin)
self.menuPluginsContext.addAction(self.actionShowPluginFolder)
# Plugin Editor
self.pluginEditorDock = PluginEditorDock()
self.pluginEditorDock.setObjectName('editorDock')
self.addDockWidget(Qt.RightDockWidgetArea, self.pluginEditorDock)
self.pluginEditorDock.setVisible(False)
self.pluginEditorDock.plugin_saved.connect(self.plugin_saved)
self.pluginEditorDock.file_available.connect(self.on_file_available)
self.consoleDock.edit_script = lambda (path): \
self.pluginEditorDock.add_file(path)
def p(x):
match = get_error_match(unicode(x))
if match:
fname, lnb = match.groups()
self.pluginEditorDock.show_position(fname, int(lnb))
self.connect(self.console, SIGNAL("go_to_error(QString)"), p)
# File navigation
self.file_system_model = QFileSystemModel()
self.file_system_model.setRootPath('')
self.fileTreeView.setModel(self.file_system_model)
self.fileTreeView.setCurrentIndex(
self.file_system_model.index(self.dir))
self.fileTreeView.expand(self.file_system_model.index(self.dir))
self.fileTreeView.setColumnHidden(1, True)
self.fileTreeView.setColumnHidden(2, True)
self.fileTreeView.setColumnHidden(3, True)
self.fileTreeView.header().setResizeMode(QHeaderView.ResizeToContents)
# Docks
self.setCentralWidget(None)
# Finish initialization if we are not a subclass
if type(self) is MainWindow:
self.finish_initialization()
##### Startup ########################################################
def update_splash_screen(self, message):
if not self.splash:
return
self.splash.showMessage(message, Qt.AlignCenter | Qt.AlignBottom)
self.splash.show()
QCoreApplication.processEvents()
def finish_initialization(self):
""" This should to be called at the end of the initialization phase
of the program (e.g. at the end of the ``__init__()`` method of a
domain-specific subclass).
"""
self.update_view_menu()
self.update_splash_screen('Restoring saved state...')
self.restore_state()
self.update_splash_screen('Running startup script...')
self.run_startup_script()
self.set_config_options()
if api.config.load_mode == 1:
self.actionLazy_Load.trigger()
elif api.config.load_mode == 2:
self.actionCached_Lazy_Load.trigger()
else:
self.actionFull_Load.trigger()
if api.config.lazy_cascading:
self.actionLazy.trigger()
else:
self.actionFull.trigger()
self.update_splash_screen('Loading plugins...')
self.reload_plugins()
self.load_plugin_configs()
if api.config.load_selection_on_start:
self.update_splash_screen('Loading previous selection...')
self.load_current_selection()
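    # Illustrative sketch (an assumption, not from the original source): a
    # domain-specific subclass is expected to call finish_initialization()
    # at the end of its own __init__(), e.g.
    #
    #   class MyDomainWindow(MainWindow):  # hypothetical subclass name
    #       def __init__(self, parent=None, splash=None):
    #           super(MyDomainWindow, self).__init__(parent, splash)
    #           # ... domain-specific setup ...
    #           self.finish_initialization()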
def get_filter_types(self):
""" Return a list of filter type tuples as required by
        :class:`filter_dock.FilterDock`. Override in a domain-specific
subclass.
"""
return []
def update_view_menu(self):
""" Recreate the "View" menu.
"""
if hasattr(self, 'menuView'):
a = self.menuView.menuAction()
self.mainMenu.removeAction(a)
self.menuView.clear()
self.menuView = self.createPopupMenu()
self.menuView.setTitle('&View')
self.mainMenu.insertMenu(self.menuHelp.menuAction(), self.menuView)
def set_default_plugin_path(self):
""" Set the default plugin path (contains the standard plugins
after installation).
"""
if hasattr(sys, 'frozen'):
module_path = os.path.dirname(sys.executable)
else:
file_path = os.path.abspath(os.path.dirname(__file__))
module_path = os.path.dirname(file_path)
plugin_path = os.path.join(module_path, 'plugins')
if os.path.isdir(plugin_path):
self.plugin_paths.append(plugin_path)
else:
logger.warning('Plugin path "%s" does not exist, no plugin '
'path set!' %
plugin_path)
def restore_state(self):
""" Restore previous state of the GUI and settings from saved
configuration.
"""
settings = QSettings()
if not settings.contains('windowGeometry') or \
not settings.contains('windowState'):
self.set_initial_layout()
else:
self.restoreGeometry(settings.value('windowGeometry'))
self.restoreState(settings.value('windowState'))
if not settings.contains('pluginPaths'):
self.set_default_plugin_path()
else:
paths = settings.value('pluginPaths')
self.plugin_paths = []
if paths is not None:
for p in paths:
if not os.path.isdir(p):
logger.warning('Plugin path "%s" does not exist, '
'removing from configuration...' % p)
else:
self.plugin_paths.append(p)
if not self.plugin_paths:
logger.warning('No plugin paths set! Setting default path...')
self.set_default_plugin_path()
if not settings.contains('selectionPath'):
self.selection_path = os.path.join(self.data_path, 'selections')
else:
self.selection_path = settings.value('selectionPath')
if not settings.contains('dataPath'):
AnalysisPlugin.data_dir = os.path.join(self.data_path, 'data')
else:
AnalysisPlugin.data_dir = settings.value('dataPath')
if not settings.contains('remoteScript') or not os.path.isfile(
settings.value('remoteScript')):
if settings.contains('remoteScript'):
logger.warning('Remote script not found! Reverting to '
'default location...')
if hasattr(sys, 'frozen'):
path = os.path.dirname(sys.executable)
else:
path = os.path.dirname(spykeutils.__file__)
path = os.path.join(os.path.abspath(path), 'plugin')
self.remote_script = os.path.join(path, 'startplugin.py')
else:
self.remote_script = settings.value('remoteScript')
if self.plugin_paths:
self.pluginEditorDock.set_default_path(self.plugin_paths[-1])
def set_initial_layout(self):
""" Set an initial layout for the docks (when no previous
configuration could be loaded).
"""
self.filesDock.setMinimumSize(100, 100)
self.resize(800, 750)
self.removeDockWidget(self.filesDock)
self.removeDockWidget(self.filterDock)
self.removeDockWidget(self.pluginDock)
self.addDockWidget(Qt.RightDockWidgetArea, self.filesDock)
self.addDockWidget(Qt.RightDockWidgetArea, self.filterDock)
self.addDockWidget(Qt.RightDockWidgetArea, self.pluginDock)
self.tabifyDockWidget(self.filterDock, self.pluginDock)
self.filesDock.setVisible(True)
self.filterDock.setVisible(True)
self.pluginDock.setVisible(True)
self.consoleDock.setVisible(False)
self.variableExplorerDock.setVisible(False)
self.historyDock.setVisible(False)
self.tabifyDockWidget(self.consoleDock, self.variableExplorerDock)
self.tabifyDockWidget(self.variableExplorerDock, self.historyDock)
def run_startup_script(self):
""" Run the startup script that can be used for configuration.
"""
if not os.path.isfile(self.startup_script):
content = ('# Startup script for Spyke Viewer\n'
'import spykeviewer.api as spyke')
try:
path = os.path.dirname(self.startup_script)
if not os.path.isdir(path):
os.makedirs(path)
with open(self.startup_script, 'w') as f:
f.write(content)
except:
logger.warning('Could not create startup script ' +
self.startup_script + ':\n' +
traceback.format_exc() + '\n')
return
try:
with open(self.startup_script, 'r') as f:
# We turn all encodings to UTF-8, so remove encoding
# comments manually
lines = f.readlines()
if lines:
if re.findall('coding[:=]\s*([-\w.]+)', lines[0]):
lines.pop(0)
                    elif len(lines) > 1 and re.findall(
                            'coding[:=]\s*([-\w.]+)', lines[1]):
lines.pop(1)
source = ''.join(lines).decode('utf-8')
code = compile(source, self.startup_script, 'exec')
exec(code, {})
except Exception:
logger.warning('Error during execution of startup script ' +
self.startup_script + ':\n' +
traceback.format_exc() + '\n')
def set_config_options(self):
self.console.set_codecompletion_enter(
api.config.codecomplete_console_enter)
self.pluginEditorDock.enter_completion = \
api.config.codecomplete_editor_enter
##### Interactive Python #############################################
def get_console_objects(self):
""" Return a dictionary of objects that should be included in the
        console on startup. These objects will also not be displayed in the
        variable explorer. Override this function in domain-specific
subclasses, e.g. for imports.
"""
import numpy
import scipy
import matplotlib.pyplot as plt
import guiqwt.pyplot as guiplt
plt.ion()
guiplt.ion()
return {'np': numpy, 'sp': scipy, 'plt': plt, 'guiplt': guiplt,
'spyke': api}
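    # Illustrative sketch (an assumption, not from the original source): a
    # domain-specific subclass could extend the console namespace by
    # overriding get_console_objects(), e.g.
    #
    #   def get_console_objects(self):
    #       objects = super(MyDomainWindow, self).get_console_objects()
    #       objects['mymod'] = mymod  # hypothetical extra module
    #       return objects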
def init_python(self):
""" Initialize the Python docks: console, history and variable
explorer.
"""
class StreamDuplicator():
def __init__(self, out_list):
self.outs = out_list
def write(self, s):
for o in self.outs:
o.write(s)
def flush(self):
for o in self.outs:
if hasattr(o, 'flush'):
o.flush()
def set_parent(self, _): # Called when connecting IPython 0.13
pass
# Fixing bugs in the internal shell
class FixedInternalShell(InternalShell):
def __init__(self, *args, **kwargs):
super(FixedInternalShell, self).__init__(*args, **kwargs)
# Do not try to show a completion list when completions is None
def show_completion_list(self, completions, completion_text="",
automatic=True):
if completions is None:
return
super(FixedInternalShell, self).show_completion_list(
completions, completion_text, automatic)
# Do not get dir() for non-text objects
def get_dir(self, objtxt):
if not isinstance(objtxt, (str, unicode)):
return
return super(FixedInternalShell, self).get_dir(objtxt)
# Fix exception when using non-ascii characters
def run_command(self, cmd, history=True, new_prompt=True):
"""Run command in interpreter"""
if not cmd:
cmd = ''
else:
if history:
self.add_to_history(cmd)
cmd_line = cmd + '\n'
self.interpreter.stdin_write.write(cmd_line.encode('utf-8'))
if not self.multithreaded:
self.interpreter.run_line()
self.emit(SIGNAL("refresh()"))
# Console
msg = ('current and selections can be used to access selected data'
'\n\nModules imported at startup: ')
ns = self.get_console_objects()
excludes = ['execfile', 'guiplt', 'help', 'raw_input', 'runfile']
first_item = True
for n, o in ns.iteritems():
if type(o) == type(sys):
if not first_item:
msg += ', '
first_item = False
msg += o.__name__
if n != o.__name__:
msg += ' as ' + n
excludes.append(n)
ns['current'] = self.provider
ns['selections'] = self.selections
font = QFont("Monospace")
font.setStyleHint(font.TypeWriter, font.PreferDefault)
if not platform.system() == 'Darwin':
font.setPointSize(9)
self.console = FixedInternalShell(
self.consoleDock, namespace=ns, multithreaded=False,
message=msg, max_line_count=10000, font=font)
#self.console.clear_terminal()
self.console.set_codecompletion_auto(True)
self.console.set_calltips(True)
try:
self.console.setup_calltips(size=600, font=font)
except AttributeError: # Not needed for spyderlib >= 2.3.0
pass
self.console.setup_completion(size=(370, 240), font=font)
self.consoleDock.setWidget(self.console)
# Variable browser
self.browser = NamespaceBrowser(self.variableExplorerDock)
self.browser.set_shellwidget(self.console)
self.browser.setup(
check_all=True, exclude_private=True,
exclude_uppercase=False, exclude_capitalized=False,
exclude_unsupported=False, truncate=False, minmax=False,
collvalue=False, remote_editing=False, inplace=False,
autorefresh=False,
excluded_names=excludes)
self.variableExplorerDock.setWidget(self.browser)
# History
self.history = CodeEditor(self.historyDock)
self.history.setup_editor(linenumbers=False, language='py',
scrollflagarea=False)
self.history.setReadOnly(True)
self.history.set_text('\n'.join(self.console.history))
self.history.set_cursor_position('eof')
self.historyDock.setWidget(self.history)
self.console.connect(self.console, SIGNAL("refresh()"),
self._append_python_history)
# Duplicate stdout and stderr for console
# Not using previous stdout, only stderr. Using StreamDuplicator
# because spyder stream does not have flush() method...
sys.stdout = StreamDuplicator([sys.stdout])
sys.stderr = StreamDuplicator([sys.stderr, sys.__stderr__])
# Set root logging handler to print all log warnings in console
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
root_logger = logging.getLogger()
root_logger.addHandler(ch)
def _append_python_history(self):
self.browser.refresh_table()
try:
self.history.append('\n' + self.console.history[-1])
except IndexError:
pass # Empty history, not a problem
self.history.set_cursor_position('eof')
def on_ipyDock_visibilityChanged(self, visible):
if visible and not self.ipyDock.widget():
self.create_ipython_kernel()
widget = self.ipy_kernel.get_widget()
self.ipyDock.setWidget(widget)
def create_ipython_kernel(self):
""" Create a new IPython kernel. Does nothing if a kernel already
exists.
"""
if not ipy.ipython_available or self.ipy_kernel:
return
self.ipy_kernel = ipy.IPythonConnection()
self.ipy_kernel.push({'current': self.provider,
'selections': self.selections})
def on_variableExplorerDock_visibilityChanged(self, visible):
if visible:
self.browser.refresh_table()
def on_historyDock_visibilityChanged(self, visible):
if visible:
self.history.set_cursor_position('eof')
##### Selections #####################################################
def on_menuSelections_mousePressed(self, event):
if event.button() == Qt.LeftButton:
action = self.menuSelections.actionAt(event.pos())
if action:
selection = action.data()
if selection:
self.seldrag_start_pos = event.pos()
self.seldrag_selection = selection
else:
self.seldrag_start_pos = None
self.seldrag_selection = None
self.seldrag_target = None
QMenu.mousePressEvent(self.menuSelections, event)
def on_menuSelections_mouseMoved(self, event):
if event.buttons() & Qt.LeftButton and self.seldrag_start_pos:
if ((event.pos() - self.seldrag_start_pos).manhattanLength() >=
QApplication.startDragDistance()):
drag = QDrag(self.menuSelections)
data = QMimeData()
data.setText(self.seldrag_selection.name)
drag.setMimeData(data)
drag.exec_()
self.seldrag_start_pos = None
self.seldrag_selection = None
self.seldrag_target = None
QMenu.mouseMoveEvent(self.menuSelections, event)
def on_menuSelections_paint(self, event):
QMenu.paintEvent(self.menuSelections, event)
if self.seldrag_target:
# Paint line where selection will be dropped
p = QPainter()
color = QPalette().color(self.menuSelections.foregroundRole())
pen = QPen(color, 2, Qt.SolidLine)
p.begin(self.menuSelections)
p.setPen(pen)
rect = self.menuSelections.actionGeometry(self.seldrag_target)
if self.seldrag_target_upper:
p.drawLine(rect.topLeft(), rect.topRight())
else:
p.drawLine(rect.bottomLeft(), rect.bottomRight())
p.end()
def _menuSelections_pos_is_drop_target(self, pos):
""" Return if selection can be dropped at this position and
prepare information needed for drawing and dropping
"""
action = self.menuSelections.actionAt(pos)
if not action or not action.data():
self.seldrag_target = None
return False
self.seldrag_target = action
rect = self.menuSelections.actionGeometry(action)
if pos.y() < rect.top() + rect.height() / 2:
self.seldrag_target_upper = True
else:
self.seldrag_target_upper = False
return True
def on_menuSelections_dragEnter(self, event):
event.setDropAction(Qt.MoveAction)
if self._menuSelections_pos_is_drop_target(event.pos()):
event.accept()
else:
event.ignore()
QMenu.dragEnterEvent(self.menuSelections, event)
def on_menuSelections_dragMoved(self, event):
event.setDropAction(Qt.MoveAction)
if self._menuSelections_pos_is_drop_target(event.pos()):
event.accept()
self.menuSelections.update()
else:
event.ignore()
QMenu.dragMoveEvent(self.menuSelections, event)
def on_menuSelections_drop(self, event):
source = self.seldrag_selection
target = self.seldrag_target.data()
if source != target:
self.selections.remove(source)
target_index = self.selections.index(target)
if not self.seldrag_target_upper:
target_index += 1
self.selections.insert(target_index, source)
self.populate_selection_menu()
QMenu.dropEvent(self.menuSelections, event)
def populate_selection_menu(self):
self.menuSelections.clear()
a = self.menuSelections.addAction('New')
a.triggered.connect(self.on_selection_new)
a = self.menuSelections.addAction('Clear')
a.triggered.connect(self.on_selection_clear)
self.menuSelections.addSeparator()
for i, s in enumerate(self.selections):
m = self.menuSelections.addMenu(s.name)
m.menuAction().setData(s)
a = m.addAction('Load')
self.connect(a, SIGNAL('triggered()'),
lambda sel=s: self.on_selection_load(sel))
a = m.addAction('Save')
self.connect(a, SIGNAL('triggered()'),
lambda sel=s: self.on_selection_save(sel))
a = m.addAction('Rename')
self.connect(a, SIGNAL('triggered()'),
lambda sel=s: self.on_selection_rename(sel))
a = m.addAction('Remove')
self.connect(a, SIGNAL('triggered()'),
lambda sel=s: self.on_selection_remove(sel))
def on_selection_load(self, selection):
self.set_current_selection(selection.data_dict())
def on_selection_save(self, selection):
i = self.selections.index(selection)
self.selections[i] = self.provider_factory(
self.selections[i].name, self)
self.populate_selection_menu()
def on_selection_clear(self):
if QMessageBox.question(
self, 'Please confirm',
'Do you really want to remove all selections?',
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return
del self.selections[:]
self.populate_selection_menu()
def on_selection_rename(self, selection):
(name, ok) = QInputDialog.getText(
self, 'Edit selection name',
'New name:', QLineEdit.Normal, selection.name)
if ok and name:
selection.name = name
self.populate_selection_menu()
def on_selection_remove(self, selection):
if QMessageBox.question(
self, 'Please confirm',
'Do you really want to remove the selection "%s"?' %
selection.name,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return
self.selections.remove(selection)
self.populate_selection_menu()
def on_selection_new(self):
self.selections.append(self.provider_factory(
'Selection %d' % (len(self.selections) + 1), self))
self.populate_selection_menu()
def serialize_selections(self):
sl = list() # Selection list, current selection as first item
sl.append(self.provider_factory('__current__', self).data_dict())
for s in self.selections:
sl.append(s.data_dict())
return json.dumps(sl, sort_keys=True, indent=2)
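    # Note on the serialized layout (derived from serialize_selections() above
    # and load_selections_from_file() below; keys other than 'name' depend on
    # the provider's data_dict()): the result is a JSON list whose first entry
    # is the current selection under the reserved name '__current__', roughly:
    #
    #   [{"name": "__current__", ...}, {"name": "Selection 1", ...}, ...]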
def save_selections_to_file(self, filename):
f = open(filename, 'w')
f.write(self.serialize_selections())
f.close()
def load_selections_from_file(self, filename):
try:
f = open(filename, 'r')
p = json.load(f)
f.close()
for s in p:
if not s:
continue
if s['name'] == '__current__':
self.set_current_selection(s)
else:
self.add_selection(s)
except Exception, e:
QMessageBox.critical(self, 'Error loading selection',
str(e).decode('utf8'))
logger.warning('Error loading selection:\n' +
traceback.format_exc() + '\n')
finally:
self.progress.done()
self.populate_selection_menu()
def load_current_selection(self):
""" Load the displayed (current) selection from a file.
"""
current_selection = os.path.join(
self.selection_path, '.current.sel')
if os.path.isfile(current_selection):
self.load_selections_from_file(current_selection)
else:
self.populate_selection_menu()
@pyqtSignature("")
def on_actionSave_selection_triggered(self):
d = QFileDialog(self, 'Choose where to save selection',
self.selection_path)
d.setAcceptMode(QFileDialog.AcceptSave)
d.setNameFilter("Selection files (*.sel)")
d.setDefaultSuffix('sel')
if d.exec_():
filename = str(d.selectedFiles()[0])
else:
return
self.save_selections_to_file(filename)
@pyqtSignature("")
def on_actionLoad_selection_triggered(self):
d = QFileDialog(self, 'Choose selection file',
self.selection_path)
d.setAcceptMode(QFileDialog.AcceptOpen)
d.setFileMode(QFileDialog.ExistingFile)
d.setNameFilter("Selection files (*.sel)")
if d.exec_():
filename = unicode(d.selectedFiles()[0])
else:
return
self.load_selections_from_file(filename)
def set_current_selection(self, data):
""" Set the current selection based on a dictionary of selection
data. Override in domain-specific subclasses.
"""
raise NotImplementedError('No selection model defined!')
def add_selection(self, data):
""" Add a selection based on a dictionary of selection data.
Override in domain-specific subclasses.
"""
raise NotImplementedError('No selection model defined!')
##### Filters ########################################################
def on_current_filter_changed(self):
enabled = self.filterDock.current_is_data_item()
self.actionEditFilter.setEnabled(enabled)
self.actionDeleteFilter.setEnabled(enabled)
self.actionCopyFilter.setEnabled(enabled)
def on_filters_changed(self, filter_type):
self.filter_populate_function[filter_type]()
def editFilter(self, copy_item):
top = self.filterDock.current_filter_type()
group = self.filterDock.current_filter_group()
name = self.filterDock.current_name()
item = self.filterDock.current_item()
group_filters = None
if not self.filterDock.is_current_group():
dialog = FilterDialog(
self.filterDock.filter_group_dict(), top, group, name,
item.code, item.combined, item.on_exception, self)
else:
group_filters = self.filterDock.group_filters(top, name)
group = None
dialog = FilterGroupDialog(top, name, item.exclusive, self)
while dialog.exec_():
if copy_item and name == dialog.name():
QMessageBox.critical(
self, 'Error saving',
'Please select a different name for the copied element')
continue
try:
if not copy_item and name != dialog.name():
self.filterDock.delete_item(top, name, group)
if not self.filterDock.is_current_group():
self.filterDock.add_filter(
dialog.name(), dialog.group(), dialog.type(),
dialog.code(), dialog.on_exception(),
dialog.combined(), overwrite=True)
else:
self.filterDock.add_filter_group(
dialog.name(), dialog.type(), dialog.exclusive(),
copy.deepcopy(group_filters), overwrite=True)
break
except ValueError as e:
QMessageBox.critical(self, 'Error saving', str(e))
def get_active_filters(self, filter_type):
""" Return a list of active filters for the selected filter type
"""
return self.filterDock.get_active_filters(filter_type)
def is_filtered(self, item, filters):
""" Return if one of the filter functions in the given list
applies to the given item. Combined filters are ignored.
"""
for f, n in filters:
if f.combined or not f.active:
continue
try:
if not f.function()(item):
return True
except Exception, e:
if self.show_filter_exceptions:
sys.stderr.write(
'Exception in filter ' + n + ':\n' + str(e) + '\n')
if not f.on_exception:
return True
return False
def filter_list(self, items, filters):
""" Return a filtered list of the given list with the given filter
functions. Only combined filters are used.
"""
if not items:
return items
item_type = type(items[0])
for f, n in filters:
if not f.combined or not f.active:
continue
try:
items = [i for i in f.function()(items)
if isinstance(i, item_type)]
except Exception, e:
if self.show_filter_exceptions:
sys.stderr.write(
'Exception in filter ' + n + ':\n' + str(e) + '\n')
if not f.on_exception:
return []
return items
def refresh_filters(self):
""" Refresh the list of possible filters. Call if filters are changed
programmatically.
"""
self.filterDock.populate_filter_tree()
@pyqtSignature("")
def on_actionNewFilterGroup_triggered(self):
top = self.filterDock.current_filter_type()
dialog = FilterGroupDialog(top, parent=self)
while dialog.exec_():
try:
self.filterDock.add_filter_group(dialog.name(), dialog.type(),
dialog.exclusive())
break
except ValueError as e:
QMessageBox.critical(self, 'Error creating group', str(e))
@pyqtSignature("")
def on_actionNewFilter_triggered(self):
top = self.filterDock.current_filter_type()
group = self.filterDock.current_filter_group()
dialog = FilterDialog(self.filterDock.filter_group_dict(), type=top,
group=group, parent=self)
while dialog.exec_():
try:
self.filterDock.add_filter(dialog.name(), dialog.group(),
dialog.type(), dialog.code(),
dialog.on_exception(),
dialog.combined())
break
except ValueError as e:
QMessageBox.critical(self, 'Error creating filter', str(e))
@pyqtSignature("")
def on_actionDeleteFilter_triggered(self):
self.filterDock.delete_current_filter()
@pyqtSignature("")
def on_actionEditFilter_triggered(self):
self.editFilter(False)
@pyqtSignature("")
def on_actionCopyFilter_triggered(self):
self.editFilter(True)
##### Plugins ########################################################
def get_plugin_configs(self):
""" Return dictionary indexed by (name,path) tuples with configuration
dictionaries for all plugins.
"""
indices = self.plugin_model.get_all_indices()
c = {}
for idx in indices:
path = self.plugin_model.data(
idx, self.plugin_model.FilePathRole)
plug = self.plugin_model.data(idx, self.plugin_model.DataRole)
if plug:
c[(plug.get_name(), path)] = plug.get_parameters()
return c
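    # Shape of the returned mapping (illustrative only, the parameter names
    # are hypothetical):
    #
    #   {('My Plugin', '/path/to/plugin.py'): {'bin_size': 100}, ...}
    #
    # set_plugin_configs() below consumes the same structure.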
def set_plugin_configs(self, configs):
""" Takes a dictionary indexed by plugin name with configuration
dictionaries for plugins and sets configurations of plugins.
"""
indices = self.plugin_model.get_all_indices()
d = {}
for idx in indices:
path = self.plugin_model.data(
idx, self.plugin_model.FilePathRole)
plug = self.plugin_model.data(idx, self.plugin_model.DataRole)
if plug:
d[(plug.get_name(), path)] = plug
for n, c in configs.iteritems():
if n in d:
d[n].set_parameters(c)
def reload_plugins(self, keep_configs=True):
""" Reloads all plugins.
:param bool keep_configs: If ``True``, try to restore all plugin
configuration parameters after reloading.
Default: ``True``
"""
old_closed = self._get_closed_folders()
old_path = None
old_configs = {}
if hasattr(self, 'plugin_model'):
if keep_configs:
old_configs = self.get_plugin_configs()
item = self.pluginsTreeView.currentIndex()
if item:
old_path = self.plugin_model.data(
item, self.plugin_model.FilePathRole)
try:
self.plugin_model = PluginModel()
for p in self.plugin_paths:
self.plugin_model.add_path(p)
except Exception, e:
QMessageBox.critical(self, 'Error loading plugins', str(e))
return
self.pluginsTreeView.setModel(self.plugin_model)
selected_index = None
if old_path:
indices = self.plugin_model.get_indices_for_path(old_path)
if indices:
selected_index = indices[0]
self.pluginsTreeView.setCurrentIndex(selected_index)
self.pluginsTreeView.expandAll()
self.pluginsTreeView.selectionModel().currentChanged.connect(
self.selected_plugin_changed)
self.selected_plugin_changed(selected_index)
self.set_plugin_configs(old_configs)
self.restore_closed_plugin_folders(old_closed)
def _equal_path(self, index, path):
path_list = list(reversed(path.split('/')))
while index.row() >= 0:
if not path_list or index.data() != path_list.pop(0):
return False
index = index.parent()
return True
def restore_closed_plugin_folders(self, paths):
if paths is not None:
folders = self.plugin_model.get_all_folders()
for p in paths:
for f in folders:
if self._equal_path(f, p):
self.pluginsTreeView.setExpanded(f, False)
break
def load_plugin_configs(self, closed_folders=None):
# Restore closed plugin folders
paths = closed_folders
if paths is None:
settings = QSettings()
if settings.contains('closedPluginFolders'):
paths = settings.value('closedPluginFolders')
self.restore_closed_plugin_folders(paths)
# Restore plugin configurations
configs_path = os.path.join(self.data_path, 'plugin_configs.p')
if os.path.isfile(configs_path):
with open(configs_path, 'r') as f:
try:
configs = pickle.load(f)
self.set_plugin_configs(configs)
except:
pass # It does not matter if we can't load plugin configs
def selected_plugin_changed(self, current):
enabled = True
if not current:
enabled = False
elif not self.plugin_model.data(current, Qt.UserRole):
enabled = False
self.actionRunPlugin.setEnabled(enabled)
self.actionEditPlugin.setEnabled(enabled)
self.actionConfigurePlugin.setEnabled(enabled)
self.actionRemotePlugin.setEnabled(enabled)
self.actionShowPluginFolder.setEnabled(enabled)
@pyqtSignature("")
def on_actionRunPlugin_triggered(self):
plugin = self._save_plugin_before_run()
if not plugin:
return
self._run_plugin(plugin)
def _save_plugin_before_run(self):
ana = self.current_plugin()
if not ana:
return None
if api.config.save_plugin_before_starting:
e = self.pluginEditorDock.get_editor(ana.source_file)
if self.pluginEditorDock.file_was_changed(e):
if not self.pluginEditorDock.save_file(e):
return None
ana = self.current_plugin()
if not ana:
return None
return ana
def _run_plugin(self, plugin, current=None, selections=None,
finish_progress=True):
if current is None:
current = self.provider
if selections is None:
selections = self.selections
try:
return plugin.start(current, selections)
except SpykeException, err:
QMessageBox.critical(self, 'Error executing plugin', str(err))
except CancelException:
return None
except Exception, e:
# Only print stack trace from plugin on
tb = sys.exc_info()[2]
while not ('self' in tb.tb_frame.f_locals and
tb.tb_frame.f_locals['self'] == plugin):
if tb.tb_next is not None:
tb = tb.tb_next
else:
break
traceback.print_exception(type(e), e, tb)
return None
finally:
if finish_progress:
self.progress.done()
@pyqtSignature("")
def on_actionEditPlugin_triggered(self):
item = self.pluginsTreeView.currentIndex()
path = ''
if item:
path = self.plugin_model.data(
item, self.plugin_model.FilePathRole)
if not path and self.plugin_paths:
path = self.plugin_paths[0]
self.pluginEditorDock.add_file(path)
@pyqtSignature("")
def on_actionLoad_Python_File_triggered(self):
path = ''
if self.plugin_paths:
path = self.plugin_paths[-1]
d = QFileDialog(self, 'Choose file to edit', path)
d.setAcceptMode(QFileDialog.AcceptOpen)
d.setFileMode(QFileDialog.ExistingFiles)
d.setNameFilter("Python files (*.py)")
if not d.exec_():
return
for p in d.selectedFiles():
self.pluginEditorDock.add_file(unicode(p))
@pyqtSignature("")
def on_actionConfigurePlugin_triggered(self):
ana = self.current_plugin()
if not ana:
return
ana.configure()
@pyqtSignature("")
def on_actionRefreshPlugins_triggered(self):
self.reload_plugins()
@pyqtSignature("")
def on_actionNewPlugin_triggered(self):
self.pluginEditorDock.new_file()
@pyqtSignature("")
def on_actionSavePlugin_triggered(self):
self.pluginEditorDock.save_current()
@pyqtSignature("")
def on_actionSavePluginAs_triggered(self):
self.pluginEditorDock.save_current(True)
@pyqtSignature("")
def on_actionShowPluginFolder_triggered(self):
QDesktopServices.openUrl(QUrl.fromLocalFile(
os.path.dirname(self.current_plugin_path())))
@pyqtSignature("")
def on_actionRemotePlugin_triggered(self):
plugin = self._save_plugin_before_run()
if not plugin:
return
self._execute_remote_plugin(plugin)
def send_plugin_info(self, name, path, selections, config, io_files):
""" Send information to start a plugin to the configured remote
script.
:param str name: Name of the plugin class
:param str path: Path of the plugin file
:param str selections: Serialized selections to use
:param str config: Pickled plugin configuration
:param list io_files: List of paths to required IO plugins.
"""
# Save files to circumvent length limit for command line
selection_path = os.path.join(
self.selection_path, '.temp_%f_.sel' % time.time())
with open(selection_path, 'w') as f:
f.write(selections)
params = ['python', self.remote_script,
name, path, selection_path, '-cf', '-sf',
'-c', config, '-dd', AnalysisPlugin.data_dir]
if io_files:
params.append('-io')
params.extend(io_files)
params.extend(api.config.remote_script_parameters)
p = subprocess.Popen(
params, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
std = RemoteThread(p, self.remote_process_counter, False, self)
err = RemoteThread(p, self.remote_process_counter, True, self)
self.connect(std, SIGNAL("output(int, QString)"), self.output_std)
self.connect(err, SIGNAL("output(int, QString)"), self.output_err)
self.connect(err, SIGNAL("execution_complete(int)"),
self.remote_plugin_done)
std.start()
err.start()
self.process_threads[self.remote_process_counter] = (std, err, p)
print '[#%d started]' % self.remote_process_counter
self.remote_process_counter += 1
def output_std(self, id_, line):
print '[#%d]' % id_, line
def output_err(self, id_, line):
sys.stderr.write(line + '\n')
def remote_plugin_done(self, id_):
print '[#%d done]' % id_
def clean_finished_process_threads(self):
""" Periodically checks if threads for remote plugin output are
still running, removes them otherwise.
"""
for k in self.process_threads.keys():
t = self.process_threads[k]
if not t[0].isRunning() and not t[1].isRunning():
t[2].wait()
del self.process_threads[k]
QTimer.singleShot(1000, self.clean_finished_process_threads)
@pyqtSignature("")
def on_actionEdit_Startup_Script_triggered(self):
self.pluginEditorDock.add_file(self.startup_script)
@pyqtSignature("")
def on_actionRestorePluginConfigurations_triggered(self):
self.reload_plugins(False)
def on_pluginsTreeView_doubleClicked(self, index):
self.on_actionRunPlugin_triggered()
def on_pluginsTreeView_customContextMenuRequested(self, pos):
self.menuPluginsContext.popup(self.pluginsTreeView.mapToGlobal(pos))
def plugin_saved(self, path):
if path == self.startup_script:
return
plugin_path = os.path.normpath(os.path.realpath(path))
in_dirs = False
for p in self.plugin_paths:
directory = os.path.normpath(os.path.realpath(p))
if os.path.commonprefix([plugin_path, directory]) == directory:
in_dirs = True
break
if in_dirs:
self.reload_plugins()
elif api.config.ask_plugin_path:
if QMessageBox.question(self, 'Warning',
'The file "%s"' % plugin_path +
' is not in the currently valid plugin '
'directories. Do you want to open the '
                                    'directory settings now?',
QMessageBox.Yes | QMessageBox.No) == \
QMessageBox.No:
return
self.on_actionSettings_triggered()
def current_plugin(self):
""" Return the currently selected plugin object
"""
item = self.pluginsTreeView.currentIndex()
if not item:
return None
return self.plugin_model.data(item, self.plugin_model.DataRole)
def current_plugin_path(self):
""" Return the path of the file from which the currently selected
plugin has been loaded.
"""
item = self.pluginsTreeView.currentIndex()
if not item:
return None
return self.plugin_model.data(item, self.plugin_model.FilePathRole)
def get_plugin(self, name):
""" Get plugin with the given name. Raises a SpykeException if
multiple plugins with this name exist. Returns None if no such
plugin exists.
"""
plugins = self.plugin_model.get_plugins_for_name(name)
if not plugins:
return None
if len(plugins) > 1:
raise SpykeException('Multiple plugins named "%s" exist!' % name)
return plugins[0]
def start_plugin(self, name, current=None, selections=None,
finish_progress=True):
""" Start first plugin with given name and return result of start()
method. Raises a SpykeException if not exactly one plugins with
this name exist.
:param str name: Name of the plugin (as specified by the get_name()
method in the plugin.
:param current: A DataProvider to use as current selection. If
``None``, the regular current selection from the GUI is used.
Default: ``None``
:param list selections: A list of DataProvider objects to use as
selections. If ``None``, the regular selections from the GUI
are used. Default: ``None``
:param bool finish_progress: If ``True``, progress indicators are
closed automatically after the plugin finishes.
"""
plugins = self.plugin_model.get_plugins_for_name(name)
if not plugins:
raise SpykeException('No plugin named "%s" exists!' % name)
if len(plugins) > 1:
raise SpykeException('Multiple plugins named "%s" exist!' % name)
if api.config.save_plugin_before_starting:
e = self.pluginEditorDock.get_editor(plugins[0].source_file)
if self.pluginEditorDock.file_was_changed(e):
if not self.pluginEditorDock.save_file(e):
return
plugins = self.plugin_model.get_plugins_for_name(name)
if not plugins:
return None
if len(plugins) > 1:
raise SpykeException(
'Multiple plugins named "%s" exist!' % name)
return self._run_plugin(plugins[0], current, selections,
finish_progress)
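    # Usage sketch (editorial note, not part of the original class; the plugin
    # name below is hypothetical):
    #     result = main_window.start_plugin('My Analysis')
    # saves the plugin file first if configured to do so, then calls the
    # plugin's start() method with the current selection and returns its
    # result.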
def start_plugin_remote(self, plugin, current=None, selections=None):
""" Start given plugin (or plugin with given name) remotely.
Does not return any value. Raises a SpykeException if not
exactly one plugins with this name exist.
"""
if not isinstance(plugin, AnalysisPlugin):
plugins = self.plugin_model.get_plugins_for_name(plugin)
if not plugins:
raise SpykeException('No plugin named "%s" exists!' % plugin)
if len(plugins) > 1:
raise SpykeException(
'Multiple plugins named "%s" exist!' % plugin)
plugin = plugins[0]
self._execute_remote_plugin(plugin, current, selections)
def on_file_available(self, available):
""" Callback when availability of a file for a plugin changes.
"""
self.actionSavePlugin.setEnabled(available)
self.actionSavePluginAs.setEnabled(available)
##### General housekeeping ###########################################
@pyqtSignature("")
def on_actionSettings_triggered(self):
settings = SettingsWindow(self.selection_path, self.filter_path,
AnalysisPlugin.data_dir, self.remote_script,
self.plugin_paths, self)
if settings.exec_() == settings.Accepted:
try:
self.clean_temporary_selection_files(self.selection_path)
except:
pass # Does not matter if e.g. old directory does not exist
self.selection_path = settings.selection_path()
self.filter_path = settings.filter_path()
self.remote_script = settings.remote_script()
AnalysisPlugin.data_dir = settings.data_path()
self.plugin_paths = settings.plugin_paths()
if self.plugin_paths:
self.pluginEditorDock.set_default_path(self.plugin_paths[-1])
self.reload_plugins()
@pyqtSignature("")
def on_actionExit_triggered(self):
self.close()
@pyqtSignature("")
def on_actionAbout_triggered(self):
from .. import __version__
about = QMessageBox(self)
about.setWindowTitle(u'About Spyke Viewer')
about.setTextFormat(Qt.RichText)
about.setIconPixmap(QPixmap(':/Application/Main'))
about.setText(
u'Version ' + __version__ +
u'<br><br>Spyke Viewer is an application for navigating, '
u'analyzing and visualizing electrophysiological datasets.<br>'
u'<br><a href=http://www.ni.tu-berlin.de/software/spykeviewer>'
u'www.ni.tu-berlin.de/software/spykeviewer</a>'
u'<br><br>Copyright 2012, 2013 \xa9 Robert Pr\xf6pper<br>'
u'Neural Information Processing Group<br>'
u'TU Berlin, Germany<br><br>'
u'If you use Spyke Viewer in work that leads to a scientific '
u'publication, please cite:<br>'
u'Pr\xf6pper, R. and Obermayer, K. (2013). Spyke Viewer: a '
u'flexible and extensible platform for electrophysiological data '
u'analysis.<br>'
u'<i>Front. Neuroinform.</i> 7:26. doi: 10.3389/fninf.2013.00026'
u'<br><br>Licensed under the terms of the BSD license.<br>'
u'Icons from the Crystal Project (\xa9 2006-2007 Everaldo Coelho)')
about.show()
@pyqtSignature("")
def on_actionDocumentation_triggered(self):
webbrowser.open('http://spyke-viewer.readthedocs.org')
@pyqtSignature("")
def on_actionSpyke_Repository_triggered(self):
webbrowser.open('http://spyke-viewer.g-node.org')
def _get_closed_folders(self):
if not hasattr(self, 'plugin_model'):
return []
folders = self.plugin_model.get_all_folders()
paths = []
for f in folders:
if self.pluginsTreeView.isExpanded(f):
continue
path = [f.data()]
p = f.parent()
while p.row() >= 0:
path.append(p.data())
p = p.parent()
paths.append('/'.join(reversed(path)))
return paths
def clean_temporary_selection_files(self, path):
""" Remove temporary .temp_..._.sel files from a directory.
These files are written when executing plugins remotely.
"""
for f in os.listdir(path):
if f.startswith('.temp_') and f.endswith('_.sel'):
os.remove(os.path.join(path, f))
def closeEvent(self, event):
""" Saves filters, plugin configs and GUI state.
"""
if not self.pluginEditorDock.close_all():
event.ignore()
return
# Ensure that selection folder exists
if not os.path.exists(self.selection_path):
try:
os.makedirs(self.selection_path)
except OSError:
QMessageBox.critical(
self, 'Error', 'Could not create selection directory!')
self.save_selections_to_file(
os.path.join(self.selection_path, '.current.sel'))
self.clean_temporary_selection_files(self.selection_path)
# Ensure that filters folder exists
if not os.path.exists(self.filter_path):
try:
os.makedirs(self.filter_path)
except OSError:
QMessageBox.critical(self, 'Error',
'Could not create filter directory!')
self.filterDock.save()
# Save GUI configuration (docks and toolbars)
settings = QSettings()
settings.setValue('windowGeometry', self.saveGeometry())
settings.setValue('windowState', self.saveState())
# Save further configurations
settings.setValue('pluginPaths', self.plugin_paths)
settings.setValue('selectionPath', self.selection_path)
settings.setValue('filterPath', self.filter_path)
settings.setValue('remoteScript', self.remote_script)
settings.setValue('dataPath', AnalysisPlugin.data_dir)
# Save plugin configurations
configs = self.get_plugin_configs()
configs_path = os.path.join(self.data_path, 'plugin_configs.p')
with open(configs_path, 'w') as f:
pickle.dump(configs, f)
# Save closed plugin folders
settings.setValue('closedPluginFolders', self._get_closed_folders())
super(MainWindow, self).closeEvent(event)
# Prevent lingering threads
self.fileTreeView.setModel(None)
del self.file_system_model
self.pluginsTreeView.setModel(None)
del self.plugin_model
|
|
""" manage PyTables query interface via Expressions """
import ast
from functools import partial
from typing import Any, Dict, Optional, Tuple
import numpy as np
from pandas._libs.tslibs import Timedelta, Timestamp
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas as pd
import pandas.core.common as com
from pandas.core.computation import expr, ops, scope as _scope
from pandas.core.computation.common import ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.core.construction import extract_array
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
class PyTablesScope(_scope.Scope):
__slots__ = ("queryables",)
queryables: Dict[str, Any]
def __init__(
self,
level: int,
global_dict=None,
local_dict=None,
queryables: Optional[Dict[str, Any]] = None,
):
super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
self.queryables = queryables or {}
class Term(ops.Term):
env: PyTablesScope
def __new__(cls, name, env, side=None, encoding=None):
if isinstance(name, str):
klass = cls
else:
klass = Constant
return object.__new__(klass)
def __init__(self, name, env: PyTablesScope, side=None, encoding=None):
super().__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
        # must be a queryable (an indexable column name)
if self.side == "left":
# Note: The behavior of __new__ ensures that self.name is a str here
if self.name not in self.env.queryables:
raise NameError(f"name {repr(self.name)} is not defined")
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
# read-only property overwriting read/write property
@property # type: ignore[misc]
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env: PyTablesScope, side=None, encoding=None):
assert isinstance(env, PyTablesScope), type(env)
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
op: str
queryables: Dict[str, Any]
condition: Optional[str]
def __init__(self, op: str, lhs, rhs, queryables: Dict[str, Any], encoding):
super().__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if isinstance(right, ConditionBinOp):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if isinstance(right, FilterBinOp):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(
self.op, left, right, queryables=self.queryables, encoding=self.encoding
).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self) -> bool:
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self) -> bool:
"""
return True if this is a valid column name for generation (e.g. an
actual column in the table)
"""
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), "kind", None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), "meta", None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), "metadata", None)
def generate(self, v) -> str:
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return f"({self.lhs} {self.op} {val})"
def convert_value(self, v) -> "TermValue":
"""
convert the expression that is in the term to something that is
accepted by pytables
"""
def stringify(value):
if self.encoding is not None:
return pprint_thing_encoded(value, encoding=self.encoding)
return pprint_thing(value)
kind = ensure_decoded(self.kind)
meta = ensure_decoded(self.meta)
if kind == "datetime64" or kind == "datetime":
if isinstance(v, (int, float)):
v = stringify(v)
v = ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert("UTC")
return TermValue(v, v.value, kind)
elif kind == "timedelta64" or kind == "timedelta":
if isinstance(v, str):
v = Timedelta(v).value
else:
v = Timedelta(v, unit="s").value
return TermValue(int(v), v, kind)
elif meta == "category":
metadata = extract_array(self.metadata, extract_numpy=True)
result = metadata.searchsorted(v, side="left")
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, "integer")
elif kind == "integer":
v = int(float(v))
return TermValue(v, v, kind)
elif kind == "float":
v = float(v)
return TermValue(v, v, kind)
elif kind == "bool":
if isinstance(v, str):
v = not v.strip().lower() in [
"false",
"f",
"no",
"n",
"none",
"0",
"[]",
"{}",
"",
]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, str):
# string quoting
return TermValue(v, stringify(v), "string")
else:
raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")
def convert_values(self):
pass
class FilterBinOp(BinOp):
filter: Optional[Tuple[Any, Any, pd.Index]] = None
def __repr__(self) -> str:
if self.filter is None:
return "Filter: Not Initialized"
return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")
def invert(self):
""" invert the filter """
if self.filter is not None:
self.filter = (
self.filter[0],
self.generate_filter_op(invert=True),
self.filter[2],
)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
rhs = self.conform(self.rhs)
values = list(rhs)
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ["==", "!="] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, pd.Index(values))
return self
return None
# equality conditions
if self.op in ["==", "!="]:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, pd.Index(values))
else:
raise TypeError(
f"passing a filterable condition to a non-table indexer [{self}]"
)
return self
def generate_filter_op(self, invert: bool = False):
if (self.op == "!=" and not invert) or (self.op == "==" and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __repr__(self) -> str:
return pprint_thing(f"[Condition : [{self.condition}]]")
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError(
"cannot use an invert condition when passing to numexpr"
)
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ["==", "!="]:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = f"({' | '.join(vs)})"
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != "~":
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None and (
issubclass(klass, ConditionBinOp)
and operand.condition is not None
or not issubclass(klass, ConditionBinOp)
and issubclass(klass, FilterBinOp)
and operand.filter is not None
):
return operand.invert()
return None
class PyTablesExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super().__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(
self,
f"visit_{bin_node}",
lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
)
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp("~", self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError("Unary addition not supported")
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(
ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
)
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
# only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
if isinstance(slobj, Term):
# In py39 np.ndarray lookups with Term containing int raise
slobj = slobj.value
try:
return self.const_type(value[slobj], self.env)
except TypeError as err:
raise ValueError(
f"cannot subscript {repr(value)} with {repr(slobj)}"
) from err
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = type(node.ctx)
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError(f"Invalid Attribute context {ctx.__name__}")
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
raise TypeError(
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
return w
class PyTablesExpr(expr.Expr):
"""
Hold a pytables-like expression, comprised of possibly multiple 'terms'.
Parameters
----------
where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
a PyTablesExpr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
_visitor: Optional[PyTablesExprVisitor]
env: PyTablesScope
def __init__(
self,
where,
queryables: Optional[Dict[str, Any]] = None,
encoding=None,
scope_level: int = 0,
):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict: DeepChainMap[Any, Any] = DeepChainMap()
if isinstance(where, PyTablesExpr):
local_dict = where.env.scope
_where = where.expr
elif isinstance(where, (list, tuple)):
where = list(where)
for idx, w in enumerate(where):
if isinstance(w, PyTablesExpr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
_where = " & ".join(f"({w})" for w in com.flatten(where))
else:
_where = where
self.expr = _where
self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, str):
self.env.queryables.update(queryables)
self._visitor = PyTablesExprVisitor(
self.env,
queryables=queryables,
parser="pytables",
engine="pytables",
encoding=encoding,
)
self.terms = self.parse()
def __repr__(self) -> str:
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError as err:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid condition"
) from err
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError as err:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid filter"
) from err
return self.condition, self.filter
class TermValue:
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind: str):
assert isinstance(kind, str), kind
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding) -> str:
""" quote the string if not encoded else encode and return """
if self.kind == "string":
if encoding is not None:
return str(self.converted)
return f'"{self.converted}"'
elif self.kind == "float":
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return str(self.converted)
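# Editorial sketch (not part of the original module): TermValue pairs the
# python-level value with the converted form that is rendered into the
# pytables expression string, e.g.
#     TermValue("bar", "bar", "string").tostring(None)  -> '"bar"'
#     TermValue(3.5, 3.5, "float").tostring(None)       -> '3.5'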
def maybe_expression(s) -> bool:
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, str):
return False
ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
# make sure we have an op at least
return any(op in s for op in ops)
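# Editorial sketch (not part of the original module): maybe_expression() only
# checks that a candidate string contains some operator symbol, so
#     maybe_expression("index>5")   -> True
#     maybe_expression("A")         -> False
#     maybe_expression(["index>5"]) -> False   (not a string)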
|
|
"""
pymc.distributions
A collection of common probability distributions for stochastic
nodes in PyMC.
"""
from __future__ import division
from .dist_math import *
from numpy.random import uniform as runiform, normal as rnormal
__all__ = ['Uniform', 'Flat', 'Normal', 'Beta', 'Exponential', 'Laplace',
'T', 'Cauchy', 'HalfCauchy', 'Gamma', 'Weibull','Bound',
'Tpos', 'Lognormal', 'ChiSquared', 'HalfNormal', 'Wald',
'Pareto', 'InverseGamma']
def get_tau(tau=None, sd=None):
if tau is None:
if sd is None:
return 1.
else:
return sd ** -2
else:
if sd is not None:
raise ValueError("Can't pass both tau and sd")
else:
return tau
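# Worked example (editorial note, not part of the original module):
#     get_tau(sd=2.0)  -> 2.0 ** -2 == 0.25   (precision from standard deviation)
#     get_tau(tau=4.0) -> 4.0                 (precision passed through unchanged)
#     get_tau()        -> 1.0                 (default precision)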
class Uniform(Continuous):
"""
Continuous uniform log-likelihood.
.. math::
f(x \mid lower, upper) = \frac{1}{upper-lower}
Parameters
----------
lower : float
Lower limit (defaults to 0)
upper : float
Upper limit (defaults to 1)
"""
def __init__(self, lower=0, upper=1, *args, **kwargs):
super(Uniform, self).__init__(*args, **kwargs)
self.lower = lower
self.upper = upper
self.mean = (upper + lower) / 2.
self.median = self.mean
def logp(self, value):
lower = self.lower
upper = self.upper
return bound(
-log(upper - lower),
lower <= value, value <= upper)
def random(self, size=None):
        return runiform(self.lower, self.upper, size)
class Flat(Continuous):
"""
Uninformative log-likelihood that returns 0 regardless of
the passed value.
"""
def __init__(self, *args, **kwargs):
super(Flat, self).__init__(*args, **kwargs)
self.median = 0
def logp(self, value):
return zeros_like(value)
class Normal(Continuous):
"""
Normal log-likelihood.
.. math::
f(x \mid \mu, \tau) = \sqrt{\frac{\tau}{2\pi}} \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
Parameters
----------
mu : float
Mean of the distribution.
tau : float
Precision of the distribution, which corresponds to
:math:`1/\sigma^2` (tau > 0).
sd : float
Standard deviation of the distribution. Alternative parameterization.
.. note::
- :math:`E(X) = \mu`
- :math:`Var(X) = 1/\tau`
"""
def __init__(self, mu=0.0, tau=None, sd=None, *args, **kwargs):
super(Normal, self).__init__(*args, **kwargs)
self.mean = self.median = self.mode = self.mu = mu
self.tau = get_tau(tau=tau, sd=sd)
self.variance = 1. / self.tau
def logp(self, value):
tau = self.tau
mu = self.mu
return bound(
(-tau * (value - mu) ** 2 + log(tau / pi / 2.)) / 2.,
tau > 0)
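# Worked check (editorial note, not part of the original module): at
# value == mu the quadratic term in Normal.logp vanishes, so the expression
# reduces to 0.5 * log(tau / (2 * pi)), the log-density of the normal
# distribution at its mode.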
class HalfNormal(Continuous):
"""
Half-normal log-likelihood, a normal distribution with mean 0 limited
to the domain :math:`x \in [0, \infty)`.
.. math::
f(x \mid \tau) = \sqrt{\frac{2\tau}{\pi}}\exp\left\{ {\frac{-x^2 \tau}{2}}\right\}
:Parameters:
- `x` : :math:`x \ge 0`
- `tau` : tau > 0
"""
def __init__(self, tau=None, sd=None, *args, **kwargs):
super(HalfNormal, self).__init__(*args, **kwargs)
self.tau = get_tau(tau=tau, sd=sd)
self.mean = sqrt(2 / (pi * self.tau))
self.variance = (1. - 2/pi) / self.tau
def logp(self, value):
tau = self.tau
return bound(
-0.5 * tau * value**2 + 0.5 * log(tau * 2. / pi),
tau > 0)
class Wald(Continuous):
"""
Wald (inverse Gaussian) log likelihood.
.. math::
f(x \mid \mu) = \sqrt{\frac{1}{2\pi x^3}} \exp\left\{ \frac{-(x-\mu)^2}{2 \mu^2 x} \right\}
Parameters
----------
mu : float
Mean of the distribution.
.. note::
- :math:`E(X) = \mu`
- :math:`Var(X) = \mu^3`
"""
def __init__(self, mu, *args, **kwargs):
super(Wald, self).__init__(*args, **kwargs)
self.mu = mu
self.mean = mu
self.variance = mu**3
def logp(self, value):
mu = self.mu
return bound(
((- (value - mu) ** 2) /
(2. * value * (mu ** 2))) + logpow(2. * pi * value **3, -0.5),
mu > 0)
class Beta(Continuous):
"""
Beta log-likelihood. The conjugate prior for the parameter
:math:`p` of the binomial distribution.
.. math::
f(x \mid \alpha, \beta) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha) \Gamma(\beta)} x^{\alpha - 1} (1 - x)^{\beta - 1}
Parameters
----------
alpha : float
alpha > 0
beta : float
beta > 0
.. note::
- :math:`E(X)=\frac{\alpha}{\alpha+\beta}`
- :math:`Var(X)=\frac{\alpha \beta}{(\alpha+\beta)^2(\alpha+\beta+1)}`
"""
def __init__(self, alpha, beta, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.alpha = alpha
self.beta = beta
self.mean = alpha / (alpha + beta)
self.variance = alpha * beta / (
(alpha + beta) ** 2 * (alpha + beta + 1))
def logp(self, value):
alpha = self.alpha
beta = self.beta
return bound(
gammaln(alpha + beta) - gammaln(alpha) - gammaln(beta) +
logpow(
value, alpha - 1) + logpow(1 - value, beta - 1),
0 <= value, value <= 1,
alpha > 0,
beta > 0)
class Exponential(Continuous):
"""
Exponential distribution
Parameters
----------
lam : float
lam > 0
rate or inverse scale
"""
def __init__(self, lam, *args, **kwargs):
super(Exponential, self).__init__(*args, **kwargs)
self.lam = lam
self.mean = 1. / lam
self.median = self.mean * log(2)
self.mode = 0
self.variance = lam ** -2
def logp(self, value):
lam = self.lam
return bound(log(lam) - lam * value,
value > 0,
lam > 0)
class Laplace(Continuous):
"""
Laplace distribution
Parameters
----------
mu : float
mean
b : float
scale
"""
def __init__(self, mu, b, *args, **kwargs):
super(Laplace, self).__init__(*args, **kwargs)
self.b = b
self.mean = self.median = self.mode = self.mu = mu
self.variance = 2 * b ** 2
def logp(self, value):
mu = self.mu
b = self.b
return -log(2 * b) - abs(value - mu) / b
class Lognormal(Continuous):
"""
Log-normal log-likelihood.
Distribution of any random variable whose logarithm is normally
distributed. A variable might be modeled as log-normal if it can
be thought of as the multiplicative product of many small
independent factors.
.. math::
f(x \mid \mu, \tau) = \sqrt{\frac{\tau}{2\pi}}\frac{
\exp\left\{ -\frac{\tau}{2} (\ln(x)-\mu)^2 \right\}}{x}
:Parameters:
- `x` : x > 0
- `mu` : Location parameter.
- `tau` : Scale parameter (tau > 0).
.. note::
:math:`E(X)=e^{\mu+\frac{1}{2\tau}}`
:math:`Var(X)=(e^{1/\tau}-1)e^{2\mu+\frac{1}{\tau}}`
"""
def __init__(self, mu=0, tau=1, *args, **kwargs):
super(Lognormal, self).__init__(*args, **kwargs)
self.mu = mu
self.tau = tau
self.mean = exp(mu + 1./(2*tau))
self.median = exp(mu)
self.mode = exp(mu - 1./tau)
self.variance = (exp(1./tau) - 1) * exp(2*mu + 1./tau)
def logp(self, value):
mu = self.mu
tau = self.tau
return bound(
-0.5*tau*(log(value) - mu)**2 + 0.5*log(tau/(2.*pi)) - log(value),
tau > 0)
class T(Continuous):
"""
Non-central Student's T log-likelihood.
Describes a normal variable whose precision is gamma distributed. If
only nu parameter is passed, this specifies a standard (central)
Student's T.
.. math::
f(x|\mu,\lambda,\nu) = \frac{\Gamma(\frac{\nu +
1}{2})}{\Gamma(\frac{\nu}{2})}
\left(\frac{\lambda}{\pi\nu}\right)^{\frac{1}{2}}
\left[1+\frac{\lambda(x-\mu)^2}{\nu}\right]^{-\frac{\nu+1}{2}}
Parameters
----------
nu : int
Degrees of freedom
mu : float
Location parameter (defaults to 0)
lam : float
Scale parameter (defaults to 1)
"""
def __init__(self, nu, mu=0, lam=1, *args, **kwargs):
super(T, self).__init__(*args, **kwargs)
self.nu = nu
self.lam = lam
self.mean = self.median = self.mode = self.mu = mu
self.variance = switch((nu > 2) * 1, nu / (nu - 2) / lam, inf)
def logp(self, value):
nu = self.nu
mu = self.mu
lam = self.lam
return bound(
gammaln((nu + 1.0) / 2.0) + .5 * log(lam / (nu * pi)) - gammaln(nu / 2.0) - (nu + 1.0) / 2.0 * log(1.0 + lam * (value - mu) ** 2 / nu),
lam > 0,
nu > 0)
class Pareto(Continuous):
"""
Pareto log-likelihood. The Pareto is a continuous, positive
probability distribution with two parameters. It is often used
to characterize wealth distribution, or other examples of the
80/20 rule.
.. math::
f(x \mid \alpha, m) = \frac{\alpha m^{\alpha}}{x^{\alpha+1}}
Parameters
----------
alpha : float
Shape parameter (alpha>0)
m : float
Scale parameter (m>0)
.. note::
    - :math:`E(x)=\frac{\alpha m}{\alpha-1}` for :math:`\alpha > 1`
    - :math:`Var(x)=\frac{m^2 \alpha}{(\alpha-1)^2(\alpha-2)}` for :math:`\alpha > 2`
"""
def __init__(self, alpha, m, *args, **kwargs):
super(Pareto, self).__init__(*args, **kwargs)
self.alpha = alpha
self.m = m
self.mean = switch(gt(alpha,1), alpha * m / (alpha - 1.), inf)
self.variance = switch(gt(alpha,2), (alpha * m**2) / ((alpha - 2.) * (alpha - 1.)**2), inf)
def logp(self, value):
alpha = self.alpha
m = self.m
return bound(
log(alpha) + logpow(m, alpha) - logpow(value, alpha+1),
alpha > 0,
m > 0,
value >= m)
class Cauchy(Continuous):
"""
Cauchy log-likelihood. The Cauchy distribution is also known as the
Lorentz or the Breit-Wigner distribution.
.. math::
f(x \mid \alpha, \beta) = \frac{1}{\pi \beta [1 + (\frac{x-\alpha}{\beta})^2]}
Parameters
----------
alpha : float
Location parameter
beta : float
Scale parameter > 0
.. note::
Mode and median are at alpha.
"""
def __init__(self, alpha, beta, *args, **kwargs):
super(Cauchy, self).__init__(*args, **kwargs)
self.median = self.mode = self.alpha = alpha
self.beta = beta
def logp(self, value):
alpha = self.alpha
beta = self.beta
return bound(
-log(pi) - log(beta) - log(1 + ((
value - alpha) / beta) ** 2),
beta > 0)
class HalfCauchy(Continuous):
"""
Half-Cauchy log-likelihood. Simply the absolute value of Cauchy.
.. math::
f(x \mid \beta) = \frac{2}{\pi \beta [1 + (\frac{x}{\beta})^2]}
:Parameters:
- `beta` : Scale parameter (beta > 0).
.. note::
- x must be non-negative.
"""
def __init__(self, beta, *args, **kwargs):
super(HalfCauchy, self).__init__(*args, **kwargs)
self.mode = 0
self.beta = beta
def logp(self, value):
beta = self.beta
return bound(
log(2) - log(pi) - log(beta) - log(1 + (value / beta) ** 2),
beta > 0,
value >= 0)
class Gamma(Continuous):
"""
Gamma log-likelihood.
Represents the sum of alpha exponentially distributed random variables, each
of which has mean beta.
.. math::
f(x \mid \alpha, \beta) = \frac{\beta^{\alpha}x^{\alpha-1}e^{-\beta x}}{\Gamma(\alpha)}
Parameters
----------
x : float
        :math:`x \ge 0`
alpha : float
Shape parameter (alpha > 0).
beta : float
Rate parameter (beta > 0).
.. note::
- :math:`E(X) = \frac{\alpha}{\beta}`
- :math:`Var(X) = \frac{\alpha}{\beta^2}`
"""
def __init__(self, alpha, beta, *args, **kwargs):
super(Gamma, self).__init__(*args, **kwargs)
self.alpha = alpha
self.beta = beta
self.mean = alpha / beta
        self.mode = maximum((alpha - 1) / beta, 0)
self.variance = alpha / beta ** 2
def logp(self, value):
alpha = self.alpha
beta = self.beta
return bound(
-gammaln(alpha) + logpow(
beta, alpha) - beta * value + logpow(value, alpha - 1),
value >= 0,
alpha > 0,
beta > 0)
class InverseGamma(Continuous):
"""
Inverse gamma log-likelihood, the reciprocal of the gamma distribution.
.. math::
f(x \mid \alpha, \beta) = \frac{\beta^{\alpha}}{\Gamma(\alpha)} x^{-\alpha - 1} \exp\left(\frac{-\beta}{x}\right)
Parameters
----------
alpha : float
Shape parameter (alpha > 0).
beta : float
Scale parameter (beta > 0).
.. note::
:math:`E(X)=\frac{\beta}{\alpha-1}` for :math:`\alpha > 1`
    :math:`Var(X)=\frac{\beta^2}{(\alpha-1)^2(\alpha-2)}` for :math:`\alpha > 2`
"""
def __init__(self, alpha, beta=1, *args, **kwargs):
super(InverseGamma, self).__init__(*args, **kwargs)
self.alpha = alpha
self.beta = beta
        self.mean = switch(gt(alpha, 1), beta / (alpha - 1.), inf)
        self.mode = beta / (alpha + 1.)
        self.variance = switch(gt(alpha, 2), (beta ** 2) / ((alpha - 2.) * (alpha - 1.)**2), inf)
def logp(self, value):
alpha = self.alpha
beta = self.beta
return bound(
logpow(beta, alpha) - gammaln(alpha) - beta / value + logpow(value, -alpha-1),
value > 0,
alpha > 0,
beta > 0)
class ChiSquared(Gamma):
"""
Chi-squared :math:`\chi^2` log-likelihood.
.. math::
f(x \mid \nu) = \frac{x^{(\nu-2)/2}e^{-x/2}}{2^{\nu/2}\Gamma(\nu/2)}
:Parameters:
- `x` : > 0
- `nu` : [int] Degrees of freedom ( nu > 0 )
.. note::
- :math:`E(X)=\nu`
- :math:`Var(X)=2\nu`
"""
def __init__(self, nu, *args, **kwargs):
self.nu = nu
super(ChiSquared, self).__init__(alpha=nu/2., beta=0.5, *args, **kwargs)
class Weibull(Continuous):
"""
Weibull log-likelihood
.. math::
f(x \mid \alpha, \beta) = \frac{\alpha x^{\alpha - 1}
\exp(-(\frac{x}{\beta})^{\alpha})}{\beta^\alpha}
:Parameters:
- `x` : :math:`x \ge 0`
- `alpha` : alpha > 0
- `beta` : beta > 0
.. note::
- :math:`E(x)=\beta \Gamma(1+\frac{1}{\alpha})`
    - :math:`median(x)=\beta (\ln 2)^{1/\alpha}`
    - :math:`Var(x)=\beta^2 \left[\Gamma(1+\frac{2}{\alpha}) - \Gamma(1+\frac{1}{\alpha})^2\right]`
"""
def __init__(self, alpha, beta, *args, **kwargs):
super(Weibull, self).__init__(*args, **kwargs)
self.alpha = alpha
self.beta = beta
self.mean = beta * exp(gammaln(1 + 1./alpha))
        self.median = beta * log(2)**(1./alpha)
        self.variance = (beta**2) * exp(gammaln(1 + 2./alpha)) - self.mean**2
def logp(self, value):
alpha = self.alpha
beta = self.beta
return bound(
(log(alpha) - log(beta) + (alpha - 1)*log(value/beta)
- (value/beta)**alpha),
value >= 0,
alpha > 0,
beta > 0)
class Bounded(Continuous):
"""A bounded distribution."""
def __init__(self, distribution, lower, upper, *args, **kwargs):
self.dist = distribution.dist(*args, **kwargs)
self.__dict__.update(self.dist.__dict__)
self.__dict__.update(locals())
if hasattr(self.dist, 'mode'):
self.mode = self.dist.mode
def logp(self, value):
return bound(
self.dist.logp(value),
self.lower <= value, value <= self.upper)
class Bound(object):
"""Creates a new bounded distribution"""
def __init__(self, distribution, lower=-inf, upper=inf):
self.distribution = distribution
self.lower = lower
self.upper = upper
def __call__(self, *args, **kwargs):
first, args = args[0], args[1:]
return Bounded(first, self.distribution, self.lower, self.upper, *args, **kwargs)
    def dist(self, *args, **kwargs):
return Bounded.dist(self.distribution, self.lower, self.upper, *args, **kwargs)
Tpos = Bound(T, 0)
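# Usage sketch (editorial note, not part of the original module):
#     PositiveNormal = Bound(Normal, lower=0)
# builds a factory analogous to Tpos above: the resulting distribution behaves
# like Normal, but its logp is -inf outside the interval [lower, upper].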
|
|
#!/usr/bin/python
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# http://blockly.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two files:
# blockly_compressed.js
# blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster development
# cycle since there is no need to rebuild or recompile, just reload.
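# Typical invocation (editorial note): run "python build.py" from the Blockly
# root directory, with the Closure library checked out alongside it as
# ../closure-library-read-only/ (see the __main__ block at the bottom).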
import httplib, json, os, re, sys, threading, urllib
def import_path(fullpath):
"""Import a file with full path specification.
Allows one to import from any directory, something __import__ does not do.
Args:
fullpath: Path and filename of import.
Returns:
An imported module.
"""
path, filename = os.path.split(fullpath)
filename, ext = os.path.splitext(filename)
sys.path.append(path)
module = __import__(filename)
reload(module) # Might be out of date
del sys.path[-1]
return module
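# Example (editorial note), mirroring the __main__ block below:
#     calcdeps = import_path('../closure-library-read-only/closure/bin/calcdeps.py')
# temporarily appends the containing directory to sys.path, imports the module
# 'calcdeps', reloads it and returns it.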
HEADER = ('// Do not edit this file; automatically generated by build.py.\n'
'"use strict";\n')
class Gen_uncompressed(threading.Thread):
"""Generate a JavaScript file that loads Blockly's raw files.
Runs in a separate thread.
"""
def __init__(self, search_paths):
threading.Thread.__init__(self)
self.search_paths = search_paths
def run(self):
target_filename = 'blockly_uncompressed.js'
f = open(target_filename, 'w')
f.write(HEADER)
f.write("""
window.BLOCKLY_DIR = (function() {
// Find name of current directory.
var scripts = document.getElementsByTagName('script');
var re = new RegExp('(.+)[\/]blockly_uncompressed\.js$');
for (var x = 0, script; script = scripts[x]; x++) {
var match = re.exec(script.src);
if (match) {
return match[1];
}
}
alert('Could not detect Blockly\\'s directory name.');
return '';
})();
window.BLOCKLY_BOOT = function() {
// Execute after Closure has loaded.
if (!window.goog) {
alert('Error: Closure not found. Read this:\\n' +
'http://code.google.com/p/blockly/wiki/Closure\\n');
}
// Build map of all dependencies (used and unused).
var dir = window.BLOCKLY_DIR.match(/[^\\/]+$/)[0];
""")
add_dependency = []
base_path = calcdeps.FindClosureBasePath(self.search_paths)
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
add_dependency.append(calcdeps.GetDepsLine(dep, base_path))
add_dependency = '\n'.join(add_dependency)
# Find the Blockly directory name and replace it with a JS variable.
# This allows blockly_uncompressed.js to be compiled on one computer and be
# used on another, even if the directory name differs.
m = re.search('[\\/]([^\\/]+)[\\/]core[\\/]blockly.js', add_dependency)
add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
'([\\/]core[\\/])', '\\1" + dir + "\\2', add_dependency)
f.write(add_dependency + '\n')
provides = []
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
if not dep.filename.startswith('../'):
provides.extend(dep.provides)
provides.sort()
f.write('\n')
f.write('// Load Blockly.\n')
for provide in provides:
f.write('goog.require(\'%s\');\n' % provide)
f.write("""
delete window.BLOCKLY_DIR;
delete window.BLOCKLY_BOOT;
};
document.write('<script type="text/javascript" src="' + window.BLOCKLY_DIR +
'/../closure-library-read-only/closure/goog/base.js"></script>');
document.write('<script type="text/javascript">window.BLOCKLY_BOOT()</script>');
""")
f.close()
print 'SUCCESS: ' + target_filename
class Gen_compressed(threading.Thread):
"""Generate a JavaScript file that contains all of Blockly's core and all
required parts of Closure, compiled together.
Uses the Closure Compiler's online API.
Runs in a separate thread.
"""
def __init__(self, search_paths):
threading.Thread.__init__(self)
self.search_paths = search_paths
def run(self):
target_filename = 'blockly_compressed.js'
# Define the parameters for the POST request.
params = [
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('use_closure_library', 'true'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
]
# Read in all the source files.
filenames = calcdeps.CalculateDependencies(self.search_paths,
['core/blockly.js'])
for filename in filenames:
# Filter out the Closure files (the compiler will add them).
if filename.startswith('../'):
continue
f = open(filename)
params.append(('js_code', ''.join(f.readlines())))
f.close()
# Send the request to Google.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', urllib.urlencode(params), headers)
response = conn.getresponse()
json_str = response.read()
conn.close()
# Parse the JSON response.
json_data = json.loads(json_str)
def file_lookup(name):
if not name.startswith('Input_'):
return '???'
n = int(name[6:])
return filenames[n]
if json_data.has_key('errors'):
errors = json_data['errors']
for error in errors:
print 'FATAL ERROR'
print error['error']
print '%s at line %d:' % (
file_lookup(error['file']), error['lineno'])
print error['line']
print (' ' * error['charno']) + '^'
else:
if json_data.has_key('warnings'):
warnings = json_data['warnings']
for warning in warnings:
print 'WARNING'
print warning['warning']
print '%s at line %d:' % (
file_lookup(warning['file']), warning['lineno'])
print warning['line']
print (' ' * warning['charno']) + '^'
print
code = HEADER + '\n' + json_data['compiledCode']
stats = json_data['statistics']
original_b = stats['originalSize']
compressed_b = stats['compressedSize']
if original_b > 0 and compressed_b > 0:
f = open(target_filename, 'w')
f.write(code)
f.close()
original_kb = int(original_b / 1024 + 0.5)
compressed_kb = int(compressed_b / 1024 + 0.5)
ratio = int(float(compressed_b) / float(original_b) * 100 + 0.5)
print 'SUCCESS: ' + target_filename
print 'Size changed from %d KB to %d KB (%d%%).' % (
original_kb, compressed_kb, ratio)
else:
print 'UNKNOWN ERROR'
if __name__ == '__main__':
try:
calcdeps = import_path(
'../closure-library-read-only/closure/bin/calcdeps.py')
except ImportError:
print """Error: Closure not found. Read this:
http://code.google.com/p/blockly/wiki/Closure"""
sys.exit(1)
search_paths = calcdeps.ExpandDirectories(
['core/', '../closure-library-read-only/'])
# Run both tasks in parallel threads.
# Uncompressed is limited by processor speed.
# Compressed is limited by network and server speed.
Gen_uncompressed(search_paths).start()
Gen_compressed(search_paths).start()
|
|
import os
import textwrap
import warnings
import numpy as np
from astropy.table import Table, join
import astropy.units as u
from spectral_cube import SpectralCube
from pyspeckit.spectrum.models.ammonia_constants import voff_lines_dict
from skimage.morphology import disk, erosion
# The following imports are needed by plot_all_moments below
# (binary_opening could equally be taken from skimage.morphology).
from scipy.ndimage import binary_opening
from astropy.io import fits
from glob import glob
import aplpy
from . import first_look
from . import gasPipeline
from . import catalogs
from . import baseline
def FirstLook(regions=None, file_extension=None, release='all', trim_edge=True, overwrite=True):
"""
This runs through cubes in a directory tree and generates first
look products for each of them. This assumes a directory naming
convention specified in our observing logs.
    Parameters
    ----------
regions : list
List of region names (strings) to be included. If empty, all
regions in the log file are searched for and reduced.
release : string
Name of data release. Must match boolean column name in the
Observation Log.
file_extension : string
Name of file extensions to be searched for. Defaults to release name.
trim_edge : boolean
If true, use disk erosion to mask noisy map edges
    overwrite : boolean
        If True, overwrite existing rms and moment maps.
Note: The GAS file naming convention is
REGION_LINENAME_EXTENSION.fits. For example, for NGC1333 in
ammonia (1,1), this would look for
NGC1333_NH3_11_all.fits
"""
if file_extension is None:
file_extension = '_'+release
    RegionCatalog = catalogs.GenerateRegions(release=release)
    if regions is not None:
        keep = [idx for idx, row in enumerate(RegionCatalog)
                if row['Region name'] in regions]
        RegionCatalog = RegionCatalog[keep]
for ThisRegion in RegionCatalog:
region_name=ThisRegion['Region name']
print("Now NH3(1,1)")
vsys = ThisRegion['VAVG']*u.km/u.s
throw = 2*u.km/u.s + ThisRegion['VRANGE']*u.km/u.s/2
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
voff11 = voff_lines_dict['oneone']
try:
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
if trim_edge:
s = trim_edge_spectral_cube(s)
mask = np.ones(s.shape[0],dtype=np.bool)
for deltav in voff11:
mask*=(np.abs(s.spectral_axis-(deltav*u.km/u.s+vsys)) > throw)
'''
a_rms = (np.where(mask != np.roll(mask,1)))[0]
b_rms = (np.where(mask != np.roll(mask,-1)))[0]
index_rms=first_look.create_index(a_rms, b_rms)
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_in, index_rms=index_rms, index_peak=index_peak)
'''
cube_rms = s.with_mask(mask[:,None,None])
rms = cube_rms.std(axis=0)
mom_mask = ~mask
cube_mom = s.with_mask(mom_mask[:,None,None])
mom_0 = cube_mom.moment(order=0)
mom_1 = cube_mom.moment(order=1)
rms.write(file_in.replace('.fits','_rms.fits'),overwrite=overwrite)
mom_0.write(file_in.replace('.fits','_mom0.fits'),overwrite=overwrite)
mom_1.write(file_in.replace('.fits','_mom1.fits'),overwrite=overwrite)
except IOError:
warnings.warn("File not found: {0}".format(file_in))
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
try:
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
mask = (np.abs(s.spectral_axis-vsys) > throw)
'''
a_rms = [s.closest_spectral_channel(vsys+2*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-2*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_in, index_rms=index_rms,
index_peak=index_peak)
'''
cube_rms = s.with_mask(mask[:,None,None])
rms = cube_rms.std(axis=0)
mom_mask = ~mask
cube_mom = s.with_mask(mom_mask[:,None,None])
mom_0 = cube_mom.moment(order=0)
mom_1 = cube_mom.moment(order=1)
rms.write(file_in.replace('.fits','_rms.fits'),overwrite=overwrite)
mom_0.write(file_in.replace('.fits','_mom0.fits'),overwrite=overwrite)
mom_1.write(file_in.replace('.fits','_mom1.fits'),overwrite=overwrite)
except IOError:
warnings.warn("File not found {0}".format(file_in))
def trim_edge_spectral_cube(scube):
""" trim_edge_cube: Function that reads in a spectral cube and removes the edges
in the cube.
It runs the erode function to make sure that pixels within 3 pixels away
from the edges are blanked.
This is useful to remove very noisy pixels due to lower coverage by KFPA.
Updated earlier function to work with spectral cube instance
----------------------------------------
Warning: This function modifies the cube.
"""
#
mask = np.isfinite(scube)
if len(scube.shape) == 2:
mask_2d = mask[:,:]
else:
mask_2d = mask[0,:,:]
# remove image edges
mask_2d[:,0] = mask_2d[:,-1] = False
mask_2d[0,:] = mask_2d[-1,:] = False
# now erode image (using disk) and convert back to 3D mask
# then replace all voxels with NaN
mask &= erosion(mask_2d,disk(3))
scube = scube.with_mask(mask)
scube_erode = scube.with_fill_value(np.nan)
return scube_erode
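# Editorial sketch (the file name is hypothetical): a typical call is
#     cube = trim_edge_spectral_cube(SpectralCube.read('NGC1333_NH3_11_all.fits'))
# which blanks the outer border of the map (3-pixel disk erosion) before the
# moment maps are computed in FirstLook above.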
def plot_all_moments(file_extension='base_all'):
# Get list of regions - run from images/ directory
# Assume directories correspond to regions to be imaged
region_list = glob("*/")
for i in range(len(region_list)):
region_list[i] = region_list[i].strip("/")
line_list = ['NH3_11','NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
label_list = ['NH$_3$(1,1)','NH$_3$(2,2)','NH$_3$(3,3)','C$_2$S','HC$_5$N',
'HC$_7$N (21-20)','HC$_7$N (22-21)']
extension = file_extension
color_table='magma'
text_color='black'
text_size = 14
beam_color='#d95f02' # previously used '#E31A1C'
# Try single set of contours for first look images
w11_step = 0.3
cont_levs=2**np.arange( 0,20)*w11_step
# Masking of small (noisy) regions
selem = np.array([[0,1,0],[1,1,1],[0,1,0]])
for region in region_list:
file_w11='{0}/{0}_NH3_11_{1}_mom0.fits'.format(region,extension)
if os.path.isfile(file_w11):
LowestContour= cont_levs[0]*0.5
w11_hdu = fits.open(file_w11)
map = w11_hdu[0].data
mask = binary_opening(map > LowestContour, selem)
MaskedMap = mask*map
w11_hdu[0].data = MaskedMap
for i in range(len(line_list)):
line_i=line_list[i]
label_i=label_list[i]
file_mom0='{0}/{0}_{1}_{2}_mom0.fits'.format(region,line_i,extension)
if os.path.isfile(file_mom0):
line_hdu = fits.open(file_mom0)
# Use percentiles to set initial plot colourscale ranges
v_min=np.nanpercentile(line_hdu[0].data,0.1)
v_max=np.nanpercentile(line_hdu[0].data,99.9)
fig=aplpy.FITSFigure(file_mom0, hdu=0)
if line_i == 'NH3_11':
                        cbar_ticks = [0, 3, 6, 12, 24, 48, 96]
                        fig.show_colorscale(cmap=color_table, vmin=v_min, vmax=v_max, stretch='log',
                                            vmid=v_min - (1. * np.abs(v_min)))
# add colorbar
fig.add_colorbar()
#fig.colorbar.set_width(0.15)
fig.colorbar.show(box_orientation='horizontal', width=0.1, pad=0.0, ticks=cbar_ticks,
location='top', axis_label_text='Integrated Intensity (K km s$^{-1}$)')
elif (line_i in ['NH3_22','NH3_33']) :
                        cbar_ticks = [0, 1, 2, 3, 6, 12]
                        fig.show_colorscale(cmap=color_table, vmin=v_min, vmax=v_max, stretch='linear',
                                            vmid=v_min - (1. * np.abs(v_min)))
# add colorbar
fig.add_colorbar()
#fig.colorbar.set_width(0.15)
fig.colorbar.show(box_orientation='horizontal', width=0.1, pad=0.0, ticks= cbar_ticks,
location='top', axis_label_text='Integrated Intensity (K km s$^{-1}$)')
else:
fig.show_colorscale( cmap=color_table,vmin=v_min, vmax=v_max)
# add colorbar
fig.add_colorbar()
#fig.colorbar.set_width(0.15)
fig.colorbar.show( box_orientation='horizontal', width=0.1, pad=0.0,
location='top', axis_label_text='Integrated Intensity (K km s$^{-1}$)')
fig.colorbar.set_font(family='sans_serif',size=text_size)
fig.colorbar.set_axis_label_font(family='sans_serif',size=text_size)
fig.set_nan_color('0.95')
#
fig.show_contour(w11_hdu, colors='gray', levels=cont_levs)
# Axis labels
fig.axis_labels.set_font(family='sans_serif',size=text_size)
# Ticks
fig.ticks.set_color(text_color)
fig.tick_labels.set_font(family='sans_serif',size=text_size)
fig.tick_labels.set_style('colons')
fig.tick_labels.set_xformat('hh:mm:ss')
fig.tick_labels.set_yformat('dd:mm')
# Add beam
fig.add_beam(major=0.0088441,minor=0.0088441,angle=0)
fig.beam.set_color(beam_color)
fig.beam.set_corner('bottom left')
'''
# Scale bar
# magic line of code to obtain scale in arcsec obtained from
# http://www.astropy.org/astropy-tutorials/Quantities.html
                    ang_sep = (plot_param['scalebar_size'].to(u.au)/plot_param['distance']).to(u.arcsec, equivalencies=u.dimensionless_angles())
fig.add_scalebar(ang_sep.to(u.degree))
fig.scalebar.set_corner(plot_param['scalebar_pos'])
fig.scalebar.set_font(family='sans_serif',size=text_size)
fig.scalebar.set(color=text_color)
fig.scalebar.set_label('{0:4.2f}'.format(plot_param['scalebar_size']))
'''
# Labels
fig.add_label(0.025, 0.1,
'{0}\n{1}'.format(region,label_i),
relative=True, color=text_color,
horizontalalignment='left',
family='sans_serif',size=text_size)
# fig.set_system_latex(True)
fig.save( 'figures/{0}_{1}_{2}_mom0_map.pdf'.format(region,line_i,extension),adjust_bbox=True)
fig.close()
else:
print('File {0} not found'.format(file_mom0))
else:
print('File {0} not found'.format(file_w11))
def FirstLook_OrionA(file_extension='_all'):
"""
Function to create First Look products for OrionA. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name='OrionA'
vsys = 10.*u.km/u.s
throw = 4.0*u.km/u.s
print("Now NH3(1,1)")
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
spaxis = s.spectral_axis.value
index_rms = baseline.ammoniaWindow(spaxis,spaxis,window=4,v0=vsys.value)
index_peak= ~baseline.tightWindow(spaxis,spaxis,window=3,v0=vsys.value)
first_look.peak_rms( file_in, index_rms=index_rms, index_peak=index_peak)
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+2*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-2*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
first_look.peak_rms( file_in, index_rms=index_rms,
index_peak=index_peak)
#region_name='OrionA'
#print("Now NH3(1,1)")
#a_rms = [ 0, 158, 315, 428, 530, 693]
#b_rms = [ 60, 230, 327, 438, 604, 735]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(326,470)
#file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
## 2nd order polynomial
# file_out=file_in.replace('.fits','_base2.fits')
#file_new=first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=2)
#first_look.peak_rms( file_new, index_rms=index_rms, index_peak=index_peak)
#
#print("Now NH3(2,2)")
#a_rms = [ 0, 260, 520, 730]
#b_rms = [150, 380, 610, 850]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(380,520)
#line='NH3_22'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
## 2nd order polynomial
#file_out=file_in.replace('.fits','_base2.fits')
#file_new=first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=2)
#first_look.peak_rms( file_new, index_rms=index_rms, index_peak=index_peak)
#
#print("Now NH3(3,3)")
#a_rms = [ 10, 250, 530]
#b_rms = [210, 310, 930]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(410,540)
#line='NH3_33'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
##2nd order polynomial
#file_out=file_in.replace('.fits','_base2.fits')
#file_new=first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=2)
#first_look.peak_rms( file_new, index_rms=index_rms, index_peak=index_peak)
#
#print("Now CCS")
#a_rms = [ 0, 260]
#b_rms = [200, 490]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(220,250)
#line='C2S'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=2)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
#print("Now HC5N")
# HC5N channel range must be updated
#a_rms = [ 0, 500]
#b_rms = [380, 545]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(400,480)
#line='HC5N'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=2)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
#print("Now HC7N 21-20")
# HC7N channel range must be updated
#a_rms = [ 0, 160, 480]
#b_rms = [115, 360, 525]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(400,460)
#line='HC7N_21_20'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=2)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
#print("Now HC7N 22-21")
# HC7N channel range must be updated
#a_rms = [ 0, 480]
#b_rms = [360, 525]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(400,460)
#line='HC7N_22_21'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=2)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
def FirstLook_B18(file_extension='_all'):
"""
Function to create First Look products for B18. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name='B18'
vsys = 6.*u.km/u.s
throw = 2.0*u.km/u.s
print("Now NH3(1,1)")
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
spaxis = s.spectral_axis.value
index_rms = baseline.ammoniaWindow(spaxis,spaxis,window=4,v0=vsys.value)
index_peak= ~baseline.tightWindow(spaxis,spaxis,window=3,v0=vsys.value)
first_look.peak_rms( file_in, index_rms=index_rms, index_peak=index_peak)
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+2*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-2*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
first_look.peak_rms( file_in, index_rms=index_rms,
index_peak=index_peak)
#region_name='B18'
#print("Now NH3(1,1)")
#a_rms = [ 0, 115, 280, 385, 490, 655]
#b_rms = [ 80, 230, 345, 455, 625, 760]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(352,381)
#file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
#print("Now NH3(2,2)")
#a_rms = [ 0, 440]
#b_rms = [ 409, 870]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(420,435)
#line='NH3_22'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
#print("Now NH3(3,3)")
#a_rms = [ 0, 530]
#b_rms = [ 409, 960]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(410,485)
#line='NH3_33'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
#print("Now CCS")
#a_rms = [ 0, 245]
#b_rms = [ 210, 490]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(225,243)
#line='C2S'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
#print("Now HC5N")
#a_rms = [ 10, 435]
#b_rms = [ 409, 540]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(414,430)
#line='HC5N'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
#print("Now HC7N_21_20")
#a_rms = [ 10, 435]
#b_rms = [ 409, 540]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(412,430)
#line='HC7N_21_20'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
#print("Now HC7N_22_21")
#a_rms = [ 10, 435]
#b_rms = [ 409, 540]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(412,430)
#line='HC7N_22_21'
#file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
#first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
#first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
def FirstLook_L1688(file_extension='_all'):
"""
Function to create First Look products for L1688. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name='L1688'
vsys = 3.5*u.km/u.s
throw = 5*u.km/u.s
print("Now NH3(1,1)")
#a_rms = [ 0, 121, 290, 404, 505, 665]
#b_rms = [ 74, 239, 332, 447, 611, 749]
#index_rms=first_look.create_index( a_rms, b_rms)
#index_peak=np.arange(350,377)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
# file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
# first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
spaxis = s.spectral_axis.value
index_rms = baseline.ammoniaWindow(spaxis,spaxis,window=4,v0=vsys.value)
index_peak= ~baseline.tightWindow(spaxis,spaxis,window=3,v0=vsys.value)
first_look.peak_rms( file_in, index_rms=index_rms, index_peak=index_peak)
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+2*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-2*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
#file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
# first_look.baseline( file_in, file_out,
# index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_in, index_rms=index_rms,
index_peak=index_peak)
# print("Now NH3(2,2)")
# a_rms = [ 0, 349]
# b_rms = [ 285, 649]
# index_rms=first_look.create_index( a_rms, b_rms)
# index_peak=np.arange(298,342)
# line='NH3_22'
# file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
# file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
# first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
# first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
# #
# print("Now NH3(3,3)")
# a_rms = [ 0, 395]
# b_rms = [ 272, 649]
# index_rms=first_look.create_index( a_rms, b_rms)
# index_peak=np.arange(298,342)
# line='NH3_33'
# file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
# file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
# first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
# first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
# #
# print("Now CCS")
# a_rms = [ 0, 369]
# b_rms = [ 278, 649]
# index_rms=first_look.create_index( a_rms, b_rms)
# index_peak=np.arange(307,325)
# line='C2S'
# file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
# file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
# first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
# first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
# #
# print("Now HC5N")
# a_rms = [ 0, 358]
# b_rms = [ 288, 649]
# index_rms=first_look.create_index( a_rms, b_rms)
# index_peak=np.arange(306,317)
# line='HC5N'
# file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
# file_out=file_in.replace(file_extension+'.fits',
# '_base'+file_extension+'.fits')
# first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
# first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
# #
# #HC7N (21-20) shows an absorption feature offset by ~91 km/s (at 23.6951 GHz)
# #from the adopted rest frequency (23.6879 GHz). There is no emission line.
# #Below are the channel indices for the absorption feature.
# #a_rms = [ 0, 520]
# #b_rms = [480, 650]
# #index_peak = np.arange(485,510)
# #
# #The code didn't produce the fits file for HC7N (22-21).
def FirstLook_L1689(file_extension='_all'):
"""
Function to create First Look products for L1689. The
file_extension parameter is used to select the proper files to be
processed.
"""
region_name='L1689'
print("Now NH3(1,1)")
a_rms = [ 0, 150, 310, 420, 530, 690]
b_rms = [ 60, 230, 330, 440, 610, 760]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(340,420)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 3.9*u.km/u.s
throw = 5*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+2*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-2*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_SerAqu(file_extension='_all'):
"""
Function to create First Look products for Serpens_Aquila. The
file_extension parameter is used to select the proper files to be
processed.
"""
region_name='Serpens_Aquila'
print("Now NH3(1,1)")
a_rms = [ 0, 150, 310, 420, 530, 690]
b_rms = [ 60, 230, 330, 440, 610, 780]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(340,420)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 6.35*u.km/u.s
throw = 8*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+2*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-2*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_L1455(file_extension='_all'):
"""
Function to create First Look products for L1455. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name='L1455'
print("Now NH3(1,1)")
a_rms = [ 0, 140, 300, 410, 520, 680]
b_rms = [ 105, 270, 370, 480, 630, 745]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(350,430)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
a_rms = [ 0, 340]
b_rms = [ 290, 648]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(260,400)
line='NH3_22'
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(3,3)")
a_rms = [ 0, 340] # No lines. Using the same as NH3(2,2)
b_rms = [ 290, 648] # No lines. Using the same as NH3(2,2)
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(260,400) # No lines. Using the same as NH3(2,2)
line='NH3_33'
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now CCS")
a_rms = [ 0, 350]
b_rms = [ 290, 648]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(309,334)
line='C2S'
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now HC5N")
a_rms = [ 0, 350]
b_rms = [ 290, 648]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(315,325)
line='HC5N'
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now HC7N_21_20")
a_rms = [ 0, 180]
b_rms = [ 130, 275]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(128,147)
line='HC7N_21_20'
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now HC7N_22_21")
a_rms = [ 0, 340] # No lines. Using the same as HC7N_21_20
b_rms = [ 290, 648] # No lines. Using the same as HC7N_21_20
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(308,328) # No lines. Using the same as HC7N_21_20
line='HC7N_22_21'
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
def FirstLook_NGC1333(file_extension='_all'):
"""
Function to create First Look products for NGC1333. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name='NGC1333'
vsys = 7.9*u.km/u.s
throw = 2.0*u.km/u.s
print("Now NH3(1,1)")
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
spaxis = s.spectral_axis.value
index_rms = baseline.ammoniaWindow(spaxis,spaxis,window=4,v0=vsys.value)
index_peak= ~baseline.tightWindow(spaxis,spaxis,window=3,v0=vsys.value)
first_look.peak_rms( file_in, index_rms=index_rms, index_peak=index_peak)
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+2*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-2*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
first_look.peak_rms( file_in, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_B1(file_extension='_all'):
"""
Function to create First Look products for B1. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name='B1'
print("Now NH3(1,1)")
a_rms = [ 0, 130, 290, 400, 500, 660]
b_rms = [ 70, 240, 340, 440, 620, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(340,400)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms,
polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now the rest")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 6.6*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_IC348(file_extension='_all'):
"""
Function to create First Look products for IC348. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name='IC348'
print("Now NH3(1,1)")
a_rms = [ 0, 130, 290, 400, 500, 660]
b_rms = [ 70, 240, 340, 440, 620, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(340,400)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms,
polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 9.0*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_B59(file_extension='_all'):
"""
Function to create First Look products for B59. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name='B59'
print("Now NH3(1,1)")
a_rms = [ 0, 130, 290, 400, 500, 660]
b_rms = [ 70, 240, 340, 440, 620, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(340,400)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 3.5*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_Cepheus_L1228(file_extension='_all'):
"""
Function to create First Look products for Cepheus L1228. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name = 'Cepheus_L1228'
print("Now NH3(1,1)")
a_rms = [ 0, 135, 290, 405, 505, 665]
b_rms = [ 70, 245, 350, 455, 625, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(350,410)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = -8.0*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_Cepheus_L1251(file_extension='_all'):
"""
Function to create First Look products for Cepheus_L1251. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name = 'Cepheus_L1251'
print("Now NH3(1,1)")
a_rms = [ 0, 135, 290, 405, 505, 665]
b_rms = [ 70, 245, 350, 455, 625, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(350,410)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = -3.8*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_B1E(file_extension='_all'):
"""
Function to create First Look products for B1E. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name = 'B1E'
print("Now NH3(1,1)")
a_rms = [ 0, 135, 290, 405, 505, 665]
b_rms = [ 70, 245, 350, 455, 625, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(350,410)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 7.3*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_HC2(file_extension='_all'):
"""
Function to create First Look products for Heiles Cloud 2. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name = 'HC2'
print("Now NH3(1,1)")
a_rms = [ 0, 135, 290, 405, 505, 665]
b_rms = [ 70, 245, 350, 455, 625, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(350,410)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
#
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 5.3*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_OrionB_NGC2023_2024(file_extension='_all'):
"""
Function to create First Look products for OrionB NGC2023-2024. The
file_extension parameter is used to select the proper files to be
processed.
"""
region_name = 'OrionB_NGC2023-2024'
print("Now NH3(1,1)")
a_rms = [ 0, 150, 310, 420, 520, 680]
b_rms = [ 70, 225, 325, 435, 600, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(350,410)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
#
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 10.2*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_OrionB_NGC2068_2071(file_extension='_all'):
"""
Function to create First Look products for OrionB_NGC2068_2071. The
file_extension parameter is used to select the proper files to be
processed.
"""
region_name = 'OrionB_NGC2068-2071'
print("Now NH3(1,1)")
a_rms = [ 0, 120, 270, 390, 480, 640]
b_rms = [ 60, 230, 330, 440, 600, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(330,390)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
#
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 10.0*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_L1451(file_extension='_all'):
"""
Function to create First Look products for L1451. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name = 'L1451'
print("Now NH3(1,1)")
a_rms = [ 0, 155, 310, 420, 525, 680]
b_rms = [ 70, 245, 350, 460, 625, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(350,415)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
#
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 4.3*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_IC5146(file_extension='_all'):
"""
Function to create First Look products for IC5146. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name = 'IC5146'
print("Now NH3(1,1)")
a_rms = [ 0, 135, 290, 405, 505, 660]
b_rms = [ 70, 235, 340, 445, 615, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(350,410)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
#
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 4.0*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
def FirstLook_template(file_extension='_all'):
"""
Function to create First Look products for TEMPLATE. The file_extension
parameter is used to select the proper files to be processed.
"""
region_name = 'TEMPLATE'
print("Now NH3(1,1)")
a_rms = [ 0, 135, 290, 405, 505, 665]
b_rms = [ 70, 245, 350, 455, 625, 740]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(350,410)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
#
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
print("Now NH3(2,2)")
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 7.3*u.km/u.s
throw = 2.0*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+3*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-3*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
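# Example usage (illustrative): to process a new region, copy FirstLook_template,
# set region_name and vsys to real values, and call it like the existing regions, e.g.
#   FirstLook_NGC1333(file_extension='_all')
# which expects cubes named NGC1333/NGC1333_<line>_all.fits relative to the working directory.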
def FirstLook_SerMWC(file_extension='_all'):
"""
Function to create First Look products for Serpens_MWC297. The
file_extension parameter is used to select the proper files to be
processed.
"""
region_name='Serpens_MWC297'
print("Now NH3(1,1)")
a_rms = [ 0, 150, 310, 420, 530, 690]
b_rms = [ 60, 230, 330, 440, 610, 780]
index_rms=first_look.create_index( a_rms, b_rms)
index_peak=np.arange(340,420)
file_in='{0}/{0}_NH3_11{1}.fits'.format(region_name,file_extension)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms, index_peak=index_peak)
#
linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21']
vsys = 6.35*u.km/u.s
throw = 8*u.km/u.s
for line in linelist:
file_in = '{0}/{0}_{1}{2}.fits'.format(region_name,line,file_extension)
s = SpectralCube.read(file_in)
s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio')
a_rms = [s.closest_spectral_channel(vsys+2*throw),
s.closest_spectral_channel(vsys-throw)]
b_rms = [s.closest_spectral_channel(vsys+throw),
s.closest_spectral_channel(vsys-2*throw)]
index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s),
s.closest_spectral_channel(vsys-3*u.km/u.s))
index_rms=first_look.create_index( a_rms, b_rms)
file_out=file_in.replace(file_extension+'.fits',
'_base'+file_extension+'.fits')
first_look.baseline( file_in, file_out,
index_clean=index_rms, polyorder=1)
first_look.peak_rms( file_out, index_rms=index_rms,
index_peak=index_peak)
|
|
# Copyright 2013-2017 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest
from mock import Mock, patch
import time
try:
from twisted.test import proto_helpers
from twisted.python.failure import Failure
from cassandra.io import twistedreactor
except ImportError:
twistedreactor = None # NOQA
from cassandra.connection import _Frame
from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback
class TestTwistedTimer(unittest.TestCase):
"""
Simple test class that is used to validate that the TimerManager, and timer
classes function appropriately with the twisted infrastructure
"""
def setUp(self):
if twistedreactor is None:
raise unittest.SkipTest("Twisted libraries not available")
twistedreactor.TwistedConnection.initialize_reactor()
def test_multi_timer_validation(self):
"""
Verify that the timers are called in the correct order
"""
twistedreactor.TwistedConnection.initialize_reactor()
connection = twistedreactor.TwistedConnection('1.2.3.4',
cql_version='3.0.1')
# Tests timers submitted in order at various timeouts
submit_and_wait_for_completion(self, connection, 0, 100, 1, 100)
# Tests timers submitted in reverse order at various timeouts
submit_and_wait_for_completion(self, connection, 100, 0, -1, 100)
# Tests timers submitted in varying order at various timeouts
submit_and_wait_for_completion(self, connection, 0, 100, 1, 100, True)
def test_timer_cancellation(self, *args):
"""
Verify that timer cancellation is honored
"""
# Various lists for tracking callback stage
connection = twistedreactor.TwistedConnection('1.2.3.4',
cql_version='3.0.1')
timeout = .1
callback = TimerCallback(timeout)
timer = connection.create_timer(timeout, callback.invoke)
timer.cancel()
# Release context to allow the timer thread to run.
time.sleep(.2)
timer_manager = connection._loop._timers
# Assert that the cancellation was honored
self.assertFalse(timer_manager._queue)
self.assertFalse(timer_manager._new_timers)
self.assertFalse(callback.was_invoked())
class TestTwistedProtocol(unittest.TestCase):
def setUp(self):
if twistedreactor is None:
raise unittest.SkipTest("Twisted libraries not available")
twistedreactor.TwistedConnection.initialize_reactor()
self.tr = proto_helpers.StringTransportWithDisconnection()
self.tr.connector = Mock()
self.mock_connection = Mock()
self.tr.connector.factory = twistedreactor.TwistedConnectionClientFactory(
self.mock_connection)
self.obj_ut = twistedreactor.TwistedConnectionProtocol()
self.tr.protocol = self.obj_ut
def tearDown(self):
pass
def test_makeConnection(self):
"""
Verify that the protocol class notifies the connection
object that a successful connection was made.
"""
self.obj_ut.makeConnection(self.tr)
self.assertTrue(self.mock_connection.client_connection_made.called)
def test_receiving_data(self):
"""
Verify that the dataReceived() callback writes the data to
the connection object's buffer and calls handle_read().
"""
self.obj_ut.makeConnection(self.tr)
self.obj_ut.dataReceived('foobar')
self.assertTrue(self.mock_connection.handle_read.called)
self.mock_connection._iobuf.write.assert_called_with("foobar")
class TestTwistedClientFactory(unittest.TestCase):
def setUp(self):
if twistedreactor is None:
raise unittest.SkipTest("Twisted libraries not available")
twistedreactor.TwistedConnection.initialize_reactor()
self.mock_connection = Mock()
self.obj_ut = twistedreactor.TwistedConnectionClientFactory(
self.mock_connection)
def test_client_connection_failed(self):
"""
Verify that connection failed causes the connection object to close.
"""
exc = Exception('a test')
self.obj_ut.clientConnectionFailed(None, Failure(exc))
self.mock_connection.defunct.assert_called_with(exc)
def test_client_connection_lost(self):
"""
Verify that connection lost causes the connection object to close.
"""
exc = Exception('a test')
self.obj_ut.clientConnectionLost(None, Failure(exc))
self.mock_connection.defunct.assert_called_with(exc)
class TestTwistedConnection(unittest.TestCase):
def setUp(self):
if twistedreactor is None:
raise unittest.SkipTest("Twisted libraries not available")
twistedreactor.TwistedConnection.initialize_reactor()
self.reactor_cft_patcher = patch(
'twisted.internet.reactor.callFromThread')
self.reactor_run_patcher = patch('twisted.internet.reactor.run')
self.mock_reactor_cft = self.reactor_cft_patcher.start()
self.mock_reactor_run = self.reactor_run_patcher.start()
self.obj_ut = twistedreactor.TwistedConnection('1.2.3.4',
cql_version='3.0.1')
def tearDown(self):
self.reactor_cft_patcher.stop()
self.reactor_run_patcher.stop()
def test_connection_initialization(self):
"""
Verify that __init__() works correctly.
"""
self.mock_reactor_cft.assert_called_with(self.obj_ut.add_connection)
self.obj_ut._loop._cleanup()
self.mock_reactor_run.assert_called_with(installSignalHandlers=False)
@patch('twisted.internet.reactor.connectTCP')
def test_add_connection(self, mock_connectTCP):
"""
Verify that add_connection() gives us a valid twisted connector.
"""
self.obj_ut.add_connection()
self.assertTrue(self.obj_ut.connector is not None)
self.assertTrue(mock_connectTCP.called)
def test_client_connection_made(self):
"""
Verify that _send_options_message() is called in
client_connection_made()
"""
self.obj_ut._send_options_message = Mock()
self.obj_ut.client_connection_made()
self.obj_ut._send_options_message.assert_called_with()
@patch('twisted.internet.reactor.connectTCP')
def test_close(self, mock_connectTCP):
"""
Verify that close() disconnects the connector and errors callbacks.
"""
self.obj_ut.error_all_requests = Mock()
self.obj_ut.add_connection()
self.obj_ut.is_closed = False
self.obj_ut.close()
self.obj_ut.connector.disconnect.assert_called_with()
self.assertTrue(self.obj_ut.connected_event.is_set())
self.assertTrue(self.obj_ut.error_all_requests.called)
def test_handle_read__incomplete(self):
"""
Verify that handle_read() processes incomplete messages properly.
"""
self.obj_ut.process_msg = Mock()
self.assertEqual(self.obj_ut._iobuf.getvalue(), b'') # buf starts empty
# incomplete header
self.obj_ut._iobuf.write(b'\x84\x00\x00\x00\x00')
self.obj_ut.handle_read()
self.assertEqual(self.obj_ut._iobuf.getvalue(), b'\x84\x00\x00\x00\x00')
# full header, but incomplete body
self.obj_ut._iobuf.write(b'\x00\x00\x00\x15')
self.obj_ut.handle_read()
self.assertEqual(self.obj_ut._iobuf.getvalue(),
b'\x84\x00\x00\x00\x00\x00\x00\x00\x15')
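# 9-byte v3+ header plus a declared body length of 0x15 (21 bytes) means the
# complete frame should end at byte 30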
self.assertEqual(self.obj_ut._current_frame.end_pos, 30)
# verify we never attempted to process the incomplete message
self.assertFalse(self.obj_ut.process_msg.called)
def test_handle_read__fullmessage(self):
"""
Verify that handle_read() processes complete messages properly.
"""
self.obj_ut.process_msg = Mock()
self.assertEqual(self.obj_ut._iobuf.getvalue(), b'') # buf starts empty
# write a complete message, plus 'NEXT' (to simulate next message)
# assumes protocol v3+ as default Connection.protocol_version
body = b'this is the drum roll'
extra = b'NEXT'
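# The header bytes below decode as version=4 (0x84 & 0x7f), flags=1, stream=2,
# opcode=3, body length=0x15 (21 == len(body)), matching the _Frame asserted at the end.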
self.obj_ut._iobuf.write(
b'\x84\x01\x00\x02\x03\x00\x00\x00\x15' + body + extra)
self.obj_ut.handle_read()
self.assertEqual(self.obj_ut._iobuf.getvalue(), extra)
self.obj_ut.process_msg.assert_called_with(
_Frame(version=4, flags=1, stream=2, opcode=3, body_offset=9, end_pos= 9 + len(body)), body)
@patch('twisted.internet.reactor.connectTCP')
def test_push(self, mock_connectTCP):
"""
Verify that push() calls transport.write(data).
"""
self.obj_ut.add_connection()
self.obj_ut.push('123 pickup')
self.mock_reactor_cft.assert_called_with(
self.obj_ut.connector.transport.write, '123 pickup')
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# For python version 2!
import datetime, time, sys, os, json
import logging, argparse
from grab.spider import Spider, Task
import dbdrivers, utils, display
import likes, lenta, lifenews
class NewsParser(Spider):
def __init__(self, config, modules, display, **kwargs):
Spider.__init__(self, **kwargs)
self.modules = modules
self.config = config
self.display = display
self.display.parser = self
self.major_tasks = [] # This only affects how these tasks are displayed
schema = []
for module in self.modules:
schema.extend(modules[module].schema)
self.major_tasks.extend(getattr(modules[module], 'major_tasks', []))
self.schema = schema
if self.config['repair']:
self.repair_tables = [
table_name
for table_name, _ in schema
if self.find_modules_method('hook_' + table_name)[0]
]
else:
self.repair_tables = []
self.hosts = { # Elements have the following structure:
# 'http://lenta.ru/' : {
# 'last_request_time' : datetime.datetime(...),
# 'last_url' : 'http://lenta.ru/news/world/2015/05/12/merkel'
# 'success' : 10,
# 'failure' : 0,
# 'in_process' : False,
# }
}
self.db_stats = {
table : {
# 'missed_rows' : 0,
# 'init' : self.db.rows_count(table),
'added' : 0, # Number of rows added
}
for table, _ in self.schema
}
self.prepare_count = 0
def prepare(self):
self.display.prepare_begin()
self.prepare_count += 1
self.reached_repair_limit = False
if 'csv' in self.config:
self.db = dbdrivers.CSV(self.schema, self.db_write_callback, self.display, self.config['csv'])
elif 'sqlite' in self.config:
if self.config['repair']:
cache_keys = zip(*self.schema)[0]
else:
cache_keys = []
self.db = dbdrivers.SQLite(self.schema, self.db_write_callback, self.display, self.config['sqlite'], cache_keys=cache_keys)
for module in self.modules:
self.modules[module].db = self.db
self.display.db = self.db
self.modules_state = {
module_name : {
'generator' : getattr(module, 'generate_tasks')()
if hasattr(module, 'generate_tasks') and not self.config['notadd']
else None,
'gen_queue_count' : 0, # Number of tasks from this module in gen_queue
'max_gen_queue_pos' : 0,
'hook_queue_count' : 0,
'took_count' : 0,
}
for module_name, module in self.modules.items()
}
self.gen_queue = []
self.hooks_queue = []
self.db.prepare(self.repair_tables)
for table in self.db.schema:
if 'init' not in self.db_stats[table]:
self.db_stats[table]['init'] = self.db.prepare_info[table]['rows_read']
if 'missed_rows' in self.db.prepare_info[table]:
self.db_stats[table]['missed_rows'] = self.db.prepare_info[table]['missed_rows']
self.display.prepare_end()
def get_cur_curl_tasks(self):
return [
reg_obj['task'].url
for reg_obj in self.transport.registry.values()
]
def can_make_request(self, url, curl_urls):
host = utils.get_host(url)
# First, make sure there are no active requests to this host
for active_task in curl_urls:
task_host = utils.get_host(active_task)
if task_host == host:
return False
# Then, check the allowed request rate from the config
if host in self.config['hosts'] and host in self.hosts:
timedelta = datetime.datetime.now() - self.hosts[host]['last_request_time']
if timedelta.total_seconds() < self.config['hosts'][host]['min_delay']:
return False
return True
def populate_gen_queue(self):
max_size = self.config['module_max_queue_count']
for i in range(max_size): # There will be unnecessary iterations, but the algorithm is simpler
for module, module_info in self.modules_state.items():
if module_info['generator'] is not None:
if self.config['balance']:
# If we are in "balance" mode,
# there should be no more than max_size tasks in total in gen_queue and hooks_queue
if module_info['gen_queue_count'] + module_info['hook_queue_count'] >= max_size:
continue
else:
# If we are not in "balance" mode,
# there should be no more than max_size tasks beyond the last task taken from gen_queue
if module_info['gen_queue_count'] >= module_info['max_gen_queue_pos'] + max_size:
continue
try:
task = module_info['generator'].next()
except StopIteration:
module_info['generator'] = None
else:
if task is not None:
task.source_task = module
task.module = module
module_info['gen_queue_count'] += 1
self.gen_queue.append(task)
def get_next_task(self):
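# Returns a ready Task, None to signal "nothing can run right now, try again later"
# while at least one module generator is still active, or False once all generators
# are exhausted.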
# first, look in hooks_queue
curl_urls = self.get_cur_curl_tasks()
for task_pos, task in enumerate(self.hooks_queue):
if self.can_make_request(task.url, curl_urls):
# Atomic, but I'm not sure there won't be problems with
# asynchronous calls to db_write_callback
self.hooks_queue[task_pos:task_pos+1] = []
self.modules_state[task.module]['hook_queue_count'] -= 1
self.display.task_from_hook_queue(task)
return task
self.populate_gen_queue()
# if not found, look in generators queue
for task_pos, task in enumerate(self.gen_queue):
if self.can_make_request(task.url, curl_urls):
module_state = self.modules_state[task.module]
module_state['gen_queue_count'] -= 1
module_state['took_count'] += 1
module_state['max_gen_queue_pos'] = max(module_state['max_gen_queue_pos'], task_pos)
self.gen_queue[task_pos:task_pos+1] = []
self.display.task_from_gen_queue(task)
return task
for module in self.modules_state.values():
if module['generator'] is not None:
return None
return False
def find_modules_method(self, method):
for module_name, module_obj in self.modules.items():
if hasattr(module_obj, method):
return module_name, getattr(module_obj, method)
return None, None
def task_generator(self):
while True:
task = self.get_next_task()
if task is False:
if self.reached_repair_limit:
self.display.repair_restart()
else:
self.display.all_tasks_complete()
return
if task is None:
yield None
continue
host = utils.get_host(task.url)
if host not in self.hosts:
self.hosts[host] = {
'success' : 0,
'failure' : 0,
}
self.hosts[host]['last_url'] = task.url
self.hosts[host]['in_process'] = True
method_name = 'task_' + task.name
module, handler = self.find_modules_method(method_name)
if handler is None:
raise Exception('Can\'t find method ' + method_name + ' in modules')
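# Bind host and handler through an outer factory so each task's callback captures
# its own values instead of the loop variables' final values (late-binding closure pitfall).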
def wrapper_wrapper(host, handler):
def wrapper(grab, task):
self.display.task_started(task)
self.hosts[host]['success'] += 1
self.hosts[host]['in_process'] = False
handler(grab, task)
return wrapper
task.callback = wrapper_wrapper(host, handler)
yield task
def db_write_callback(self, table, row, repair_mode=False):
if not repair_mode:
self.db_stats[table]['added'] += 1
hook_method_name = 'hook_' + table
module, handler = self.find_modules_method(hook_method_name)
if handler is not None:
for task in handler(row):
if repair_mode:
self.db.prepare_info[table]['missed_rows'] += 1
task.source_task = hook_method_name
if getattr(task, 'module', None) is None:
task.module = module
if repair_mode and self.modules_state[task.module]['hook_queue_count'] > self.config['repair']:
self.reached_repair_limit = True
break
self.modules_state[task.module]['hook_queue_count'] += 1
self.hooks_queue.append(task)
self.display.add_hook_callback_task(task)
def log_failed_network_result(self, res):
host = utils.get_host(res['task'].url)
self.hosts[host]['failure'] += 1
self.hosts[host]['in_process'] = False
if res['ok']:
msg = 'http-%s' % res['grab'].response.code
else:
msg = res['error_abbr']
self.display.failed_network_result(res['task'].url, msg)
super(NewsParser, self).log_failed_network_result(res)
def process_handler_error(self, func_name, ex, task):
self.display.failed_parser_result(task.url)
super(NewsParser, self).process_handler_error(func_name, ex, task)
def submit_task_to_transport(self, task, *args):
host = utils.get_host(task.url)
self.hosts[host]['last_request_time'] = datetime.datetime.now()
super(NewsParser, self).submit_task_to_transport(task, *args)
self.display.task_submitted_to_transport()
def shutdown(self):
self.display.finalize_begin()
if hasattr(self, 'db'):
self.db.finalize()
self.display.finalize_end()
def print_log(message):
print(message.replace('\n', ''))
sys.stdout.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='newsgrab version 3.0.')
db_type = parser.add_mutually_exclusive_group(required=True)
db_type.add_argument('-csv', type=str, help='directory for csv tables', metavar='CSV_DIR')
db_type.add_argument('-sqlite', type=str, help='file with sqlite3 database', metavar='FILE.DB')
parser.add_argument('-config', type=str, help='configuration file in JSON format', default=None, metavar='CONFIG.JSON')
parser.add_argument('-report', type=str, help='append short report to log file', default='newsgrab_report.log', metavar='FILE.LOG')
# parser.add_argument('-curses', help='show realtime information to console', action="store_true")
parser.add_argument('-cron', help='print report to stdout', action="store_true")
parser.add_argument('-log', type=str, help='spider request log file', default=None, metavar='FILE.LOG')
# parser.add_argument('-threads', type=int, help='set number of threads', default=5)
# parser.add_argument('-repair', type=int, help='check database for unfinished hooks and retry them', default=0)
# parser.add_argument('-notadd', help='disable generators', action="store_true")
# parser.add_argument('-balance', help='do not limit queue size', action="store_true")
args = parser.parse_args()
logger = logging.getLogger('grab')
logger.setLevel(logging.DEBUG)
err = logging.StreamHandler(sys.stderr)
err.setLevel(logging.ERROR)
logger.addHandler(err)
if args.config:
config = json.loads(open(args.config).read())
config['config'] = args.config
else:
config = {'threads' : 10, 'repair' : 0, 'hosts': {}, 'likes' : None, 'lenta' : None, 'lifenews' : None}
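    # Hedged example of what a CONFIG.JSON might look like, based on the keys
    # this script reads elsewhere (the host name and min_delay value are
    # placeholders; the module sections depend on each module class):
    #
    #     {
    #         "threads": 10,
    #         "repair": 0,
    #         "hosts": {"example.com": {"min_delay": 2}},
    #         "likes": null,
    #         "lenta": null,
    #         "lifenews": null
    #     }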
if args.log is not None:
config['log'] = args.log
open(config['log'], 'w').close()
logger.addHandler(logging.FileHandler(config['log']))
display_obj = display.Default(sys.stdout, open(args.report, 'a'), cron=args.cron)
if args.csv:
config['csv'] = args.csv
if args.sqlite:
config['sqlite'] = args.sqlite
    if getattr(args, 'threads', None):  # the -threads option is currently commented out above
        config['threads'] = args.threads
config['notadd'] = False
config['balance'] = True
config['module_max_queue_count'] = 10
module_classes = {
'likes' : likes.Module,
'lenta' : lenta.Module,
'lifenews' : lifenews.Module,
}
modules = {}
for module_name, module_class in module_classes.items():
if module_name in config:
modules[module_name] = module_class(config[module_name])
try:
while True:
spider = NewsParser(config, modules, display_obj, thread_number=config['threads'], max_task_generator_chunk=1)
spider.run()
if not spider.reached_repair_limit:
break
except KeyboardInterrupt:
print('\nKeyboardInterrupt received')
display_obj.keyboard_int()
display_obj.finalize()
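    # Hedged command-line sketch based on the arguments defined above (the
    # script and file names are placeholders; exactly one of -csv / -sqlite
    # is required):
    #
    #     python newsgrab.py -sqlite news.db -config config.json -cron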
|
|
from sympy import (symbols, factorial, sqrt, Rational, atan, I, log, fps, O,
Sum, oo, S, pi, cos, sin, Function, exp, Derivative, asin,
airyai, acos, acosh, gamma, erf, asech, Add, Mul,
integrate)
from sympy.series.formal import (rational_algorithm, FormalPowerSeries,
FormalPowerSeriesProduct, FormalPowerSeriesCompose,
FormalPowerSeriesInverse, simpleDE,
rational_independent, exp_re, hyper_re)
from sympy.utilities.pytest import raises, XFAIL, slow
x, y, z = symbols('x y z')
n, m, k = symbols('n m k', integer=True)
f, r = Function('f'), Function('r')
def test_rational_algorithm():
f = 1 / ((x - 1)**2 * (x - 2))
assert rational_algorithm(f, x, k) == \
(-2**(-k - 1) + 1 - (factorial(k + 1) / factorial(k)), 0, 0)
f = (1 + x + x**2 + x**3) / ((x - 1) * (x - 2))
assert rational_algorithm(f, x, k) == \
(-15*2**(-k - 1) + 4, x + 4, 0)
f = z / (y*m - m*x - y*x + x**2)
assert rational_algorithm(f, x, k) == \
(((-y**(-k - 1)*z) / (y - m)) + ((m**(-k - 1)*z) / (y - m)), 0, 0)
f = x / (1 - x - x**2)
assert rational_algorithm(f, x, k) is None
assert rational_algorithm(f, x, k, full=True) == \
(((Rational(-1, 2) + sqrt(5)/2)**(-k - 1) *
(-sqrt(5)/10 + S.Half)) +
((-sqrt(5)/2 - S.Half)**(-k - 1) *
(sqrt(5)/10 + S.Half)), 0, 0)
f = 1 / (x**2 + 2*x + 2)
assert rational_algorithm(f, x, k) is None
assert rational_algorithm(f, x, k, full=True) == \
((I*(-1 + I)**(-k - 1)) / 2 - (I*(-1 - I)**(-k - 1)) / 2, 0, 0)
f = log(1 + x)
assert rational_algorithm(f, x, k) == \
(-(-1)**(-k) / k, 0, 1)
f = atan(x)
assert rational_algorithm(f, x, k) is None
assert rational_algorithm(f, x, k, full=True) == \
(((I*I**(-k)) / 2 - (I*(-I)**(-k)) / 2) / k, 0, 1)
f = x*atan(x) - log(1 + x**2) / 2
assert rational_algorithm(f, x, k) is None
assert rational_algorithm(f, x, k, full=True) == \
(((I*I**(-k + 1)) / 2 - (I*(-I)**(-k + 1)) / 2) /
(k*(k - 1)), 0, 2)
f = log((1 + x) / (1 - x)) / 2 - atan(x)
assert rational_algorithm(f, x, k) is None
assert rational_algorithm(f, x, k, full=True) == \
((-(-1)**(-k) / 2 - (I*I**(-k)) / 2 + (I*(-I)**(-k)) / 2 +
S.Half) / k, 0, 1)
assert rational_algorithm(cos(x), x, k) is None
def test_rational_independent():
ri = rational_independent
assert ri([], x) == []
assert ri([cos(x), sin(x)], x) == [cos(x), sin(x)]
assert ri([x**2, sin(x), x*sin(x), x**3], x) == \
[x**3 + x**2, x*sin(x) + sin(x)]
assert ri([S.One, x*log(x), log(x), sin(x)/x, cos(x), sin(x), x], x) == \
[x + 1, x*log(x) + log(x), sin(x)/x + sin(x), cos(x)]
def test_simpleDE():
# Tests just the first valid DE
for DE in simpleDE(exp(x), x, f):
assert DE == (-f(x) + Derivative(f(x), x), 1)
break
for DE in simpleDE(sin(x), x, f):
assert DE == (f(x) + Derivative(f(x), x, x), 2)
break
for DE in simpleDE(log(1 + x), x, f):
assert DE == ((x + 1)*Derivative(f(x), x, 2) + Derivative(f(x), x), 2)
break
for DE in simpleDE(asin(x), x, f):
assert DE == (x*Derivative(f(x), x) + (x**2 - 1)*Derivative(f(x), x, x),
2)
break
for DE in simpleDE(exp(x)*sin(x), x, f):
assert DE == (2*f(x) - 2*Derivative(f(x)) + Derivative(f(x), x, x), 2)
break
for DE in simpleDE(((1 + x)/(1 - x))**n, x, f):
assert DE == (2*n*f(x) + (x**2 - 1)*Derivative(f(x), x), 1)
break
for DE in simpleDE(airyai(x), x, f):
assert DE == (-x*f(x) + Derivative(f(x), x, x), 2)
break
def test_exp_re():
d = -f(x) + Derivative(f(x), x)
assert exp_re(d, r, k) == -r(k) + r(k + 1)
d = f(x) + Derivative(f(x), x, x)
assert exp_re(d, r, k) == r(k) + r(k + 2)
d = f(x) + Derivative(f(x), x) + Derivative(f(x), x, x)
assert exp_re(d, r, k) == r(k) + r(k + 1) + r(k + 2)
d = Derivative(f(x), x) + Derivative(f(x), x, x)
assert exp_re(d, r, k) == r(k) + r(k + 1)
d = Derivative(f(x), x, 3) + Derivative(f(x), x, 4) + Derivative(f(x))
assert exp_re(d, r, k) == r(k) + r(k + 2) + r(k + 3)
def test_hyper_re():
d = f(x) + Derivative(f(x), x, x)
assert hyper_re(d, r, k) == r(k) + (k+1)*(k+2)*r(k + 2)
d = -x*f(x) + Derivative(f(x), x, x)
assert hyper_re(d, r, k) == (k + 2)*(k + 3)*r(k + 3) - r(k)
d = 2*f(x) - 2*Derivative(f(x), x) + Derivative(f(x), x, x)
assert hyper_re(d, r, k) == \
(-2*k - 2)*r(k + 1) + (k + 1)*(k + 2)*r(k + 2) + 2*r(k)
d = 2*n*f(x) + (x**2 - 1)*Derivative(f(x), x)
assert hyper_re(d, r, k) == \
k*r(k) + 2*n*r(k + 1) + (-k - 2)*r(k + 2)
d = (x**10 + 4)*Derivative(f(x), x) + x*(x**10 - 1)*Derivative(f(x), x, x)
assert hyper_re(d, r, k) == \
(k*(k - 1) + k)*r(k) + (4*k - (k + 9)*(k + 10) + 40)*r(k + 10)
d = ((x**2 - 1)*Derivative(f(x), x, 3) + 3*x*Derivative(f(x), x, x) +
Derivative(f(x), x))
assert hyper_re(d, r, k) == \
((k*(k - 2)*(k - 1) + 3*k*(k - 1) + k)*r(k) +
(-k*(k + 1)*(k + 2))*r(k + 2))
def test_fps():
assert fps(1) == 1
assert fps(2, x) == 2
assert fps(2, x, dir='+') == 2
assert fps(2, x, dir='-') == 2
assert fps(1/x + 1/x**2) == 1/x + 1/x**2
assert fps(log(1 + x), hyper=False, rational=False) == log(1 + x)
f = fps(x**2 + x + 1)
assert isinstance(f, FormalPowerSeries)
assert f.function == x**2 + x + 1
assert f[0] == 1
assert f[2] == x**2
assert f.truncate(4) == x**2 + x + 1 + O(x**4)
assert f.polynomial() == x**2 + x + 1
f = fps(log(1 + x))
assert isinstance(f, FormalPowerSeries)
assert f.function == log(1 + x)
assert f.subs(x, y) == f
assert f[:5] == [0, x, -x**2/2, x**3/3, -x**4/4]
assert f.as_leading_term(x) == x
assert f.polynomial(6) == x - x**2/2 + x**3/3 - x**4/4 + x**5/5
k = f.ak.variables[0]
assert f.infinite == Sum((-(-1)**(-k)*x**k)/k, (k, 1, oo))
ft, s = f.truncate(n=None), f[:5]
for i, t in enumerate(ft):
if i == 5:
break
assert s[i] == t
f = sin(x).fps(x)
assert isinstance(f, FormalPowerSeries)
assert f.truncate() == x - x**3/6 + x**5/120 + O(x**6)
raises(NotImplementedError, lambda: fps(y*x))
raises(ValueError, lambda: fps(x, dir=0))
@slow
def test_fps__rational():
assert fps(1/x) == (1/x)
assert fps((x**2 + x + 1) / x**3, dir=-1) == (x**2 + x + 1) / x**3
f = 1 / ((x - 1)**2 * (x - 2))
assert fps(f, x).truncate() == \
(Rational(-1, 2) - x*Rational(5, 4) - 17*x**2/8 - 49*x**3/16 - 129*x**4/32 -
321*x**5/64 + O(x**6))
f = (1 + x + x**2 + x**3) / ((x - 1) * (x - 2))
assert fps(f, x).truncate() == \
(S.Half + x*Rational(5, 4) + 17*x**2/8 + 49*x**3/16 + 113*x**4/32 +
241*x**5/64 + O(x**6))
f = x / (1 - x - x**2)
assert fps(f, x, full=True).truncate() == \
x + x**2 + 2*x**3 + 3*x**4 + 5*x**5 + O(x**6)
f = 1 / (x**2 + 2*x + 2)
assert fps(f, x, full=True).truncate() == \
S.Half - x/2 + x**2/4 - x**4/8 + x**5/8 + O(x**6)
f = log(1 + x)
assert fps(f, x).truncate() == \
x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
assert fps(f, x, dir=1).truncate() == fps(f, x, dir=-1).truncate()
assert fps(f, x, 2).truncate() == \
(log(3) - Rational(2, 3) - (x - 2)**2/18 + (x - 2)**3/81 -
(x - 2)**4/324 + (x - 2)**5/1215 + x/3 + O((x - 2)**6, (x, 2)))
assert fps(f, x, 2, dir=-1).truncate() == \
(log(3) - Rational(2, 3) - (-x + 2)**2/18 - (-x + 2)**3/81 -
(-x + 2)**4/324 - (-x + 2)**5/1215 + x/3 + O((x - 2)**6, (x, 2)))
f = atan(x)
assert fps(f, x, full=True).truncate() == x - x**3/3 + x**5/5 + O(x**6)
assert fps(f, x, full=True, dir=1).truncate() == \
fps(f, x, full=True, dir=-1).truncate()
assert fps(f, x, 2, full=True).truncate() == \
(atan(2) - Rational(2, 5) - 2*(x - 2)**2/25 + 11*(x - 2)**3/375 -
6*(x - 2)**4/625 + 41*(x - 2)**5/15625 + x/5 + O((x - 2)**6, (x, 2)))
assert fps(f, x, 2, full=True, dir=-1).truncate() == \
(atan(2) - Rational(2, 5) - 2*(-x + 2)**2/25 - 11*(-x + 2)**3/375 -
6*(-x + 2)**4/625 - 41*(-x + 2)**5/15625 + x/5 + O((x - 2)**6, (x, 2)))
f = x*atan(x) - log(1 + x**2) / 2
assert fps(f, x, full=True).truncate() == x**2/2 - x**4/12 + O(x**6)
f = log((1 + x) / (1 - x)) / 2 - atan(x)
assert fps(f, x, full=True).truncate(n=10) == 2*x**3/3 + 2*x**7/7 + O(x**10)
@slow
def test_fps__hyper():
f = sin(x)
assert fps(f, x).truncate() == x - x**3/6 + x**5/120 + O(x**6)
f = cos(x)
assert fps(f, x).truncate() == 1 - x**2/2 + x**4/24 + O(x**6)
f = exp(x)
assert fps(f, x).truncate() == \
1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + O(x**6)
f = atan(x)
assert fps(f, x).truncate() == x - x**3/3 + x**5/5 + O(x**6)
f = exp(acos(x))
assert fps(f, x).truncate() == \
(exp(pi/2) - x*exp(pi/2) + x**2*exp(pi/2)/2 - x**3*exp(pi/2)/3 +
5*x**4*exp(pi/2)/24 - x**5*exp(pi/2)/6 + O(x**6))
f = exp(acosh(x))
assert fps(f, x).truncate() == I + x - I*x**2/2 - I*x**4/8 + O(x**6)
f = atan(1/x)
assert fps(f, x).truncate() == pi/2 - x + x**3/3 - x**5/5 + O(x**6)
f = x*atan(x) - log(1 + x**2) / 2
assert fps(f, x, rational=False).truncate() == x**2/2 - x**4/12 + O(x**6)
f = log(1 + x)
assert fps(f, x, rational=False).truncate() == \
x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
f = airyai(x**2)
assert fps(f, x).truncate() == \
(3**Rational(5, 6)*gamma(Rational(1, 3))/(6*pi) -
3**Rational(2, 3)*x**2/(3*gamma(Rational(1, 3))) + O(x**6))
f = exp(x)*sin(x)
assert fps(f, x).truncate() == x + x**2 + x**3/3 - x**5/30 + O(x**6)
f = exp(x)*sin(x)/x
assert fps(f, x).truncate() == 1 + x + x**2/3 - x**4/30 - x**5/90 + O(x**6)
f = sin(x) * cos(x)
assert fps(f, x).truncate() == x - 2*x**3/3 + 2*x**5/15 + O(x**6)
def test_fps_shift():
f = x**-5*sin(x)
assert fps(f, x).truncate() == \
1/x**4 - 1/(6*x**2) + Rational(1, 120) - x**2/5040 + x**4/362880 + O(x**6)
f = x**2*atan(x)
assert fps(f, x, rational=False).truncate() == \
x**3 - x**5/3 + O(x**6)
f = cos(sqrt(x))*x
assert fps(f, x).truncate() == \
x - x**2/2 + x**3/24 - x**4/720 + x**5/40320 + O(x**6)
f = x**2*cos(sqrt(x))
assert fps(f, x).truncate() == \
x**2 - x**3/2 + x**4/24 - x**5/720 + O(x**6)
def test_fps__Add_expr():
f = x*atan(x) - log(1 + x**2) / 2
assert fps(f, x).truncate() == x**2/2 - x**4/12 + O(x**6)
f = sin(x) + cos(x) - exp(x) + log(1 + x)
assert fps(f, x).truncate() == x - 3*x**2/2 - x**4/4 + x**5/5 + O(x**6)
f = 1/x + sin(x)
assert fps(f, x).truncate() == 1/x + x - x**3/6 + x**5/120 + O(x**6)
f = sin(x) - cos(x) + 1/(x - 1)
assert fps(f, x).truncate() == \
-2 - x**2/2 - 7*x**3/6 - 25*x**4/24 - 119*x**5/120 + O(x**6)
def test_fps__asymptotic():
f = exp(x)
assert fps(f, x, oo) == f
assert fps(f, x, -oo).truncate() == O(1/x**6, (x, oo))
f = erf(x)
assert fps(f, x, oo).truncate() == 1 + O(1/x**6, (x, oo))
assert fps(f, x, -oo).truncate() == -1 + O(1/x**6, (x, oo))
f = atan(x)
assert fps(f, x, oo, full=True).truncate() == \
-1/(5*x**5) + 1/(3*x**3) - 1/x + pi/2 + O(1/x**6, (x, oo))
assert fps(f, x, -oo, full=True).truncate() == \
-1/(5*x**5) + 1/(3*x**3) - 1/x - pi/2 + O(1/x**6, (x, oo))
f = log(1 + x)
assert fps(f, x, oo) != \
(-1/(5*x**5) - 1/(4*x**4) + 1/(3*x**3) - 1/(2*x**2) + 1/x - log(1/x) +
O(1/x**6, (x, oo)))
assert fps(f, x, -oo) != \
(-1/(5*x**5) - 1/(4*x**4) + 1/(3*x**3) - 1/(2*x**2) + 1/x + I*pi -
log(-1/x) + O(1/x**6, (x, oo)))
def test_fps__fractional():
f = sin(sqrt(x)) / x
assert fps(f, x).truncate() == \
(1/sqrt(x) - sqrt(x)/6 + x**Rational(3, 2)/120 -
x**Rational(5, 2)/5040 + x**Rational(7, 2)/362880 -
x**Rational(9, 2)/39916800 + x**Rational(11, 2)/6227020800 + O(x**6))
f = sin(sqrt(x)) * x
assert fps(f, x).truncate() == \
(x**Rational(3, 2) - x**Rational(5, 2)/6 + x**Rational(7, 2)/120 -
x**Rational(9, 2)/5040 + x**Rational(11, 2)/362880 + O(x**6))
f = atan(sqrt(x)) / x**2
assert fps(f, x).truncate() == \
(x**Rational(-3, 2) - x**Rational(-1, 2)/3 + x**S.Half/5 -
x**Rational(3, 2)/7 + x**Rational(5, 2)/9 - x**Rational(7, 2)/11 +
x**Rational(9, 2)/13 - x**Rational(11, 2)/15 + O(x**6))
f = exp(sqrt(x))
assert fps(f, x).truncate().expand() == \
(1 + x/2 + x**2/24 + x**3/720 + x**4/40320 + x**5/3628800 + sqrt(x) +
x**Rational(3, 2)/6 + x**Rational(5, 2)/120 + x**Rational(7, 2)/5040 +
x**Rational(9, 2)/362880 + x**Rational(11, 2)/39916800 + O(x**6))
f = exp(sqrt(x))*x
assert fps(f, x).truncate().expand() == \
(x + x**2/2 + x**3/24 + x**4/720 + x**5/40320 + x**Rational(3, 2) +
x**Rational(5, 2)/6 + x**Rational(7, 2)/120 + x**Rational(9, 2)/5040 +
x**Rational(11, 2)/362880 + O(x**6))
def test_fps__logarithmic_singularity():
f = log(1 + 1/x)
assert fps(f, x) != \
-log(x) + x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
assert fps(f, x, rational=False) != \
-log(x) + x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
@XFAIL
def test_fps__logarithmic_singularity_fail():
    f = asech(x)  # Algorithms for computing limits probably need improvements
assert fps(f, x) == log(2) - log(x) - x**2/4 - 3*x**4/64 + O(x**6)
def test_fps_symbolic():
f = x**n*sin(x**2)
assert fps(f, x).truncate(8) == x**(n + 2) - x**(n + 6)/6 + O(x**(n + 8), x)
f = x**n*log(1 + x)
fp = fps(f, x)
k = fp.ak.variables[0]
assert fp.infinite == \
Sum((-(-1)**(-k)*x**(k + n))/k, (k, 1, oo))
f = (x - 2)**n*log(1 + x)
assert fps(f, x, 2).truncate() == \
((x - 2)**n*log(3) + (x - 2)**(n + 1)/3 - (x - 2)**(n + 2)/18 + (x - 2)**(n + 3)/81 -
(x - 2)**(n + 4)/324 + (x - 2)**(n + 5)/1215 + O((x - 2)**(n + 6), (x, 2)))
f = x**(n - 2)*cos(x)
assert fps(f, x).truncate() == \
(x**(n - 2) - x**n/2 + x**(n + 2)/24 - x**(n + 4)/720 + O(x**(n + 6), x))
f = x**(n - 2)*sin(x) + x**n*exp(x)
assert fps(f, x).truncate() == \
(x**(n - 1) + x**n + 5*x**(n + 1)/6 + x**(n + 2)/2 + 7*x**(n + 3)/40 +
x**(n + 4)/24 + 41*x**(n + 5)/5040 + O(x**(n + 6), x))
f = x**n*atan(x)
assert fps(f, x, oo).truncate() == \
(-x**(n - 5)/5 + x**(n - 3)/3 + x**n*(pi/2 - 1/x) +
O((1/x)**(-n)/x**6, (x, oo)))
f = x**(n/2)*cos(x)
assert fps(f, x).truncate() == \
x**(n/2) - x**(n/2 + 2)/2 + x**(n/2 + 4)/24 + O(x**(n/2 + 6), x)
f = x**(n + m)*sin(x)
assert fps(f, x).truncate() == \
x**(m + n + 1) - x**(m + n + 3)/6 + x**(m + n + 5)/120 + O(x**(m + n + 6), x)
def test_fps__slow():
f = x*exp(x)*sin(2*x) # TODO: rsolve needs improvement
assert fps(f, x).truncate() == 2*x**2 + 2*x**3 - x**4/3 - x**5 + O(x**6)
def test_fps__operations():
f1, f2 = fps(sin(x)), fps(cos(x))
fsum = f1 + f2
assert fsum.function == sin(x) + cos(x)
assert fsum.truncate() == \
1 + x - x**2/2 - x**3/6 + x**4/24 + x**5/120 + O(x**6)
fsum = f1 + 1
assert fsum.function == sin(x) + 1
assert fsum.truncate() == 1 + x - x**3/6 + x**5/120 + O(x**6)
fsum = 1 + f2
assert fsum.function == cos(x) + 1
assert fsum.truncate() == 2 - x**2/2 + x**4/24 + O(x**6)
assert (f1 + x) == Add(f1, x)
assert -f2.truncate() == -1 + x**2/2 - x**4/24 + O(x**6)
assert (f1 - f1) is S.Zero
fsub = f1 - f2
assert fsub.function == sin(x) - cos(x)
assert fsub.truncate() == \
-1 + x + x**2/2 - x**3/6 - x**4/24 + x**5/120 + O(x**6)
fsub = f1 - 1
assert fsub.function == sin(x) - 1
assert fsub.truncate() == -1 + x - x**3/6 + x**5/120 + O(x**6)
fsub = 1 - f2
assert fsub.function == -cos(x) + 1
assert fsub.truncate() == x**2/2 - x**4/24 + O(x**6)
raises(ValueError, lambda: f1 + fps(exp(x), dir=-1))
raises(ValueError, lambda: f1 + fps(exp(x), x0=1))
fm = f1 * 3
assert fm.function == 3*sin(x)
assert fm.truncate() == 3*x - x**3/2 + x**5/40 + O(x**6)
fm = 3 * f2
assert fm.function == 3*cos(x)
assert fm.truncate() == 3 - 3*x**2/2 + x**4/8 + O(x**6)
assert (f1 * f2) == Mul(f1, f2)
assert (f1 * x) == Mul(f1, x)
fd = f1.diff()
assert fd.function == cos(x)
assert fd.truncate() == 1 - x**2/2 + x**4/24 + O(x**6)
fd = f2.diff()
assert fd.function == -sin(x)
assert fd.truncate() == -x + x**3/6 - x**5/120 + O(x**6)
fd = f2.diff().diff()
assert fd.function == -cos(x)
assert fd.truncate() == -1 + x**2/2 - x**4/24 + O(x**6)
f3 = fps(exp(sqrt(x)))
fd = f3.diff()
assert fd.truncate().expand() == \
(1/(2*sqrt(x)) + S.Half + x/12 + x**2/240 + x**3/10080 + x**4/725760 +
x**5/79833600 + sqrt(x)/4 + x**Rational(3, 2)/48 + x**Rational(5, 2)/1440 +
x**Rational(7, 2)/80640 + x**Rational(9, 2)/7257600 + x**Rational(11, 2)/958003200 +
O(x**6))
assert f1.integrate((x, 0, 1)) == -cos(1) + 1
assert integrate(f1, (x, 0, 1)) == -cos(1) + 1
fi = integrate(f1, x)
assert fi.function == -cos(x)
assert fi.truncate() == -1 + x**2/2 - x**4/24 + O(x**6)
fi = f2.integrate(x)
assert fi.function == sin(x)
assert fi.truncate() == x - x**3/6 + x**5/120 + O(x**6)
def test_fps__product():
f1, f2, f3 = fps(sin(x)), fps(exp(x)), fps(cos(x))
raises(ValueError, lambda: f1.product(exp(x), x))
raises(ValueError, lambda: f1.product(fps(exp(x), dir=-1), x, 4))
raises(ValueError, lambda: f1.product(fps(exp(x), x0=1), x, 4))
raises(ValueError, lambda: f1.product(fps(exp(y)), x, 4))
fprod = f1.product(f2, x)
assert isinstance(fprod, FormalPowerSeriesProduct)
assert isinstance(fprod.ffps, FormalPowerSeries)
assert isinstance(fprod.gfps, FormalPowerSeries)
assert fprod.f == sin(x)
assert fprod.g == exp(x)
assert fprod.function == sin(x) * exp(x)
assert fprod._eval_terms(4) == x + x**2 + x**3/3
assert fprod.truncate(4) == x + x**2 + x**3/3 + O(x**4)
assert fprod.polynomial(4) == x + x**2 + x**3/3
raises(NotImplementedError, lambda: fprod._eval_term(5))
raises(NotImplementedError, lambda: fprod.infinite)
raises(NotImplementedError, lambda: fprod._eval_derivative(x))
raises(NotImplementedError, lambda: fprod.integrate(x))
assert f1.product(f3, x)._eval_terms(4) == x - 2*x**3/3
assert f1.product(f3, x).truncate(4) == x - 2*x**3/3 + O(x**4)
def test_fps__compose():
f1, f2, f3 = fps(exp(x)), fps(sin(x)), fps(cos(x))
raises(ValueError, lambda: f1.compose(sin(x), x))
raises(ValueError, lambda: f1.compose(fps(sin(x), dir=-1), x, 4))
raises(ValueError, lambda: f1.compose(fps(sin(x), x0=1), x, 4))
raises(ValueError, lambda: f1.compose(fps(sin(y)), x, 4))
raises(ValueError, lambda: f1.compose(f3, x))
raises(ValueError, lambda: f2.compose(f3, x))
fcomp = f1.compose(f2, x)
assert isinstance(fcomp, FormalPowerSeriesCompose)
assert isinstance(fcomp.ffps, FormalPowerSeries)
assert isinstance(fcomp.gfps, FormalPowerSeries)
assert fcomp.f == exp(x)
assert fcomp.g == sin(x)
assert fcomp.function == exp(sin(x))
assert fcomp._eval_terms(6) == 1 + x + x**2/2 - x**4/8 - x**5/15
assert fcomp.truncate() == 1 + x + x**2/2 - x**4/8 - x**5/15 + O(x**6)
assert fcomp.truncate(5) == 1 + x + x**2/2 - x**4/8 + O(x**5)
raises(NotImplementedError, lambda: fcomp._eval_term(5))
raises(NotImplementedError, lambda: fcomp.infinite)
raises(NotImplementedError, lambda: fcomp._eval_derivative(x))
raises(NotImplementedError, lambda: fcomp.integrate(x))
assert f1.compose(f2, x).truncate(4) == 1 + x + x**2/2 + O(x**4)
assert f1.compose(f2, x).truncate(8) == \
1 + x + x**2/2 - x**4/8 - x**5/15 - x**6/240 + x**7/90 + O(x**8)
assert f1.compose(f2, x).truncate(6) == \
1 + x + x**2/2 - x**4/8 - x**5/15 + O(x**6)
assert f2.compose(f2, x).truncate(4) == x - x**3/3 + O(x**4)
assert f2.compose(f2, x).truncate(8) == x - x**3/3 + x**5/10 - 8*x**7/315 + O(x**8)
assert f2.compose(f2, x).truncate(6) == x - x**3/3 + x**5/10 + O(x**6)
def test_fps__inverse():
f1, f2, f3 = fps(sin(x)), fps(exp(x)), fps(cos(x))
raises(ValueError, lambda: f1.inverse(x))
finv = f2.inverse(x)
assert isinstance(finv, FormalPowerSeriesInverse)
assert isinstance(finv.ffps, FormalPowerSeries)
raises(ValueError, lambda: finv.gfps)
assert finv.f == exp(x)
assert finv.function == exp(-x)
assert finv._eval_terms(5) == 1 - x + x**2/2 - x**3/6 + x**4/24
assert finv.truncate() == 1 - x + x**2/2 - x**3/6 + x**4/24 - x**5/120 + O(x**6)
assert finv.truncate(5) == 1 - x + x**2/2 - x**3/6 + x**4/24 + O(x**5)
raises(NotImplementedError, lambda: finv._eval_term(5))
raises(ValueError, lambda: finv.g)
raises(NotImplementedError, lambda: finv.infinite)
raises(NotImplementedError, lambda: finv._eval_derivative(x))
raises(NotImplementedError, lambda: finv.integrate(x))
assert f2.inverse(x).truncate(8) == \
1 - x + x**2/2 - x**3/6 + x**4/24 - x**5/120 + x**6/720 - x**7/5040 + O(x**8)
assert f3.inverse(x).truncate() == 1 + x**2/2 + 5*x**4/24 + O(x**6)
assert f3.inverse(x).truncate(8) == 1 + x**2/2 + 5*x**4/24 + 61*x**6/720 + O(x**8)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import binascii
import socket
import time
import ssl
from datetime import datetime, timedelta
from functools import wraps
from libcloud.utils.py3 import httplib
from libcloud.common.exceptions import RateLimitReachedError
from libcloud.common.providers import get_driver as _get_driver
from libcloud.common.providers import set_driver as _set_driver
__all__ = [
'find',
'get_driver',
'set_driver',
'merge_valid_keys',
'get_new_obj',
'str2dicts',
'dict2str',
'reverse_dict',
'lowercase_keys',
'get_secure_random_string',
'retry',
'ReprMixin'
]
# Error message which indicates a transient SSL error upon which request
# can be retried
TRANSIENT_SSL_ERROR = 'The read operation timed out'
class TransientSSLError(ssl.SSLError):
"""Represent transient SSL errors, e.g. timeouts"""
pass
# Constants used by the ``retry`` decorator
DEFAULT_TIMEOUT = 30 # default retry timeout
DEFAULT_DELAY = 1  # default sleep delay used in each iteration
DEFAULT_BACKOFF = 1  # retry backoff multiplier
RETRY_EXCEPTIONS = (RateLimitReachedError, socket.error, socket.gaierror,
httplib.NotConnected, httplib.ImproperConnectionState,
TransientSSLError)
def find(l, predicate):
results = [x for x in l if predicate(x)]
return results[0] if len(results) > 0 else None
# Note: these are backward-compatibility aliases for functions which have
# been moved to the "libcloud.common.providers" module
get_driver = _get_driver
set_driver = _set_driver
def merge_valid_keys(params, valid_keys, extra):
"""
Merge valid keys from extra into params dictionary and return
dictionary with keys which have been merged.
Note: params is modified in place.
"""
merged = {}
if not extra:
return merged
for key in valid_keys:
if key in extra:
params[key] = extra[key]
merged[key] = extra[key]
return merged
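# Hedged usage sketch for merge_valid_keys (the dictionaries and key names
# below are made up): only whitelisted keys from ``extra`` are copied into
# ``params`` (which is modified in place) and returned.
def _merge_valid_keys_example():
    params = {'name': 'node-1'}
    extra = {'ex_tags': ['web'], 'ignored': True}
    merged = merge_valid_keys(params, ['ex_tags'], extra)
    return params, merged  # params now also contains 'ex_tags'; merged == {'ex_tags': ['web']}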
def get_new_obj(obj, klass, attributes):
"""
Pass attributes from the existing object 'obj' and attributes
dictionary to a 'klass' constructor.
Attributes from 'attributes' dictionary are only passed to the
constructor if they are not None.
"""
kwargs = {}
for key, value in list(obj.__dict__.items()):
if isinstance(value, dict):
kwargs[key] = value.copy()
elif isinstance(value, (tuple, list)):
kwargs[key] = value[:]
else:
kwargs[key] = value
for key, value in list(attributes.items()):
if value is None:
continue
if isinstance(value, dict):
kwargs_value = kwargs.get(key, {})
for key1, value2 in list(value.items()):
if value2 is None:
continue
kwargs_value[key1] = value2
kwargs[key] = kwargs_value
else:
kwargs[key] = value
return klass(**kwargs)
def str2dicts(data):
"""
Create a list of dictionaries from a whitespace and newline delimited text.
For example, this:
cpu 1100
ram 640
cpu 2200
ram 1024
becomes:
[{'cpu': '1100', 'ram': '640'}, {'cpu': '2200', 'ram': '1024'}]
"""
list_data = []
list_data.append({})
d = list_data[-1]
lines = data.split('\n')
for line in lines:
line = line.strip()
if not line:
d = {}
list_data.append(d)
d = list_data[-1]
continue
whitespace = line.find(' ')
if not whitespace:
continue
key = line[0:whitespace]
value = line[whitespace + 1:]
d.update({key: value})
list_data = [val for val in list_data if val != {}]
return list_data
def str2list(data):
"""
Create a list of values from a whitespace and newline delimited text
(keys are ignored).
For example, this:
ip 1.2.3.4
ip 1.2.3.5
ip 1.2.3.6
becomes:
['1.2.3.4', '1.2.3.5', '1.2.3.6']
"""
list_data = []
for line in data.split('\n'):
line = line.strip()
if not line:
continue
try:
splitted = line.split(' ')
# key = splitted[0]
value = splitted[1]
except Exception:
continue
list_data.append(value)
return list_data
def dict2str(data):
"""
Create a string with a whitespace and newline delimited text from a
dictionary.
For example, this:
{'cpu': '1100', 'ram': '640', 'smp': 'auto'}
becomes:
cpu 1100
ram 640
smp auto
"""
result = ''
for k in data:
if data[k] is not None:
result += '%s %s\n' % (str(k), str(data[k]))
else:
result += '%s\n' % str(k)
return result
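# Hedged sketch tying dict2str and str2dicts together (the values are made
# up): serialize a dictionary to the whitespace/newline format and parse it
# back. Key order follows the dictionary's iteration order.
def _dict2str_roundtrip_example():
    text = dict2str({'cpu': '1100', 'ram': '640'})
    return str2dicts(text)  # -> [{'cpu': '1100', 'ram': '640'}]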
def reverse_dict(dictionary):
return dict([(value, key) for key, value in list(dictionary.items())])
def lowercase_keys(dictionary):
return dict(((k.lower(), v) for k, v in dictionary.items()))
def get_secure_random_string(size):
"""
Return a string of ``size`` random bytes. Returned string is suitable for
cryptographic use.
:param size: Size of the generated string.
:type size: ``int``
:return: Random string.
:rtype: ``str``
"""
value = os.urandom(size)
value = binascii.hexlify(value)
value = value.decode('utf-8')[:size]
return value
class ReprMixin(object):
"""
Mixin class which adds __repr__ and __str__ methods for the attributes
specified on the class.
"""
_repr_attributes = []
def __repr__(self):
attributes = []
for attribute in self._repr_attributes:
value = getattr(self, attribute, None)
attributes.append('%s=%s' % (attribute, value))
values = (self.__class__.__name__, ', '.join(attributes))
result = '<%s %s>' % values
return result
def __str__(self):
return str(self.__repr__())
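# Hedged usage sketch for ReprMixin (the class below is illustrative only):
# listing attribute names in ``_repr_attributes`` is all that is needed to
# get a readable repr such as ``<_ExampleRecord id=1, name=test>``.
class _ExampleRecord(ReprMixin):
    _repr_attributes = ['id', 'name']

    def __init__(self, id, name):
        self.id = id
        self.name = name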
def retry(retry_exceptions=RETRY_EXCEPTIONS, retry_delay=DEFAULT_DELAY,
timeout=DEFAULT_TIMEOUT, backoff=DEFAULT_BACKOFF):
"""
Retry decorator that helps to handle common transient exceptions.
:param retry_exceptions: types of exceptions to retry on.
:param retry_delay: retry delay between the attempts.
:param timeout: maximum time to wait.
:param backoff: multiplier added to delay between attempts.
:Example:
retry_request = retry(timeout=1, retry_delay=1, backoff=1)
retry_request(self.connection.request)()
"""
if retry_exceptions is None:
retry_exceptions = RETRY_EXCEPTIONS
if retry_delay is None:
retry_delay = DEFAULT_DELAY
if timeout is None:
timeout = DEFAULT_TIMEOUT
if backoff is None:
backoff = DEFAULT_BACKOFF
timeout = max(timeout, 0)
def transform_ssl_error(func, *args, **kwargs):
try:
return func(*args, **kwargs)
except ssl.SSLError:
exc = sys.exc_info()[1]
if TRANSIENT_SSL_ERROR in str(exc):
raise TransientSSLError(*exc.args)
raise exc
def decorator(func):
@wraps(func)
def retry_loop(*args, **kwargs):
current_delay = retry_delay
end = datetime.now() + timedelta(seconds=timeout)
while True:
try:
return transform_ssl_error(func, *args, **kwargs)
except retry_exceptions:
exc = sys.exc_info()[1]
if isinstance(exc, RateLimitReachedError):
time.sleep(exc.retry_after)
# Reset retries if we're told to wait due to rate
# limiting
current_delay = retry_delay
end = datetime.now() + timedelta(
seconds=exc.retry_after + timeout)
elif datetime.now() >= end:
raise
else:
time.sleep(current_delay)
current_delay *= backoff
return retry_loop
return decorator
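# Hedged usage sketch of the ``retry`` decorator above. The decorated
# function and its ``connection`` argument are hypothetical; any exception
# listed in ``retry_exceptions`` raised inside the body triggers a sleep of
# ``retry_delay`` seconds (multiplied by ``backoff`` after every failure)
# and another attempt, until ``timeout`` seconds have elapsed.
@retry(retry_delay=1, timeout=10, backoff=2)
def _list_nodes_with_retry(connection):
    return connection.request('/nodes')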
|
|
from safemdp.grid_world import *
from osgeo import gdal
from scipy import interpolate
import numpy as np
import os
import matplotlib.pyplot as plt
import GPy
__all__ = ['mars_map', 'initialize_SafeMDP_object', 'performance_metrics']
def mars_map(plot_map=False, interpolation=False):
"""
Extract the map for the simulation from the HiRISE data. If the HiRISE
data is not in the current folder it will be downloaded and converted to
GeoTiff extension with gdal.
Parameters
----------
plot_map: bool
If true plots the map that will be used for exploration
interpolation: bool
If true the data of the map will be interpolated with splines to
obtain a finer grid
Returns
-------
altitudes: np.array
1-d vector with altitudes for each node
coord: np.array
Coordinate of the map we use for exploration
world_shape: tuple
Size of the grid world (rows, columns)
step_size: tuple
Step size for the grid (row, column)
num_of_points: int
Interpolation parameter. Indicates the scaling factor for the
original step size
"""
# Define the dimension of the map we want to investigate and its resolution
world_shape = (120, 70)
step_size = (1., 1.)
# Download and convert to GEOtiff Mars data
if not os.path.exists('./mars.tif'):
if not os.path.exists("./mars.IMG"):
import urllib
            print('Downloading MARS map, this may take a while...')
# Download the IMG file
urllib.urlretrieve(
"http://www.uahirise.org/PDS/DTM/PSP/ORB_010200_010299"
"/PSP_010228_1490_ESP_016320_1490"
"/DTEEC_010228_1490_016320_1490_A01.IMG", "mars.IMG")
# Convert to tif
print('Converting map to geotif...')
os.system("gdal_translate -of GTiff ./mars.IMG ./mars.tif")
print('Done')
# Read the data with gdal module
gdal.UseExceptions()
ds = gdal.Open("./mars.tif")
band = ds.GetRasterBand(1)
elevation = band.ReadAsArray()
# Extract the area of interest
startX = 2890
startY = 1955
altitudes = np.copy(elevation[startX:startX + world_shape[0],
startY:startY + world_shape[1]])
# Center the data
mean_val = (np.max(altitudes) + np.min(altitudes)) / 2.
altitudes[:] = altitudes - mean_val
# Define coordinates
n, m = world_shape
step1, step2 = step_size
xx, yy = np.meshgrid(np.linspace(0, (n - 1) * step1, n),
np.linspace(0, (m - 1) * step2, m), indexing="ij")
coord = np.vstack((xx.flatten(), yy.flatten())).T
# Interpolate data
if interpolation:
# Interpolating function
spline_interpolator = interpolate.RectBivariateSpline(
np.linspace(0, (n - 1) * step1, n),
np.linspace(0, (m - 1) * step1, m), altitudes)
# New size and resolution
num_of_points = 1
world_shape = tuple([(x - 1) * num_of_points + 1 for x in world_shape])
step_size = tuple([x / num_of_points for x in step_size])
# New coordinates and altitudes
n, m = world_shape
step1, step2 = step_size
xx, yy = np.meshgrid(np.linspace(0, (n - 1) * step1, n),
np.linspace(0, (m - 1) * step2, m), indexing="ij")
coord = np.vstack((xx.flatten(), yy.flatten())).T
altitudes = spline_interpolator(np.linspace(0, (n - 1) * step1, n),
np.linspace(0, (m - 1) * step2, m))
else:
num_of_points = 1
# Plot area
if plot_map:
plt.imshow(altitudes.T, origin="lower", interpolation="nearest")
plt.colorbar()
plt.show()
altitudes = altitudes.flatten()
return altitudes, coord, world_shape, step_size, num_of_points
def initialize_SafeMDP_object(altitudes, coord, world_shape, step_size, L=0.2,
beta=2, length=14.5, sigma_n=0.075, start_x=60,
start_y=61):
"""
Parameters
----------
altitudes: np.array
1-d vector with altitudes for each node
coord: np.array
Coordinate of the map we use for exploration
world_shape: tuple
Size of the grid world (rows, columns)
step_size: tuple
Step size for the grid (row, column)
L: float
Lipschitz constant to compute expanders
beta: float
Scaling factor for confidence intervals
length: float
Lengthscale for Matern kernel
sigma_n:
Standard deviation for gaussian noise
start_x: int
x coordinate of the starting point
start_y:
y coordinate of the starting point
Returns
-------
start: int
Node number of initial state
x: SafeMDP
Instance of the SafeMDP class for the mars exploration problem
true_S_hat: np.array
True S_hat if safety feature is known with no error and h_hard is used
true_S_hat_epsilon: np.array
True S_hat if safety feature is known up to epsilon and h is used
h_hard: float
        True safety threshold. It can differ from the safety threshold used
        for classification when the agent needs to use extra caution
        (in our experiments h=25 deg, h_hard=30 deg)
"""
# Safety threshold
h = -np.tan(np.pi / 9. + np.pi / 36.) * step_size[0]
    # Initial node
start = start_x * world_shape[1] + start_y
# Initial safe sets
S_hat0 = compute_S_hat0(start, world_shape, 4, altitudes,
step_size, h)
S0 = np.copy(S_hat0)
S0[:, 0] = True
# Initialize GP
X = coord[start, :].reshape(1, 2)
Y = altitudes[start].reshape(1, 1)
kernel = GPy.kern.Matern52(input_dim=2, lengthscale=length, variance=100.)
lik = GPy.likelihoods.Gaussian(variance=sigma_n ** 2)
gp = GPy.core.GP(X, Y, kernel, lik)
# Define SafeMDP object
x = GridWorld(gp, world_shape, step_size, beta, altitudes, h, S0,
S_hat0, L, update_dist=25)
# Add samples about actions from starting node
for i in range(5):
x.add_observation(start, 1)
x.add_observation(start, 2)
x.add_observation(start, 3)
x.add_observation(start, 4)
x.gp.set_XY(X=x.gp.X[1:, :], Y=x.gp.Y[1:, :]) # Necessary for results as in
# paper
    # True safe set used for the false-safe metric (hard threshold h_hard)
h_hard = -np.tan(np.pi / 6.) * step_size[0]
true_S = compute_true_safe_set(x.world_shape, x.altitudes, h_hard)
true_S_hat = compute_true_S_hat(x.graph, true_S, x.initial_nodes)
# True safe set for completeness
epsilon = sigma_n * beta
true_S_epsilon = compute_true_safe_set(x.world_shape, x.altitudes,
x.h + epsilon)
true_S_hat_epsilon = compute_true_S_hat(x.graph, true_S_epsilon,
x.initial_nodes)
return start, x, true_S_hat, true_S_hat_epsilon, h_hard
def performance_metrics(path, x, true_S_hat_epsilon, true_S_hat, h_hard):
"""
Parameters
----------
path: np.array
Nodes of the shortest safe path
x: SafeMDP
Instance of the SafeMDP class for the mars exploration problem
true_S_hat_epsilon: np.array
True S_hat if safety feature is known up to epsilon and h is used
true_S_hat: np.array
True S_hat if safety feature is known with no error and h_hard is used
h_hard: float
        True safety threshold. It can differ from the safety threshold used
        for classification when the agent needs to use extra caution
        (in our experiments h=25 deg, h_hard=30 deg)
Returns
-------
unsafe_transitions: int
Number of unsafe transitions along the path
coverage: float
Percentage of coverage of true_S_hat_epsilon
false_safe: int
        Number of misclassifications (classifying something as safe when it
        actually is unsafe according to h_hard)
"""
# Count unsafe transitions along the path
path_altitudes = x.altitudes[path]
unsafe_transitions = np.sum(-np.diff(path_altitudes) < h_hard)
# Coverage
max_size = float(np.count_nonzero(true_S_hat_epsilon))
coverage = 100 * np.count_nonzero(np.logical_and(x.S_hat,
true_S_hat_epsilon))/max_size
# False safe
false_safe = np.count_nonzero(np.logical_and(x.S_hat, ~true_S_hat))
return unsafe_transitions, coverage, false_safe
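# Hedged end-to-end sketch (never called) of how the three helpers above are
# meant to be chained. The exploration loop itself is omitted; it would add
# observations to ``x`` and record the traversed nodes in ``path``, which is
# left as a single-node placeholder here.
def _example_pipeline():
    altitudes, coord, world_shape, step_size, _ = mars_map()
    start, x, true_S_hat, true_S_hat_eps, h_hard = initialize_SafeMDP_object(
        altitudes, coord, world_shape, step_size)
    path = np.array([start])  # placeholder for the nodes actually visited
    return performance_metrics(path, x, true_S_hat_eps, true_S_hat, h_hard)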
|
|
from datetime import timedelta
import httplib as http
from django.utils import timezone
from nose.tools import * # noqa (PEP8 asserts)
from modularodm.exceptions import KeyExistsException
from framework.auth import campaigns, views as auth_views, cas
from website.util import web_url_for
from website.project.model import ensure_schemas
from osf_tests import factories
from tests.base import OsfTestCase
from tests.utils import mock_auth
def set_preprint_providers():
"""Populate `PreprintProvider` to test database for testing."""
providers = {
'osf': 'Open Science Framework',
'socarxiv': 'SocArXiv',
'engrxiv': 'EngrXiv',
'psyarxiv': 'PsyArXiv',
}
for key, value in providers.items():
provider = factories.PreprintProviderFactory()
provider._id = key
provider.name = value
try:
provider.save()
except KeyExistsException:
continue
# tests for campaign initialization and update
class TestCampaignInitialization(OsfTestCase):
def setUp(self):
super(TestCampaignInitialization, self).setUp()
set_preprint_providers()
self.campaign_lists = [
'prereg',
'erpc',
'institution',
'osf-preprints',
'socarxiv-preprints',
'engrxiv-preprints',
'psyarxiv-preprints',
'osf-registries',
]
self.refresh = timezone.now()
campaigns.CAMPAIGNS = None # force campaign refresh now that preprint providers are populated
campaigns.CAMPAIGNS_LAST_REFRESHED = self.refresh
def test_get_campaigns_init(self):
campaign_dict = campaigns.get_campaigns()
assert_equal(len(campaign_dict), len(self.campaign_lists))
for campaign in campaign_dict:
assert_in(campaign, self.campaign_lists)
assert_not_equal(self.refresh, campaigns.CAMPAIGNS_LAST_REFRESHED)
def test_get_campaigns_update_not_expired(self):
campaigns.get_campaigns()
self.refresh = campaigns.CAMPAIGNS_LAST_REFRESHED
campaigns.get_campaigns()
assert_equal(self.refresh, campaigns.CAMPAIGNS_LAST_REFRESHED)
def test_get_campaigns_update_expired(self):
campaigns.get_campaigns()
self.refresh = timezone.now() - timedelta(minutes=5)
campaigns.CAMPAIGNS_LAST_REFRESHED = self.refresh
campaigns.get_campaigns()
assert_not_equal(self.refresh, campaigns.CAMPAIGNS_LAST_REFRESHED)
# tests for campaign helper methods
class TestCampaignMethods(OsfTestCase):
def setUp(self):
super(TestCampaignMethods, self).setUp()
set_preprint_providers()
self.campaign_lists = [
'prereg',
'erpc',
'institution',
'osf-preprints',
'socarxiv-preprints',
'engrxiv-preprints',
'psyarxiv-preprints',
]
self.invalid_campaign = 'invalid_campaign'
campaigns.CAMPAIGNS = None # force campaign refresh now that preprint providers are populated
def test_is_institution_login(self):
for campaign in self.campaign_lists:
institution = campaigns.is_institution_login(campaign)
if campaign == 'institution':
assert_true(institution)
else:
assert_false(institution)
institution = campaigns.is_institution_login(self.invalid_campaign)
assert_true(institution is None)
def test_is_native_login(self):
for campaign in self.campaign_lists:
native = campaigns.is_native_login(campaign)
if campaign == 'prereg' or campaign == 'erpc':
assert_true(native)
else:
assert_false(native)
        native = campaigns.is_native_login(self.invalid_campaign)
assert_true(native is None)
def test_is_proxy_login(self):
for campaign in self.campaign_lists:
proxy = campaigns.is_proxy_login(campaign)
if campaign.endswith('-preprints'):
assert_true(proxy)
else:
assert_false(proxy)
proxy = campaigns.is_proxy_login(self.invalid_campaign)
assert_true(proxy is None)
def test_system_tag_for_campaign(self):
for campaign in self.campaign_lists:
tag = campaigns.system_tag_for_campaign(campaign)
assert_true(tag is not None)
tag = campaigns.system_tag_for_campaign(self.invalid_campaign)
assert_true(tag is None)
def test_email_template_for_campaign(self):
for campaign in self.campaign_lists:
template = campaigns.email_template_for_campaign(campaign)
if campaigns.is_institution_login(campaign):
assert_true(template is None)
else:
assert_true(template is not None)
template = campaigns.email_template_for_campaign(self.invalid_campaign)
assert_true(template is None)
def test_campaign_url_for(self):
for campaign in self.campaign_lists:
url = campaigns.campaign_url_for(campaign)
assert_true(url is not None)
url = campaigns.campaign_url_for(self.invalid_campaign)
assert_true(url is None)
def test_get_service_provider(self):
for campaign in self.campaign_lists:
provider = campaigns.get_service_provider(campaign)
if campaigns.is_proxy_login(campaign):
assert_true(provider is not None)
else:
assert_true(provider is None)
provider = campaigns.get_service_provider(self.invalid_campaign)
assert_true(provider is None)
def test_campaign_for_user(self):
user = factories.UserFactory()
user.add_system_tag('osf_preprints')
user.save()
campaign = campaigns.campaign_for_user(user)
assert_equal(campaign, 'osf-preprints')
# tests for prereg, erpc, which follow similar auth login/register logic
class TestCampaignsAuthViews(OsfTestCase):
def setUp(self):
super(TestCampaignsAuthViews, self).setUp()
self.campaigns = {
'prereg': {
'title_register': 'Preregistration Challenge',
'title_landing': 'Welcome to the Prereg Challenge!'
},
'erpc': {
'title_register': 'Election Research Preacceptance Competition',
'title_landing': 'The Election Research Preacceptance Competition is Now Closed'
},
}
for key, value in self.campaigns.items():
value.update({'url_login': web_url_for('auth_login', campaign=key)})
value.update({'url_register': web_url_for('auth_register', campaign=key)})
value.update({'url_landing': campaigns.campaign_url_for(key)})
self.user = factories.AuthUserFactory()
def test_campaign_register_view_logged_in(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_register'], auth=self.user.auth)
assert_equal(resp.status_code, http.FOUND)
assert_equal(value['url_landing'], resp.headers['Location'])
def test_campaign_register_view_logged_out(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_register'])
assert_equal(resp.status_code, http.OK)
assert_in(value['title_register'], resp)
def test_campaign_login_logged_in(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_login'], auth=self.user.auth)
assert_equal(resp.status_code, http.FOUND)
assert_in(value['url_landing'], resp.headers['Location'])
def test_campaign_login_logged_out(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_login'])
assert_equal(resp.status_code, http.FOUND)
assert_in(value['url_register'], resp.headers['Location'])
def test_campaign_landing_logged_in(self):
ensure_schemas()
for key, value in self.campaigns.items():
resp = self.app.get(value['url_landing'], auth=self.user.auth)
assert_equal(resp.status_code, http.OK)
assert_in(value['title_landing'], resp)
def test_auth_prereg_landing_page_logged_out(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_landing'])
assert_equal(resp.status_code, http.FOUND)
assert_in(cas.get_login_url(value['url_landing']), resp.headers['Location'])
# tests for registration through campaigns
class TestRegistrationThroughCampaigns(OsfTestCase):
def setUp(self):
super(TestRegistrationThroughCampaigns, self).setUp()
def test_confirm_email_get_with_campaign(self):
for key, value in campaigns.CAMPAIGNS.items():
user = factories.UnconfirmedUserFactory()
user.add_system_tag(value.get('system_tag'))
user.save()
token = user.get_confirmation_token(user.username)
kwargs = {
'uid': user._id,
}
with self.app.app.test_request_context(), mock_auth(user):
res = auth_views.confirm_email_get(token, **kwargs)
assert_equal(res.status_code, http.FOUND)
assert_equal(res.location, campaigns.campaign_url_for(key))
# tests for institution
class TestCampaignsCASInstitutionLogin(OsfTestCase):
def setUp(self):
super(TestCampaignsCASInstitutionLogin, self).setUp()
self.url_login = web_url_for('auth_login', campaign='institution')
self.url_register = web_url_for('auth_register', campaign='institution')
self.service_url = web_url_for('dashboard', _absolute=True)
# go to CAS institution login page if not logged in
def test_institution_not_logged_in(self):
resp = self.app.get(self.url_login)
assert_equal(resp.status_code, http.FOUND)
assert_in(cas.get_login_url(self.service_url, campaign='institution'), resp.headers['Location'])
        # register behaves the same as login
resp2 = self.app.get(self.url_register)
assert_equal(resp.headers['Location'], resp2.headers['Location'])
    # go to target page (service url) if logged in
def test_institution_logged_in(self):
resp = self.app.get(self.url_login)
assert_equal(resp.status_code, http.FOUND)
assert_in(self.service_url, resp.headers['Location'])
        # register behaves the same as login
resp2 = self.app.get(self.url_register)
assert_equal(resp.headers['Location'], resp2.headers['Location'])
|
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import logging
import sys
import six
from six.moves import filter
from six.moves import range
from .cluster_balancer import ClusterBalancer
from .error import BrokerDecommissionError
from .error import EmptyReplicationGroupError
from .error import InvalidBrokerIdError
from .error import InvalidPartitionError
from .error import InvalidReplicationFactorError
from .error import NotEligibleGroupError
from .error import RebalanceError
from .util import compute_optimum
from .util import separate_groups
class PartitionCountBalancer(ClusterBalancer):
"""An implementation of cluster rebalancing that tries to achieve balance
by considering the number of partitions and leaders on each broker.
:param cluster_topology: The ClusterTopology object that should be acted
on.
:param args: The program arguments.
"""
def __init__(self, cluster_topology, args):
super(PartitionCountBalancer, self).__init__(cluster_topology, args)
self.log = logging.getLogger(self.__class__.__name__)
def _set_arg_default(self, arg, value):
if not hasattr(self.args, arg):
setattr(self.args, arg, value)
def parse_args(self, balancer_args):
self._set_arg_default('replication_groups', False)
self._set_arg_default('brokers', False)
self._set_arg_default('leaders', False)
self._set_arg_default('max_partition_movements', None)
self._set_arg_default('max_movement_size', None)
self._set_arg_default('max_leader_changes', None)
parser = argparse.ArgumentParser(
prog=self.__class__.__name__,
description='Balance the cluster based on the number of partitions'
' per broker and replication-group.',
)
parser.parse_args(balancer_args, self.args)
def decommission_brokers(self, broker_ids):
"""Decommission a list of brokers trying to keep the replication group
the brokers belong to balanced.
:param broker_ids: list of string representing valid broker ids in the cluster
:raises: InvalidBrokerIdError when the id is invalid.
"""
groups = set()
for b_id in broker_ids:
try:
broker = self.cluster_topology.brokers[b_id]
except KeyError:
self.log.error("Invalid broker id %s.", b_id)
                # Raise an error for now. As an alternative we could ignore
                # the invalid id and continue with the others.
raise InvalidBrokerIdError(
"Broker id {} does not exist in cluster".format(b_id),
)
broker.mark_decommissioned()
groups.add(broker.replication_group)
for group in groups:
self._decommission_brokers_in_group(group)
def _decommission_brokers_in_group(self, group):
"""Decommission the marked brokers of a group."""
try:
group.rebalance_brokers()
except EmptyReplicationGroupError:
self.log.warning("No active brokers left in replication group %s", group)
for broker in group.brokers:
if broker.decommissioned and not broker.empty():
# In this case we need to reassign the remaining partitions
# to other replication groups
self.log.info(
"Broker %s can't be decommissioned within the same "
"replication group %s. Moving partitions to other "
"replication groups.",
broker,
broker.replication_group,
)
self._force_broker_decommission(broker)
# Broker should be empty now
if not broker.empty():
# Decommission may be impossible if there are not enough
                # brokers to redistribute the replicas.
self.log.error(
"Could not decommission broker %s. "
"Partitions %s cannot be reassigned.",
broker,
broker.partitions,
)
raise BrokerDecommissionError("Broker decommission failed.")
def _force_broker_decommission(self, broker):
available_groups = [
rg for rg in six.itervalues(self.cluster_topology.rgs)
if rg is not broker.replication_group
]
for partition in broker.partitions.copy(): # partitions set changes during loop
groups = sorted(
available_groups,
key=lambda x: x.count_replica(partition),
)
for group in groups:
self.log.debug(
"Try to move partition: %s from broker %s to "
"replication group %s",
partition,
broker,
broker.replication_group,
)
try:
group.acquire_partition(partition, broker)
break
except NotEligibleGroupError:
pass
def rebalance(self):
if self.args.max_movement_size:
self.log.error(
'--max-movement-size can not be specified for {balancer}.'
' Exiting.'.format(
balancer=self.__class__.__name__,
),
)
sys.exit(1)
if self.args.replication_groups:
self.log.info(
'Re-balancing replica-count over replication groups: %s',
', '.join(str(rg) for rg in self.cluster_topology.rgs.keys()),
)
self.rebalance_replication_groups()
if self.args.brokers:
self.log.info(
'Re-balancing partition-count across brokers: %s',
', '.join(str(e) for e in self.cluster_topology.brokers.keys()),
)
self.rebalance_brokers()
if self.args.leaders:
self.log.info(
'Re-balancing leader-count across brokers: %s',
', '.join(str(e) for e in self.cluster_topology.brokers.keys()),
)
self.rebalance_leaders()
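    # Hedged usage sketch (``cluster_topology`` and ``args`` are normally
    # built by the kafka-cluster-manager command line tool, not shown here):
    #
    #     balancer = PartitionCountBalancer(cluster_topology, args)
    #     balancer.parse_args([])   # fill in defaults for any missing options
    #     balancer.rebalance()      # runs the replication-group / broker /
    #                               # leader steps enabled in ``args``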
def rebalance_replication_groups(self):
"""Rebalance partitions over replication groups.
First step involves rebalancing replica-count for each partition across
replication-groups.
Second step involves rebalancing partition-count across replication-groups
of the cluster.
"""
# Balance replicas over replication-groups for each partition
if any(b.inactive for b in six.itervalues(self.cluster_topology.brokers)):
self.log.error(
"Impossible to rebalance replication groups because of inactive "
"brokers."
)
raise RebalanceError(
"Impossible to rebalance replication groups because of inactive "
"brokers"
)
# Balance replica-count over replication-groups
self.rebalance_replicas()
# Balance partition-count over replication-groups
self._rebalance_groups_partition_cnt()
# Re-balancing partition count across brokers
def rebalance_brokers(self):
"""Rebalance partition-count across brokers within each replication-group."""
for rg in six.itervalues(self.cluster_topology.rgs):
rg.rebalance_brokers()
def revoke_leadership(self, broker_ids):
"""Revoke leadership for given brokers.
:param broker_ids: List of broker-ids whose leadership needs to be revoked.
"""
for b_id in broker_ids:
try:
broker = self.cluster_topology.brokers[b_id]
except KeyError:
self.log.error("Invalid broker id %s.", b_id)
raise InvalidBrokerIdError(
"Broker id {} does not exist in cluster".format(b_id),
)
broker.mark_revoked_leadership()
assert(len(self.cluster_topology.brokers) - len(broker_ids) > 0), "Not " \
"all brokers can be revoked for leadership"
opt_leader_cnt = len(self.cluster_topology.partitions) // (
len(self.cluster_topology.brokers) - len(broker_ids)
)
# Balanced brokers transfer leadership to their under-balanced followers
self.rebalancing_non_followers(opt_leader_cnt)
# If the broker-ids to be revoked from leadership are still leaders for any
# partitions, try to forcefully move their leadership to followers if possible
pending_brokers = [
b for b in six.itervalues(self.cluster_topology.brokers)
if b.revoked_leadership and b.count_preferred_replica() > 0
]
for b in pending_brokers:
self._force_revoke_leadership(b)
def _force_revoke_leadership(self, broker):
"""Revoke the leadership of given broker for any remaining partitions.
Algorithm:
1. Find the partitions (owned_partitions) with given broker as leader.
2. For each partition find the eligible followers.
Brokers which are not to be revoked from leadership are eligible followers.
3. Select the follower who is leader for minimum partitions.
4. Assign the selected follower as leader.
5. Notify for any pending owned_partitions whose leader cannot be changed.
This could be due to replica size 1 or eligible followers are None.
"""
owned_partitions = list(filter(
lambda p: broker is p.leader,
broker.partitions,
))
for partition in owned_partitions:
if len(partition.replicas) == 1:
self.log.error(
"Cannot be revoked leadership for broker {b} for partition {p}. Replica count: 1"
.format(p=partition, b=broker),
)
continue
eligible_followers = [
follower for follower in partition.followers
if not follower.revoked_leadership
]
if eligible_followers:
# Pick follower with least leader-count
best_fit_follower = min(
eligible_followers,
key=lambda follower: follower.count_preferred_replica(),
)
partition.swap_leader(best_fit_follower)
else:
self.log.error(
"All replicas for partition {p} on broker {b} are to be revoked for leadership.".format(
p=partition,
b=broker,
)
)
# Re-balancing leaders
def rebalance_leaders(self):
"""Re-order brokers in replicas such that, every broker is assigned as
preferred leader evenly.
"""
opt_leader_cnt = len(self.cluster_topology.partitions) // len(self.cluster_topology.brokers)
# Balanced brokers transfer leadership to their under-balanced followers
self.rebalancing_non_followers(opt_leader_cnt)
def rebalancing_non_followers(self, opt_cnt):
"""Transfer leadership to any under-balanced followers on the pretext
that they remain leader-balanced or can be recursively balanced through
non-followers (followers of other leaders).
Context:
Consider a graph G:
Nodes: Brokers (e.g. b1, b2, b3)
Edges: From b1 to b2 s.t. b1 is a leader and b2 is its follower
State of nodes:
1. Over-balanced/Optimally-balanced: (OB)
if leadership-count(broker) >= opt-count
2. Under-balanced (UB) if leadership-count(broker) < opt-count
leader-balanced: leadership-count(broker) is in [opt-count, opt-count+1]
Algorithm:
            1. Use a depth-first search to find a path from some UB-broker
               to some OB-broker.
2. If path found, update UB-broker and delete path-edges (skip-partitions).
3. Continue with step-1 until all possible paths explored.
"""
# Don't include leaders if they are marked for leadership removal
under_brokers = list(filter(
lambda b: b.count_preferred_replica() < opt_cnt and not b.revoked_leadership,
six.itervalues(self.cluster_topology.brokers),
))
if under_brokers:
skip_brokers, skip_partitions = [], []
for broker in under_brokers:
skip_brokers.append(broker)
broker.request_leadership(opt_cnt, skip_brokers, skip_partitions)
over_brokers = list(filter(
lambda b: b.count_preferred_replica() > opt_cnt + 1,
six.itervalues(self.cluster_topology.brokers),
))
# Any over-balanced broker tries to donate its leadership to followers
if over_brokers:
skip_brokers, used_edges = [], []
for broker in over_brokers:
skip_brokers.append(broker)
broker.donate_leadership(opt_cnt, skip_brokers, used_edges)
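# Illustrative sketch (hypothetical numbers, not the module's API): the opt-count used
# above is an integer division of partition count by broker count, and brokers are then
# treated as under-balanced (below the optimum) or over-balanced (above optimum + 1):
partition_count, broker_count = 10, 3
opt_leader_cnt_example = partition_count // broker_count  # 3
leader_counts = {'b1': 6, 'b2': 3, 'b3': 1}
under_balanced = [b for b, n in leader_counts.items() if n < opt_leader_cnt_example]     # ['b3']
over_balanced = [b for b, n in leader_counts.items() if n > opt_leader_cnt_example + 1]  # ['b1']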
# Re-balancing partition count across brokers
def _rebalance_groups_partition_cnt(self):
"""Re-balance partition-count across replication-groups.
Algorithm:
The key constraint is not to create any replica-count imbalance while
moving partitions across replication-groups.
1) Divide replication-groups into over and under loaded groups in terms
of partition-count.
2) For each over-loaded replication-group, select eligible partitions
which can be moved to under-loaded groups. Partitions with greater
than optimum replica-count for the group have the ability to donate one
of their replicas without creating replica-count imbalance.
3) Destination replication-group is selected based on minimum partition-count
and ability to accept one of the eligible partition-replicas.
4) Source and destination brokers are selected based on :-
* their ability to donate and accept extra partition-replica respectively.
* maximum and minimum partition-counts respectively.
5) Move partition-replica from source to destination-broker.
6) Repeat steps 1) to 5) until groups are balanced or cannot be balanced further.
"""
# Segregate replication-groups based on partition-count
total_elements = sum(len(rg.partitions) for rg in six.itervalues(self.cluster_topology.rgs))
over_loaded_rgs, under_loaded_rgs = separate_groups(
list(self.cluster_topology.rgs.values()),
lambda rg: len(rg.partitions),
total_elements,
)
if over_loaded_rgs and under_loaded_rgs:
self.cluster_topology.log.info(
'Over-loaded replication-groups {over_loaded}, under-loaded '
'replication-groups {under_loaded} based on partition-count'
.format(
over_loaded=[rg.id for rg in over_loaded_rgs],
under_loaded=[rg.id for rg in under_loaded_rgs],
)
)
else:
self.cluster_topology.log.info('Replication-groups are balanced based on partition-count.')
return
# Get optimal partition-count per replication-group
opt_partition_cnt, _ = compute_optimum(
len(self.cluster_topology.rgs),
total_elements,
)
# Balance replication-groups
for over_loaded_rg in over_loaded_rgs:
for under_loaded_rg in under_loaded_rgs:
# Filter unique partitions with replica-count > opt-replica-count
# in over-loaded rgs and <= opt-replica-count in under-loaded rgs
eligible_partitions = set(filter(
lambda partition:
over_loaded_rg.count_replica(partition) >
len(partition.replicas) // len(self.cluster_topology.rgs) and
under_loaded_rg.count_replica(partition) <=
len(partition.replicas) // len(self.cluster_topology.rgs),
over_loaded_rg.partitions,
))
# Move all possible partitions
for eligible_partition in eligible_partitions:
# The difference of partition-count b/w the over-loaded and under-loaded
# replication-groups should be greater than 1 for convergence
if len(over_loaded_rg.partitions) - len(under_loaded_rg.partitions) > 1:
over_loaded_rg.move_partition_replica(
under_loaded_rg,
eligible_partition,
)
else:
break
# Move to next replication-group if either of the groups got
# balanced, otherwise try with next eligible partition
if (len(under_loaded_rg.partitions) == opt_partition_cnt or
len(over_loaded_rg.partitions) == opt_partition_cnt):
break
if len(over_loaded_rg.partitions) == opt_partition_cnt:
# Move to next over-loaded replication-group if balanced
break
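# Illustrative sketch: partition-count balancing across replication-groups uses the same
# floor-division optimum. Assuming compute_optimum(n_groups, total) returns
# (total // n_groups, total % n_groups), as its usage above suggests, a hypothetical
# three-group cluster with 14 partition-replicas splits roughly like this:
total_replicas_example, n_groups_example = 14, 3
opt_cnt_example = total_replicas_example // n_groups_example  # 4
extra_example = total_replicas_example % n_groups_example     # 2
rg_partition_counts = {'rg1': 7, 'rg2': 4, 'rg3': 3}
# A rough split; the real separate_groups() also accounts for the remainder:
over_loaded_example = [rg for rg, n in rg_partition_counts.items() if n > opt_cnt_example]   # ['rg1']
under_loaded_example = [rg for rg, n in rg_partition_counts.items() if n < opt_cnt_example]  # ['rg3']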
def add_replica(self, partition_name, count=1):
"""Increase the replication-factor for a partition.
The replication-group to add to is determined as follows:
1. Find all replication-groups that have brokers not already
replicating the partition.
2. Of these, find replication-groups that have fewer than the
average number of replicas for this partition.
3. Choose the replication-group with the fewest overall partitions.
:param partition_name: (topic_id, partition_id) of the partition to add
replicas of.
:param count: The number of replicas to add.
:raises InvalidReplicationFactorError when the resulting replication
factor is greater than the number of brokers in the cluster.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found".format(name=partition_name),
)
if partition.replication_factor + count > len(self.cluster_topology.brokers):
raise InvalidReplicationFactorError(
"Cannot increase replication factor to {0}. There are only "
"{1} brokers."
.format(
partition.replication_factor + count,
len(self.cluster_topology.brokers),
)
)
non_full_rgs = [
rg
for rg in self.cluster_topology.rgs.values()
if rg.count_replica(partition) < len(rg.brokers)
]
for _ in range(count):
total_replicas = sum(
rg.count_replica(partition)
for rg in non_full_rgs
)
opt_replicas, _ = compute_optimum(
len(non_full_rgs),
total_replicas,
)
under_replicated_rgs = [
rg
for rg in non_full_rgs
if rg.count_replica(partition) < opt_replicas
]
candidate_rgs = under_replicated_rgs or non_full_rgs
rg = min(candidate_rgs, key=lambda rg: len(rg.partitions))
rg.add_replica(partition)
if rg.count_replica(partition) >= len(rg.brokers):
non_full_rgs.remove(rg)
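# Illustrative sketch (plain dicts, hypothetical numbers): each added replica goes to a
# replication-group that is not yet full for this partition, preferring groups below the
# per-group optimum and, among those candidates, the group with the fewest partitions:
replicas_per_rg = {'rg1': 2, 'rg2': 1, 'rg3': 0}       # replicas of this partition per rg
partitions_per_rg = {'rg1': 40, 'rg2': 25, 'rg3': 30}  # total partition-replicas per rg
opt_replicas_example = sum(replicas_per_rg.values()) // len(replicas_per_rg)  # 1
candidates = [rg for rg, n in replicas_per_rg.items() if n < opt_replicas_example] or list(replicas_per_rg)
target_rg = min(candidates, key=partitions_per_rg.get)  # 'rg3'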
def remove_replica(self, partition_name, osr_broker_ids, count=1):
"""Remove one replica of a partition from the cluster.
The replication-group to remove from is determined as follows:
1. Find all replication-groups that contain at least one
out-of-sync replica for this partition.
2. Of these, find replication-groups with more than the average
number of replicas of this partition.
3. Choose the replication-group with the most overall partitions.
4. Repeat steps 1-3 with in-sync replicas
After this operation, the preferred leader for this partition will
be set to the broker that leads the fewest other partitions, even if
the current preferred leader is not removed.
This is done to keep the number of preferred replicas balanced across
brokers in the cluster.
:param partition_name: (topic_id, partition_id) of the partition to
remove replicas of.
:param osr_broker_ids: A list of the partition's out-of-sync broker ids.
:param count: The number of replicas to remove.
:raises: InvalidReplicationFactorError when count is greater than the
replication factor of the partition.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found".format(name=partition_name),
)
if partition.replication_factor <= count:
raise InvalidReplicationFactorError(
"Cannot remove {0} replicas. Replication factor is only {1}."
.format(count, partition.replication_factor)
)
osr = []
for broker_id in osr_broker_ids:
try:
osr.append(self.cluster_topology.brokers[broker_id])
except KeyError:
raise InvalidBrokerIdError(
"No broker found with id {bid}".format(bid=broker_id),
)
non_empty_rgs = [
rg
for rg in self.cluster_topology.rgs.values()
if rg.count_replica(partition) > 0
]
rgs_with_osr = [
rg
for rg in non_empty_rgs
if any(b in osr for b in rg.brokers)
]
for _ in range(count):
candidate_rgs = rgs_with_osr or non_empty_rgs
total_replicas = sum(
rg.count_replica(partition)
for rg in candidate_rgs
)
opt_replica_cnt, _ = compute_optimum(
len(candidate_rgs),
total_replicas,
)
over_replicated_rgs = [
rg
for rg in candidate_rgs
if rg.count_replica(partition) > opt_replica_cnt
]
candidate_rgs = over_replicated_rgs or candidate_rgs
rg = max(candidate_rgs, key=lambda rg: len(rg.partitions))
osr_in_rg = [b for b in rg.brokers if b in osr]
rg.remove_replica(partition, osr_in_rg)
osr = [b for b in osr if b in partition.replicas]
if rg in rgs_with_osr and len(osr_in_rg) == 1:
rgs_with_osr.remove(rg)
if rg.count_replica(partition) == 0:
non_empty_rgs.remove(rg)
new_leader = min(
partition.replicas,
key=lambda broker: broker.count_preferred_replica(),
)
partition.swap_leader(new_leader)
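# Illustrative sketch (hypothetical counts): after removing replicas, leadership of the
# partition is handed to whichever remaining replica broker currently leads the fewest
# partitions, keeping preferred leaders spread across the cluster:
preferred_leader_counts = {'broker-1': 9, 'broker-2': 4, 'broker-5': 6}  # remaining replicas
new_leader_example = min(preferred_leader_counts, key=preferred_leader_counts.get)  # 'broker-2'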
|
|
""" Module with various calculators """
from dynamic_dynamodb.log_handler import LOGGER as logger
from dynamic_dynamodb.config_handler import get_table_option
def get_min_provisioned_reads(current_provisioning, key_name):
""" Returns the minimum provisioned reads
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type key_name: str
:param key_name: Name of the key
:returns: int -- Minimum provisioned reads
"""
if get_table_option(key_name, 'min_provisioned_reads'):
return int(min(
get_table_option(key_name, 'min_provisioned_reads'),
(current_provisioning * 2)))
return int(current_provisioning * 2)
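# Worked example (hypothetical config): if the table's min_provisioned_reads option is
# 300 but the current provisioning is only 100, the effective minimum is capped at twice
# the current value: min(300, 100 * 2) == 200. With no option set, it is simply 2x current.
min_option_example, current_example = 300, 100
effective_min_reads = int(min(min_option_example, current_example * 2))  # 200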
def get_min_provisioned_writes(current_provisioning, key_name):
""" Returns the minimum provisioned writes
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type key_name: str
:param key_name: Name of the key
:returns: int -- Minimum provisioned writes
"""
if get_table_option(key_name, 'min_provisioned_writes'):
return int(min(
get_table_option(key_name, 'min_provisioned_writes'),
(current_provisioning * 2)))
return int(current_provisioning * 2)
def decrease_reads_in_percent(current_provisioning, percent, key_name):
""" Decrease the current_provisioning with percent %
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type percent: int
:param percent: How many percent should we decrease with
:type key_name: str
:param key_name: Name of the key
:returns: int -- New provisioning value
"""
decrease = int(float(current_provisioning)*(float(percent)/100))
updated_provisioning = current_provisioning - decrease
logger.debug(
'Read provisioning will be decreased to {0:d} units'.format(
updated_provisioning))
min_provisioned_reads = get_min_provisioned_reads(
current_provisioning,
key_name)
if min_provisioned_reads > 0:
if updated_provisioning < min_provisioned_reads:
logger.info('Reached provisioned reads min limit: {0:d}'.format(
min_provisioned_reads))
return min_provisioned_reads
return updated_provisioning
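# Worked example (hypothetical numbers): with current_provisioning=100 and percent=25
# the decrease is int(100 * 0.25) = 25, so the proposed value is 75. If the effective
# minimum from get_min_provisioned_reads() were, say, 80, then 80 would be returned instead.
current_example, percent_example = 100, 25
decrease_example = int(float(current_example) * (float(percent_example) / 100))  # 25
updated_example = current_example - decrease_example  # 75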
def increase_reads_in_percent(current_provisioning, percent, key_name):
""" Increase the current_provisioning with percent %
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type percent: int
:param percent: How many percent should we increase with
:returns: int -- New provisioning value
:type key_name: str
:param key_name: Name of the key
"""
increase = int(float(current_provisioning)*(float(percent)/100))
updated_provisioning = current_provisioning + increase
logger.debug(
'Read provisioning will be increased to {0:d} units'.format(
updated_provisioning))
if get_table_option(key_name, 'max_provisioned_reads') > 0:
if (updated_provisioning >
get_table_option(key_name, 'max_provisioned_reads')):
logger.info('Reached provisioned reads max limit: {0:d}'.format(
int(get_table_option(key_name, 'max_provisioned_reads'))))
return get_table_option(key_name, 'max_provisioned_reads')
return updated_provisioning
def decrease_writes_in_percent(current_provisioning, percent, key_name):
""" Decrease the current_provisioning with percent %
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type percent: int
:param percent: How many percent should we decrease with
:returns: int -- New provisioning value
:type key_name: str
:param key_name: Name of the key
"""
decrease = int(float(current_provisioning)*(float(percent)/100))
updated_provisioning = current_provisioning - decrease
logger.debug(
'Write provisioning will be decreased to {0:d} units'.format(
updated_provisioning))
min_provisioned_writes = get_min_provisioned_writes(
current_provisioning,
key_name)
if min_provisioned_writes > 0:
if updated_provisioning < min_provisioned_writes:
logger.info('Reached provisioned writes min limit: {0:d}'.format(
min_provisioned_writes))
return min_provisioned_writes
return updated_provisioning
def increase_writes_in_percent(current_provisioning, percent, key_name):
""" Increase the current_provisioning with percent %
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type percent: int
:param percent: How many percent should we increase with
:returns: int -- New provisioning value
:type key_name: str
:param key_name: Name of the key
"""
increase = int(float(current_provisioning)*(float(percent)/100))
updated_provisioning = current_provisioning + increase
logger.debug(
'Write provisioning will be increased to {0:d} units'.format(
updated_provisioning))
if get_table_option(key_name, 'max_provisioned_writes') > 0:
if (updated_provisioning >
get_table_option(key_name, 'max_provisioned_writes')):
logger.info('Reached provisioned writes max limit: {0:d}'.format(
int(get_table_option(key_name, 'max_provisioned_writes'))))
return get_table_option(key_name, 'max_provisioned_writes')
return updated_provisioning
def decrease_reads_in_units(current_provisioning, units, key_name):
""" Decrease the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we decrease with
:returns: int -- New provisioning value
:type key_name: str
:param key_name: Name of the key
"""
updated_provisioning = int(current_provisioning) - int(units)
logger.debug(
'Read provisioning will be decreased to {0:d} units'.format(
updated_provisioning))
min_provisioned_reads = get_min_provisioned_reads(
current_provisioning,
key_name)
if min_provisioned_reads > 0:
if updated_provisioning < min_provisioned_reads:
logger.info('Reached provisioned reads min limit: {0:d}'.format(
min_provisioned_reads))
return min_provisioned_reads
return updated_provisioning
def increase_reads_in_units(current_provisioning, units, key_name):
""" Increase the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we increase with
:returns: int -- New provisioning value
:type key_name: str
:param key_name: Name of the key
"""
updated_provisioning = 0
if int(units) > int(current_provisioning):
updated_provisioning = 2 * int(current_provisioning)
else:
updated_provisioning = int(current_provisioning) + int(units)
logger.debug(
'Read provisioning will be increased to {0:d} units'.format(
updated_provisioning))
if get_table_option(key_name, 'max_provisioned_reads') > 0:
if (updated_provisioning >
get_table_option(key_name, 'max_provisioned_reads')):
logger.info('Reached provisioned reads max limit: {0:d}'.format(
int(get_table_option(key_name, 'max_provisioned_reads'))))
return get_table_option(key_name, 'max_provisioned_reads')
return updated_provisioning
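# Worked example of the doubling rule above (hypothetical numbers): when the requested
# unit increase exceeds the current provisioning, the value is doubled rather than
# incremented, presumably to respect DynamoDB's historical limit of at most doubling
# provisioned throughput in a single update:
current_example, units_example = 10, 25
if int(units_example) > int(current_example):
updated_example = 2 * int(current_example)  # 20
else:
updated_example = int(current_example) + int(units_example)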
def decrease_writes_in_units(current_provisioning, units, key_name):
""" Decrease the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we decrease with
:returns: int -- New provisioning value
:type key_name: str
:param key_name: Name of the key
"""
updated_provisioning = int(current_provisioning) - int(units)
logger.debug(
'Write provisioning will be decreased to {0:d} units'.format(
updated_provisioning))
min_provisioned_writes = get_min_provisioned_writes(
current_provisioning,
key_name)
if min_provisioned_writes > 0:
if updated_provisioning < min_provisioned_writes:
logger.info('Reached provisioned writes min limit: {0:d}'.format(
min_provisioned_writes))
return min_provisioned_writes
return updated_provisioning
def increase_writes_in_units(current_provisioning, units, key_name):
""" Increase the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we increase with
:returns: int -- New provisioning value
:type key_name: str
:param key_name: Name of the key
"""
updated_provisioning = 0
if int(units) > int(current_provisioning):
updated_provisioning = 2 * int(current_provisioning)
else:
updated_provisioning = int(current_provisioning) + int(units)
logger.debug(
'Write provisioning will be increased to {0:d} units'.format(
updated_provisioning))
if get_table_option(key_name, 'max_provisioned_writes') > 0:
if (updated_provisioning >
get_table_option(key_name, 'max_provisioned_writes')):
logger.info('Reached provisioned writes max limit: {0:d}'.format(
int(get_table_option(key_name, 'max_provisioned_writes'))))
return get_table_option(key_name, 'max_provisioned_writes')
return updated_provisioning
|
|
from toontown.shtiker import ShtikerPage
from panda3d.core import TextNode, Vec4
from direct.gui.DirectGui import DirectLabel, DirectFrame, DirectButton, DirectScrolledList, DGG
from toontown.toonbase import TTLocalizer
from toontown.building import GroupTrackerGlobals
SUIT_ICON_COLORS = (Vec4(0.863, 0.776, 0.769, 1.0), Vec4(0.749, 0.776, 0.824, 1.0),
Vec4(0.749, 0.769, 0.749, 1.0), Vec4(0.843, 0.745, 0.745, 1.0))
class GroupTrackerGroup(DirectButton):
def __init__(self, parent, leaderId, leaderName, shardName, category, memberIds, memberNames, **kw):
self.leaderId = leaderId
self.leaderName = leaderName
self.shardName = shardName
self.category = category
self.memberIds = memberIds
self.memberNames = memberNames
self.playerCount = None
if parent is None:
parent = aspect2d
text = TTLocalizer.GroupTrackerCategoryToText[self.category]
optiondefs = (
('text', text, None),
('text_fg', (0.0, 0.0, 0.0, 1.0), None),
('text_align', TextNode.ALeft, None),
('text_pos', (0.0, 0.0, 0.0), None),
('text_scale', 0.05, None),
('relief', None, None)
)
self.defineoptions(kw, optiondefs)
DirectButton.__init__(self, parent)
self.initialiseoptions(GroupTrackerGroup)
self.playerCount = DirectLabel(parent=self, pos=(0.6, 0, 0), relief=None, text='', text_align=TextNode.ARight, text_scale=0.05, text_fg=(0, 0, 0, 1))
self.updatePlayerCount()
def destroy(self):
if hasattr(self, 'playerCount'):
if self.playerCount:
self.playerCount.destroy()
del self.playerCount
DirectButton.destroy(self)
def updatePlayerCount(self):
maxPlayers = GroupTrackerGlobals.CATEGORY_TO_MAX_PLAYERS[self.category]
self.playerCount['text'] = str(len(self.memberIds)) + '/' + str(maxPlayers)
def getLeaderId(self):
return self.leaderId
def getLeader(self):
return self.leaderName
def getDistrict(self):
return self.shardName
def getTitle(self):
return TTLocalizer.GroupTrackerCategoryToText[self.category]
def getCurrentPlayers(self):
return len(self.memberIds)
def getCategory(self):
return self.category
def getMaxPlayers(self):
return GroupTrackerGlobals.CATEGORY_TO_MAX_PLAYERS[self.category]
def getMemberNames(self):
return self.memberNames
def getMemberIds(self):
return self.memberIds
class GroupTrackerPlayer(DirectButton):
def __init__(self, parent, avId, name, isLeader, **kw):
self.avId = avId
self.name = name
self.isLeader = isLeader
self.leaderImage = None
if parent is None:
parent = aspect2d
text=self.getName()
optiondefs = (
('text', text, None),
('text_fg', (0.0, 0.0, 0.0, 1.0), None),
('text_align', TextNode.ALeft, None),
('text_pos', (-0.2, 0.0, 0.0), None),
('relief', None, None),
('text_scale', 0.05, None),
('command', self.loadPlayerDetails, None)
)
self.defineoptions(kw, optiondefs)
DirectButton.__init__(self, parent)
self.initialiseoptions(GroupTrackerPlayer)
boardingGroupIcons = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_status')
self.leaderButtonImage = boardingGroupIcons.find('**/tt_t_gui_brd_statusLeader')
self.leaderImage = DirectButton(parent=self, relief=None, state=DGG.DISABLED, image=(self.leaderButtonImage), image_scale=(0.06, 1.0, 0.06), pos=(-0.26, 0, 0.02), command=None)
self.setLeaderStatus(self.isLeader)
boardingGroupIcons.removeNode()
def destroy(self):
if hasattr(self, 'leaderImage'):
if self.leaderImage:
self.leaderImage.destroy()
del self.leaderImage
DirectButton.destroy(self)
def setLeaderStatus(self, isLeader):
self.isLeader = isLeader
if self.isLeader:
self.leaderImage.show()
if not self.isLeader:
self.leaderImage.hide()
def getLeader(self):
return self.isLeader
def getName(self):
# Cap the name length so we don't display overly long names
name = self.name
if len(name) > 15:
name = name[:16] + '...'  # Keep the first 16 characters and append an ellipsis
return name
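# Quick illustration of the truncation above (plain Python, independent of Panda3D):
# names longer than 15 characters keep their first 16 characters plus an ellipsis.
sample_name = 'ReallyLongToonNameHere'
shown_name = sample_name[:16] + '...' if len(sample_name) > 15 else sample_name
# shown_name == 'ReallyLongToonNa...'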
def getId(self):
return self.avId
def loadPlayerDetails(self):
# TODO: Load player details based off avId for localAvatar
pass
class GroupTrackerPage(ShtikerPage.ShtikerPage):
notify = directNotify.newCategory('GroupTrackerPage')
def __init__(self):
ShtikerPage.ShtikerPage.__init__(self)
self.groupWidgets = []
self.playerWidgets = []
self.images = [] # image nodes: Possible images to apply on groups
self.scrollList = None # DirectScrolledList: Holds the GroupTrackerGroup widgets
self.scrollTitle = None # DirectLabel: Title of the list that holds the groups
self.playerList = None # DirectScrolledList: Holds players when showing a specific group details
self.playerListTitle = None # DirectLabel: Title of the playerList
self.groupInfoTitle = None # DirectLabel: holds the group detail title to show on the right
self.groupInfoDistrict = None # DirectLabel: shows group detail district on the right
self.statusMessage = None # DirectLabel: Shows important messages like Loading... or "No boarding groups available"
self.groupIcon = None # DirectButton: Icon to associate with the group ex. sellbot icon or cashbot icon depending on group info
self.wantGroupToggle = None # DirectButton: Allows the toon to toggle his listing
def load(self):
self.listXorigin = -0.02
self.listFrameSizeX = 0.67
self.listZorigin = -0.96
self.listFrameSizeZ = 1.04
self.arrowButtonScale = 1.3
self.itemFrameXorigin = -0.237
self.itemFrameZorigin = 0.365
self.buttonXstart = self.itemFrameXorigin + 0.293
self.gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
self.scrollList = DirectScrolledList(parent=self,
relief=None,
pos=(-0.5, 0, 0),
incButton_image=(self.gui.find('**/FndsLst_ScrollUp'),
self.gui.find('**/FndsLst_ScrollDN'),
self.gui.find('**/FndsLst_ScrollUp_Rllvr'),
self.gui.find('**/FndsLst_ScrollUp')
),
incButton_relief=None,
incButton_scale=(self.arrowButtonScale, self.arrowButtonScale, -self.arrowButtonScale),
incButton_pos=(self.buttonXstart, 0, self.itemFrameZorigin - 0.999),
incButton_image3_color=Vec4(1, 1, 1, 0.2),
decButton_image=(self.gui.find('**/FndsLst_ScrollUp'),
self.gui.find('**/FndsLst_ScrollDN'),
self.gui.find('**/FndsLst_ScrollUp_Rllvr'),
self.gui.find('**/FndsLst_ScrollUp')
),
decButton_relief=None,
decButton_scale=(self.arrowButtonScale, self.arrowButtonScale, self.arrowButtonScale),
decButton_pos=(self.buttonXstart, 0, self.itemFrameZorigin + 0.227),
decButton_image3_color=Vec4(1, 1, 1, 0.2),
itemFrame_pos=(self.itemFrameXorigin, 0, self.itemFrameZorigin),
itemFrame_scale=1.0,
itemFrame_relief=DGG.SUNKEN,
itemFrame_frameSize=(self.listXorigin, self.listXorigin + self.listFrameSizeX,
self.listZorigin, self.listZorigin + self.listFrameSizeZ
),
itemFrame_frameColor=(0.85, 0.95, 1, 1),
itemFrame_borderWidth=(0.01, 0.01),
numItemsVisible=15,
forceHeight=0.065,
items=self.groupWidgets
)
self.scrollTitle = DirectFrame(parent=self.scrollList,
text=TTLocalizer.GroupTrackerListTitle,
text_scale=0.06,
text_align=TextNode.ACenter,
relief=None,
pos=(self.buttonXstart, 0, self.itemFrameZorigin + 0.127)
)
self.playerList = DirectScrolledList(parent=self,
relief=None,
pos=(0.45, 0, 0.1),
incButton_image=(self.gui.find('**/FndsLst_ScrollUp'),
self.gui.find('**/FndsLst_ScrollDN'),
self.gui.find('**/FndsLst_ScrollUp_Rllvr'),
self.gui.find('**/FndsLst_ScrollUp')
),
incButton_relief=None,
incButton_scale=(1.0, 1.0, -1.0),
incButton_pos=(0, 0, -0.28),
incButton_image3_color=Vec4(1, 1, 1, 0.05),
decButton_image=(self.gui.find('**/FndsLst_ScrollUp'),
self.gui.find('**/FndsLst_ScrollDN'),
self.gui.find('**/FndsLst_ScrollUp_Rllvr'),
self.gui.find('**/FndsLst_ScrollUp')
),
decButton_relief=None,
decButton_scale=(1.0, 1.0, 1.0),
decButton_pos=(0.0, 0, 0.04),
decButton_image3_color=Vec4(1, 1, 1, 0.25),
itemFrame_pos=(0, 0, -0.05),
itemFrame_scale=1.0,
itemFrame_relief=DGG.SUNKEN,
itemFrame_frameSize=(-0.3, 0.3, #x
-0.2, 0.06), #z
itemFrame_frameColor=(0.85, 0.95, 1, 1),
itemFrame_borderWidth=(0.01, 0.01),
numItemsVisible=4,
forceHeight=0.05,
items=self.playerWidgets
)
self.playerListTitle = DirectFrame(parent=self.playerList,
text='',
text_scale=0.05,
text_align=TextNode.ACenter,
relief=None,
pos=(0, 0, 0.08)
)
self.groupInfoTitle = DirectLabel(parent=self, text='',
text_scale=0.080, text_align=TextNode.ACenter,
text_wordwrap=15, relief=None, pos=(0.45, 0, 0.5))
self.groupInfoDistrict = DirectLabel(parent=self,
text='',
text_scale=0.050,
text_align=TextNode.ACenter,
text_wordwrap=15,
relief=None,
pos=(0.45, 0, 0.4)
)
self.statusMessage = DirectLabel(parent=self, text='', text_scale=0.060, text_align=TextNode.ACenter, text_wordwrap=5, relief=None, pos=(0.45,0,0.1))
# Group Image:
self.groupIcon = DirectButton(parent=self, relief=None, state=DGG.DISABLED, image=None, image_scale=(0.35, 1, 0.35), image_color=Vec4(1.0, 1.0, 1.0, 0.75), pos=(0.45, 10, -0.45), command=self.doNothing)
# Group Toggle:
self.wantGroupToggle = DirectButton(parent=self, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=(0.7, 1, 1), text='', text_scale=0.052, text_pos=(0, -0.02), pos=(0.2, 0, -0.65), command=self.toggleWantGroup)
self.updateWantGroupButton()
# Loading possible group icons
suitIcons = loader.loadModel('phase_3/models/gui/cog_icons')
bossbotIcon = suitIcons.find('**/CorpIcon')
bossbotIcon.setColor(SUIT_ICON_COLORS[0])
self.images.append(bossbotIcon)
lawbotIcon = suitIcons.find('**/LegalIcon')
lawbotIcon.setColor(SUIT_ICON_COLORS[1])
self.images.append(lawbotIcon)
cashbotIcon = suitIcons.find('**/MoneyIcon')
cashbotIcon.setColor(SUIT_ICON_COLORS[2])
self.images.append(cashbotIcon)
sellbotIcon = suitIcons.find('**/SalesIcon')
sellbotIcon.setColor(SUIT_ICON_COLORS[3])
self.images.append(sellbotIcon)
# Clean up
self.clearGroupInfo()
self.statusMessage.hide()
suitIcons.removeNode()
self.gui.removeNode()
guiButton.removeNode()
self.accept('GroupTrackerResponse', self.updatePage)
def unload(self):
self.scrollList.destroy()
self.groupInfoDistrict.destroy()
self.playerList.destroy()
self.groupInfoTitle.destroy()
self.groupIcon.destroy()
self.wantGroupToggle.destroy()
for widget in self.playerWidgets:
widget.destroy()
for widget in self.groupWidgets:
widget.destroy()
self.playerWidgets = []
del self.scrollList
del self.groupInfoDistrict
del self.playerList
del self.groupInfoTitle
del self.groupIcon
del self.wantGroupToggle
ShtikerPage.ShtikerPage.unload(self)
def enter(self):
ShtikerPage.ShtikerPage.enter(self)
self.setGroups([]) # CLEAR IT ALL
self.setPlayers() # CLEAR IT ALL
if(self.scrollList['items'] == []):
self.statusMessage['text'] = TTLocalizer.GroupTrackerLoading
self.statusMessage.show()
base.cr.globalGroupTracker.requestGroups()
taskMgr.doMethodLater(3, self.displayNoGroupsTaskHandler, self.uniqueName('timeout'))
def displayNoGroups(self):
self.statusMessage['text'] = TTLocalizer.GroupTrackerEmpty
self.statusMessage.show()
self.clearGroupInfo()
def displayNoGroupsTaskHandler(self, task):
self.displayNoGroups()
return task.done
def updatePage(self):
taskMgr.remove(self.uniqueName('timeout'))
groups = base.cr.globalGroupTracker.getGroupInfo()
self.setGroups(groups)
def exit(self):
self.clearGroupInfo()
ShtikerPage.ShtikerPage.exit(self)
base.cr.globalGroupTracker.doneRequesting()
def updateGroupInfoEventHandle(self, groupWidget, mouseEvent):
self.updateGroupInfo(groupWidget)
def updateGroupInfo(self, groupWidget):
''' Updates the Right Page of the Group Tracker Page with new Info '''
self.statusMessage.hide()
# Update the Player List
self.setPlayers(groupWidget)
self.playerList.show()
# Update the Player List Title
self.playerListTitle['text'] = ('Players ' + str(groupWidget.getCurrentPlayers()) + '/' + str(groupWidget.getMaxPlayers()) + ':')
self.playerListTitle.show()
# Update the District
self.groupInfoDistrict['text'] = TTLocalizer.BoardingGroupDistrictInformation % { 'district' : groupWidget.getDistrict() }
self.groupInfoDistrict.show()
# Update the Title
self.groupInfoTitle['text'] = groupWidget.getTitle()
self.groupInfoTitle.show()
# Update the Image
self.groupIcon['image'] = self.images[GroupTrackerGlobals.CATEGORY_TO_IMAGE_ID[groupWidget.getCategory()]]
self.groupIcon['image_scale'] = (0.35, 1, 0.35)
self.groupIcon.show()
def clearGroupInfo(self):
self.playerList.hide()
self.playerListTitle.hide()
self.groupInfoDistrict.hide()
self.groupInfoTitle.hide()
self.groupIcon.hide()
def setPlayers(self, groupWidget=None):
''' Calls updatePlayerList '''
# Clear the Widgets that were held in the listings
for playerWidget in self.playerWidgets:
playerWidget.destroy()
self.playerWidgets = []
# Make a player widget for each player
# TODO: Edit this stuff when avIds come from players
if groupWidget:
leaderId = groupWidget.getLeaderId()
playerNames = groupWidget.getMemberNames()
playerIds = groupWidget.getMemberIds()
for playerName in playerNames:
playerId = playerIds[playerNames.index(playerName)]
isLeader = playerId == leaderId
self.playerWidgets.append(GroupTrackerPlayer(parent=self, avId=playerId, name=playerName, isLeader=isLeader))
self.updatePlayerList()
def reconsiderGroupInfo(self, groupWidget):
''' If someone is viewing this info and it was updated, we also want to update the info being viewed '''
if self.playerWidgets is None or self.playerList['items'] == []:
return # No info is being viewed at the moment, since you can't have an empty group
# We have to update if this group's leader is the leader in the playerlist being viewed right now
leaderId = groupWidget.getLeaderId()
# Check all the players in the playerList being viewed for the same leader
for playerWidget in self.playerWidgets:
if playerWidget.getLeader():
if leaderId == playerWidget.getId():
self.updateGroupInfo(groupWidget)
return False
return True
def setGroups(self, groups):
''' Calls updateGroupList '''
# Clear our Group Widgets
for group in self.groupWidgets:
group.destroy()
self.groupWidgets = []
wantReconsiderInfo = True
# Create a new group widget for each group
for group in groups:
if not group[GroupTrackerGlobals.SHOW] or len(group[GroupTrackerGlobals.MEMBER_IDS]) == 0:
continue # Skip groups that are dead or whose leader doesn't want them shown
leaderId = 0
for i, g in base.cr.globalGroupTracker.leader2Group.items():
if g == group:
leaderId = i
if not leaderId:
continue
leaderName = group[GroupTrackerGlobals.LEADER_NAME]
shardName = group[GroupTrackerGlobals.SHARD_NAME]
category = group[GroupTrackerGlobals.CATEGORY]
memberIds = group[GroupTrackerGlobals.MEMBER_IDS]
memberNames = group[GroupTrackerGlobals.MEMBER_NAMES]
groupWidget = GroupTrackerGroup(parent=self, leaderId=leaderId, leaderName=leaderName, shardName=shardName, category=category, memberIds=memberIds, memberNames=memberNames)
groupWidget.bind(DGG.WITHIN, self.updateGroupInfoEventHandle, extraArgs=[groupWidget])
self.groupWidgets.append(groupWidget)
if wantReconsiderInfo:
wantReconsiderInfo = self.reconsiderGroupInfo(groupWidget)
# Edge case: if a group was removed, its info might otherwise remain on screen even though it no longer exists
if wantReconsiderInfo:
self.clearGroupInfo()
# There are no groups, hide the information
if len(self.groupWidgets) == 0:
self.displayNoGroups()
self.updateGroupList()
def updateGroupList(self):
self.statusMessage.hide()
if self.scrollList is None:
return
# Clear the Group Listing
for item in self.scrollList['items']:
if item:
self.scrollList.removeItem(item, refresh=True)
self.scrollList['items'] = []
# Re-populate the Group Listing
for groupWidget in self.groupWidgets:
self.scrollList.addItem(groupWidget, refresh=True)
if len(self.groupWidgets) == 0:
self.displayNoGroups()
def updatePlayerList(self):
if self.playerList is None:
return
# Clear the Player Listing
for item in self.playerList['items']:
if item:
self.playerList.removeItem(item)
self.playerList['items'] = []
# Re-Populate the List
for playerWidget in self.playerWidgets:
self.playerList.addItem(playerWidget)
def toggleWantGroup(self):
if settings.get('grouptracker', False):
settings['grouptracker'] = False
base.cr.globalGroupTracker.showMe(False)
else:
settings['grouptracker'] = True
base.cr.globalGroupTracker.showMe(True)
base.localAvatar.wantGroupTracker()
base.localAvatar.wantGroupTracker() # Update the AI toon so the boarding group AI knows what the player wants
self.updateWantGroupButton()
def updateWantGroupButton(self):
if settings.get('grouptracker', False):
self.wantGroupToggle['text'] = 'Hide Me'
else:
self.wantGroupToggle['text'] = 'Show Me'
def doNothing(self):
pass
|
|
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import datetime
import marathon
import mock
from paasta_tools import bounce_lib
from paasta_tools import utils
class TestBounceLib:
def fake_system_paasta_config(self):
return utils.SystemPaastaConfig({"synapse_port": 123456}, "/fake/configs")
def test_bounce_lock(self):
import fcntl
lock_name = 'the_internet'
lock_file = '/var/lock/%s.lock' % lock_name
fake_fd = mock.MagicMock(spec=file)
with contextlib.nested(
mock.patch('paasta_tools.bounce_lib.open', create=True, return_value=fake_fd),
mock.patch('fcntl.lockf'),
mock.patch('os.remove')
) as (
open_patch,
lockf_patch,
remove_patch
):
with bounce_lib.bounce_lock(lock_name):
pass
open_patch.assert_called_once_with(lock_file, 'w')
lockf_patch.assert_called_once_with(fake_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
fake_fd.close.assert_called_once_with()
remove_patch.assert_called_once_with(lock_file)
def test_bounce_lock_zookeeper(self):
lock_name = 'watermelon'
fake_lock = mock.Mock()
fake_zk = mock.MagicMock(Lock=mock.Mock(return_value=fake_lock))
fake_zk_hosts = 'awjti42ior'
with contextlib.nested(
mock.patch('paasta_tools.bounce_lib.KazooClient', return_value=fake_zk, autospec=True),
mock.patch(
'paasta_tools.bounce_lib.load_system_paasta_config',
return_value=mock.Mock(
get_zk_hosts=lambda: fake_zk_hosts
),
autospec=True,
),
) as (
client_patch,
hosts_patch,
):
with bounce_lib.bounce_lock_zookeeper(lock_name):
pass
hosts_patch.assert_called_once_with()
client_patch.assert_called_once_with(hosts=fake_zk_hosts,
timeout=bounce_lib.ZK_LOCK_CONNECT_TIMEOUT_S)
fake_zk.start.assert_called_once_with()
fake_zk.Lock.assert_called_once_with('%s/%s' % (bounce_lib.ZK_LOCK_PATH, lock_name))
fake_lock.acquire.assert_called_once_with(timeout=1)
fake_lock.release.assert_called_once_with()
fake_zk.stop.assert_called_once_with()
def test_create_marathon_app(self):
marathon_client_mock = mock.create_autospec(marathon.MarathonClient)
fake_client = marathon_client_mock
fake_config = {'id': 'fake_creation'}
with contextlib.nested(
mock.patch('paasta_tools.bounce_lib.create_app_lock', spec=contextlib.contextmanager),
mock.patch('paasta_tools.bounce_lib.wait_for_create'),
) as (
lock_patch,
wait_patch,
):
bounce_lib.create_marathon_app('fake_creation', fake_config, fake_client)
assert lock_patch.called
assert fake_client.create_app.call_count == 1
actual_call_args = fake_client.create_app.call_args
actual_config = actual_call_args[0][1]
assert actual_config.id == 'fake_creation'
wait_patch.assert_called_once_with(fake_config['id'], fake_client)
def test_delete_marathon_app(self):
fake_client = mock.Mock(delete_app=mock.Mock())
fake_id = 'fake_deletion'
with contextlib.nested(
mock.patch('paasta_tools.bounce_lib.create_app_lock', spec=contextlib.contextmanager),
mock.patch('paasta_tools.bounce_lib.wait_for_delete'),
mock.patch('time.sleep')
) as (
lock_patch,
wait_patch,
sleep_patch
):
bounce_lib.delete_marathon_app(fake_id, fake_client)
fake_client.scale_app.assert_called_once_with(fake_id, instances=0, force=True)
fake_client.delete_app.assert_called_once_with(fake_id, force=True)
sleep_patch.assert_called_once_with(1)
wait_patch.assert_called_once_with(fake_id, fake_client)
assert lock_patch.called
def test_kill_old_ids(self):
old_ids = ['mmm.whatcha.say', 'that.you', 'only.meant.well']
fake_client = mock.MagicMock()
with mock.patch('paasta_tools.bounce_lib.delete_marathon_app') as delete_patch:
bounce_lib.kill_old_ids(old_ids, fake_client)
for old_id in old_ids:
delete_patch.assert_any_call(old_id, fake_client)
assert delete_patch.call_count == len(old_ids)
def test_wait_for_create_slow(self):
fake_id = 'my_created'
fake_client = mock.Mock(spec='paasta_tools.setup_marathon_job.MarathonClient')
fake_is_app_running_values = [False, False, True]
with contextlib.nested(
mock.patch('paasta_tools.marathon_tools.is_app_id_running'),
mock.patch('time.sleep'),
) as (
is_app_id_running_patch,
sleep_patch,
):
is_app_id_running_patch.side_effect = fake_is_app_running_values
bounce_lib.wait_for_create(fake_id, fake_client)
assert sleep_patch.call_count == 2
assert is_app_id_running_patch.call_count == 3
def test_wait_for_create_fast(self):
fake_id = 'my_created'
fake_client = mock.Mock(spec='paasta_tools.setup_marathon_job.MarathonClient')
fake_is_app_running_values = [True]
with contextlib.nested(
mock.patch('paasta_tools.marathon_tools.is_app_id_running'),
mock.patch('time.sleep'),
) as (
is_app_id_running_patch,
sleep_patch,
):
is_app_id_running_patch.side_effect = fake_is_app_running_values
bounce_lib.wait_for_create(fake_id, fake_client)
assert sleep_patch.call_count == 0
assert is_app_id_running_patch.call_count == 1
def test_wait_for_delete_slow(self):
fake_id = 'my_deleted'
fake_client = mock.Mock(spec='paasta_tools.setup_marathon_job.MarathonClient')
fake_is_app_running_values = [True, True, False]
with contextlib.nested(
mock.patch('paasta_tools.marathon_tools.is_app_id_running'),
mock.patch('time.sleep'),
) as (
is_app_id_running_patch,
sleep_patch,
):
is_app_id_running_patch.side_effect = fake_is_app_running_values
bounce_lib.wait_for_delete(fake_id, fake_client)
assert sleep_patch.call_count == 2
assert is_app_id_running_patch.call_count == 3
def test_wait_for_delete_fast(self):
fake_id = 'my_deleted'
fake_client = mock.Mock(spec='paasta_tools.setup_marathon_job.MarathonClient')
fake_is_app_running_values = [False]
with contextlib.nested(
mock.patch('paasta_tools.marathon_tools.is_app_id_running'),
mock.patch('time.sleep'),
) as (
is_app_id_running_patch,
sleep_patch,
):
is_app_id_running_patch.side_effect = fake_is_app_running_values
bounce_lib.wait_for_delete(fake_id, fake_client)
assert sleep_patch.call_count == 0
assert is_app_id_running_patch.call_count == 1
def test_get_bounce_method_func(self):
actual = bounce_lib.get_bounce_method_func('brutal')
expected = bounce_lib.brutal_bounce
assert actual == expected
def test_get_happy_tasks_when_running_without_healthchecks_defined(self):
"""All running tasks with no health checks results are healthy if the app does not define healthchecks"""
tasks = [mock.Mock(health_check_results=[]) for _ in xrange(5)]
fake_app = mock.Mock(tasks=tasks, health_checks=[])
assert bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config()) == tasks
def test_get_happy_tasks_when_running_with_healthchecks_defined(self):
"""All running tasks with no health check results are unhealthy if the app defines healthchecks"""
tasks = [mock.Mock(health_check_results=[]) for _ in xrange(5)]
fake_app = mock.Mock(tasks=tasks, health_checks=["fake_healthcheck_definition"])
assert bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config()) == []
def test_get_happy_tasks_when_all_healthy(self):
"""All tasks with only passing healthchecks should be happy"""
tasks = [mock.Mock(health_check_results=[mock.Mock(alive=True)]) for _ in xrange(5)]
fake_app = mock.Mock(tasks=tasks, health_checks=[])
assert bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config()) == tasks
def test_get_happy_tasks_when_some_unhealthy(self):
"""Only tasks with a passing healthcheck should be happy"""
fake_failing_healthcheck_results = [mock.Mock(alive=False)]
fake_successful_healthcheck_results = [mock.Mock(alive=True)]
tasks = [mock.Mock(health_check_results=fake_failing_healthcheck_results),
mock.Mock(health_check_results=fake_failing_healthcheck_results),
mock.Mock(health_check_results=fake_successful_healthcheck_results)]
fake_app = mock.Mock(tasks=tasks, health_checks=[])
actual = bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config())
expected = tasks[-1:]
assert actual == expected
def test_get_happy_tasks_with_multiple_healthchecks_success(self):
"""All tasks with at least one passing healthcheck should be happy"""
fake_successful_healthcheck_results = [mock.Mock(alive=True), mock.Mock(alive=False)]
tasks = [mock.Mock(health_check_results=fake_successful_healthcheck_results)]
fake_app = mock.Mock(tasks=tasks, health_checks=[])
assert bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config()) == tasks
def test_get_happy_tasks_with_multiple_healthchecks_fail(self):
"""Only tasks with at least one passing healthcheck should be happy"""
fake_successful_healthcheck_results = [mock.Mock(alive=False), mock.Mock(alive=False)]
tasks = [mock.Mock(health_check_results=fake_successful_healthcheck_results)]
fake_app = mock.Mock(tasks=tasks, health_checks=[])
assert bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config()) == []
def test_get_happy_tasks_min_task_uptime(self):
"""If we specify a minimum task age, tasks newer than that should not be considered happy."""
now = datetime.datetime(2000, 1, 1, 0, 0, 0)
tasks = [mock.Mock(health_check_results=[], started_at=(now - datetime.timedelta(minutes=i)))
for i in xrange(5)]
fake_app = mock.Mock(tasks=tasks, health_checks=[])
# I would have just mocked datetime.datetime.utcnow, but that's apparently difficult; I have to mock
# datetime.datetime instead, and give it a utcnow attribute.
with mock.patch('paasta_tools.bounce_lib.datetime.datetime', utcnow=lambda: now, autospec=True):
actual = bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config(),
min_task_uptime=121)
expected = tasks[3:]
assert actual == expected
def test_get_happy_tasks_min_task_uptime_when_unhealthy(self):
"""If we specify a minimum task age, tasks newer than that should not be considered happy."""
now = datetime.datetime(2000, 1, 1, 0, 0, 0)
tasks = [mock.Mock(health_check_results=[mock.Mock(alive=False)],
started_at=(now - datetime.timedelta(minutes=i)))
for i in xrange(5)]
fake_app = mock.Mock(tasks=tasks, health_checks=[])
with mock.patch('paasta_tools.bounce_lib.datetime.datetime', utcnow=lambda: now, autospec=True):
actual = bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config(),
min_task_uptime=121)
expected = []
assert actual == expected
def test_get_happy_tasks_check_haproxy(self):
"""If we specify that a task should be in haproxy, don't call it happy unless it's in haproxy."""
tasks = [mock.Mock(health_check_results=[mock.Mock(alive=True)]) for i in xrange(5)]
fake_app = mock.Mock(tasks=tasks, health_checks=[])
with contextlib.nested(
mock.patch('paasta_tools.bounce_lib.get_registered_marathon_tasks', return_value=tasks[2:], autospec=True),
mock.patch('paasta_tools.mesos_tools.get_mesos_slaves_grouped_by_attribute',
return_value={'fake_region': ['fake_host']}, autospec=True),
) as (
_,
get_mesos_slaves_grouped_by_attribute_patch,
):
actual = bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config(),
check_haproxy=True)
expected = tasks[2:]
assert actual == expected
def test_get_happy_tasks_check_haproxy_when_unhealthy(self):
"""If we specify that a task should be in haproxy, don't call it happy unless it's in haproxy."""
tasks = [mock.Mock(health_check_results=[mock.Mock(alive=False)]) for i in xrange(5)]
fake_app = mock.Mock(tasks=tasks, health_checks=[])
with contextlib.nested(
mock.patch('paasta_tools.bounce_lib.get_registered_marathon_tasks', return_value=tasks[2:], autospec=True),
mock.patch('paasta_tools.mesos_tools.get_mesos_slaves_grouped_by_attribute',
return_value={'fake_region': ['fake_host']}, autospec=True),
) as (
_,
get_mesos_slaves_grouped_by_attribute_patch,
):
actual = bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config(),
check_haproxy=True)
expected = []
assert actual == expected
def test_get_happy_tasks_check_haproxy_multiple_locations(self):
"""If we specify that a task should be in haproxy, don't call it happy unless it's in haproxy."""
tasks = [mock.Mock(health_check_results=[mock.Mock(alive=True)]) for i in xrange(5)]
fake_app = mock.Mock(tasks=tasks, health_checks=[])
with contextlib.nested(
mock.patch(
'paasta_tools.bounce_lib.get_registered_marathon_tasks',
side_effect=[tasks[2:3], tasks[3:]], autospec=True,
),
mock.patch('paasta_tools.mesos_tools.get_mesos_slaves_grouped_by_attribute', autospec=True),
) as (
get_registered_marathon_tasks_patch,
get_mesos_slaves_grouped_by_attribute_patch,
):
get_mesos_slaves_grouped_by_attribute_patch.return_value = {
'fake_region': ['fake_host1'],
'fake_other_region': ['fake_host2'],
}
actual = bounce_lib.get_happy_tasks(fake_app, 'service', 'namespace', self.fake_system_paasta_config(),
check_haproxy=True)
expected = tasks[2:]
assert actual == expected
get_registered_marathon_tasks_patch.assert_any_call(
'fake_host1',
123456,
utils.DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
'service.namespace',
tasks,
)
get_registered_marathon_tasks_patch.assert_any_call(
'fake_host2',
123456,
utils.DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
'service.namespace',
tasks,
)
def test_flatten_tasks(self):
"""Simple check of flatten_tasks."""
all_tasks = [mock.Mock(task_id='id_%d' % i) for i in range(10)]
expected = set(all_tasks)
actual = bounce_lib.flatten_tasks({
'app_id_1': set(all_tasks[:5]),
'app_id_2': set(all_tasks[5:])
})
assert actual == expected
class TestBrutalBounce:
def test_brutal_bounce_no_existing_apps(self):
"""When marathon is unaware of a service, brutal bounce should try to
create a marathon app."""
new_config = {'id': 'foo.bar.12345'}
happy_tasks = []
assert bounce_lib.brutal_bounce(
new_config=new_config,
new_app_running=False,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks={},
old_app_live_unhappy_tasks={},
) == {
"create_app": True,
"tasks_to_drain": set(),
}
def test_brutal_bounce_done(self):
"""When marathon has the desired app, and there are no other copies of
the service running, brutal bounce should neither start nor stop
anything."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(5)]
assert bounce_lib.brutal_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks={},
old_app_live_unhappy_tasks={},
) == {
"create_app": False,
"tasks_to_drain": set(),
}
def test_brutal_bounce_mid_bounce(self):
"""When marathon has the desired app, but there are other copies of
the service running, brutal bounce should stop the old ones."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(5)]
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(2)),
}
old_app_live_unhappy_tasks = {
'app1': set(mock.Mock() for _ in xrange(2)),
'app2': set(mock.Mock() for _ in xrange(3)),
}
assert bounce_lib.brutal_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": False,
"tasks_to_drain": set.union(
old_app_live_happy_tasks['app1'],
old_app_live_happy_tasks['app2'],
old_app_live_unhappy_tasks['app1'],
old_app_live_unhappy_tasks['app2'],
),
}
def test_brutal_bounce_old_but_no_new(self):
"""When marathon does not have the desired app, but there are other copies of
the service running, brutal bounce should stop the old ones and start
the new one."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(2)),
}
old_app_live_unhappy_tasks = {
'app1': set(mock.Mock() for _ in xrange(2)),
'app2': set(mock.Mock() for _ in xrange(3)),
}
assert bounce_lib.brutal_bounce(
new_config=new_config,
new_app_running=False,
happy_new_tasks=[],
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": True,
"tasks_to_drain": set.union(
old_app_live_happy_tasks['app1'],
old_app_live_happy_tasks['app2'],
old_app_live_unhappy_tasks['app1'],
old_app_live_unhappy_tasks['app2'],
),
}
class TestUpthendownBounce:
def test_upthendown_bounce_no_existing_apps(self):
"""When marathon is unaware of a service, upthendown bounce should try to
create a marathon app."""
new_config = {'id': 'foo.bar.12345'}
happy_tasks = []
assert bounce_lib.upthendown_bounce(
new_config=new_config,
new_app_running=False,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks={},
old_app_live_unhappy_tasks={},
) == {
"create_app": True,
"tasks_to_drain": set(),
}
def test_upthendown_bounce_old_but_no_new(self):
"""When marathon has the desired app, but there are other copies of
the service running, upthendown bounce should start the new one. but
not stop the old one yet."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(2)),
}
old_app_live_unhappy_tasks = {
'app1': set(mock.Mock() for _ in xrange(2)),
'app2': set(mock.Mock() for _ in xrange(3)),
}
assert bounce_lib.upthendown_bounce(
new_config=new_config,
new_app_running=False,
happy_new_tasks=[],
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": True,
"tasks_to_drain": set(),
}
def test_upthendown_bounce_mid_bounce(self):
"""When marathon has the desired app, and there are other copies of
the service running, but the new app is not fully up, upthendown bounce
should not stop the old ones."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(3)]
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(2)),
}
old_app_live_unhappy_tasks = {
'app1': set(mock.Mock() for _ in xrange(2)),
'app2': set(mock.Mock() for _ in xrange(3)),
}
assert bounce_lib.upthendown_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": False,
"tasks_to_drain": set(),
}
def test_upthendown_bounce_cleanup(self):
"""When marathon has the desired app, and there are other copies of
the service running, and the new app is fully up, upthendown bounce
should stop the old ones."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(5)]
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(2)),
}
old_app_live_unhappy_tasks = {
'app1': set(mock.Mock() for _ in xrange(2)),
'app2': set(mock.Mock() for _ in xrange(3)),
}
assert bounce_lib.upthendown_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": False,
"tasks_to_drain": set.union(
old_app_live_happy_tasks['app1'],
old_app_live_happy_tasks['app2'],
old_app_live_unhappy_tasks['app1'],
old_app_live_unhappy_tasks['app2'],
),
}
def test_upthendown_bounce_done(self):
"""When marathon has the desired app, and there are no other copies of
the service running, upthendown bounce should neither start nor stop
anything."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(5)]
old_app_live_happy_tasks = {}
old_app_live_unhappy_tasks = {}
assert bounce_lib.upthendown_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": False,
"tasks_to_drain": set(),
}
class TestCrossoverBounce:
def test_crossover_bounce_no_existing_apps(self):
"""When marathon is unaware of a service, crossover bounce should try to
create a marathon app."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = []
old_app_live_happy_tasks = {}
old_app_live_unhappy_tasks = {}
assert bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=False,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": True,
"tasks_to_drain": set(),
}
def test_crossover_bounce_old_but_no_new(self):
"""When marathon only has old apps for this service, crossover bounce should start the new one, but not kill any
old tasks yet."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = []
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(2)),
}
old_app_live_unhappy_tasks = {
'app1': set(),
'app2': set(),
}
assert bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=False,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": True,
"tasks_to_drain": set(),
}
def test_crossover_bounce_old_app_is_happy_but_no_new_app_happy_tasks(self):
"""When marathon only has old apps for this service and margin_factor != 1,
crossover bounce should start the new app and kill some old tasks."""
new_config = {'id': 'foo.bar.12345', 'instances': 100}
happy_tasks = []
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(60)),
'app2': set(mock.Mock() for _ in xrange(40)),
}
old_app_live_unhappy_tasks = {
'app1': set(),
'app2': set(),
}
actual = bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=False,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
margin_factor=0.95,
)
assert actual["create_app"] is True
assert len(actual["tasks_to_drain"]) == 5
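# Worked example of the margin_factor arithmetic behind the assertion above
# (hypothetical, mirroring the test data): with 100 desired instances and
# margin_factor=0.95, roughly 100 * 0.95 = 95 old tasks must stay up, so about
# 100 - 95 = 5 can be drained immediately; the exact rounding lives in bounce_lib.
desired_instances, margin = 100, 0.95
must_keep = int(desired_instances * margin)  # 95
can_drain = desired_instances - must_keep    # 5, matching the assertion above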
def test_crossover_bounce_some_unhappy_old_some_happy_old_no_new(self):
"""When marathon only has old apps for this service, and some of them are unhappy (maybe they've been recently
started), the crossover bounce should start a new app and prefer killing the unhappy tasks over the happy ones.
"""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = []
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(2)),
}
old_app_live_unhappy_tasks = {
'app1': set(mock.Mock() for _ in xrange(2)),
'app2': set(mock.Mock() for _ in xrange(3)),
}
assert bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": False,
"tasks_to_drain": set(old_app_live_unhappy_tasks['app1'] | old_app_live_unhappy_tasks['app2']),
}
def test_crossover_bounce_some_unhappy_old_no_happy_old_no_new_tasks_no_excess(self):
"""When marathon only has old apps for this service, and all of their tasks are unhappy, and there are no excess
tasks, the crossover bounce should start a new app and not kill any old tasks.
"""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = []
old_app_live_happy_tasks = {}
old_app_live_unhappy_tasks = {
'app1': set(mock.Mock() for _ in xrange(2)),
'app2': set(mock.Mock() for _ in xrange(3)),
}
assert bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": False,
"tasks_to_drain": set(),
}
def test_crossover_bounce_lots_of_unhappy_old_no_happy_old_no_new(self):
"""When marathon has a new app and multiple old apps, no new tasks are up, all old tasks are unhappy, and there
are too many tasks running, the crossover bounce should kill some (but not all) of the old
tasks.
        This represents a situation where we are running more copies than the new config wants, but none of
        them are healthy yet, so the bounce only drains the excess old tasks rather than all of them.
        """
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = []
old_app_live_happy_tasks = {}
old_app_live_unhappy_tasks = {
'app1': set(mock.Mock() for _ in xrange(5)),
'app2': set(mock.Mock() for _ in xrange(5)),
}
actual = bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
)
assert actual['create_app'] is False
assert len(actual['tasks_to_drain']) == 5
def test_crossover_bounce_lots_of_unhappy_old_some_happy_old_new_app_exists_no_new_tasks(self):
"""When marathon has a new app and multiple old apps, no new tasks are up, one of the old apps is healthy and
the other is not, only unhealthy tasks should get killed.
"""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = []
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(5)),
}
old_app_live_unhappy_tasks = {
'app2': set(mock.Mock() for _ in xrange(5)),
}
actual = bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
)
assert actual['create_app'] is False
assert actual['tasks_to_drain'] == old_app_live_unhappy_tasks['app2']
# Since there are plenty of unhappy old tasks, we should not kill any new ones.
assert len(actual['tasks_to_drain'] & old_app_live_happy_tasks['app1']) == 0
def test_crossover_bounce_mid_bounce(self):
"""When marathon has the desired app, and there are other copies of the service running, but the new app is not
fully up, crossover bounce should only stop a few of the old instances."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(3)]
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(2)),
}
old_app_live_unhappy_tasks = {}
actual = bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
)
assert actual['create_app'] is False
assert len(actual['tasks_to_drain']) == 3
def test_crossover_bounce_mid_bounce_some_happy_old_some_unhappy_old(self):
"""When marathon has the desired app, and there are other copies of the service running, and some of those
older tasks are unhappy, we should prefer killing the unhappy tasks."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(3)]
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(2)),
}
old_app_live_unhappy_tasks = {
'app1': set(mock.Mock() for _ in xrange(1)),
'app2': set(),
}
actual = bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
)
assert actual['create_app'] is False
assert len(actual['tasks_to_drain']) == 4
# There are fewer unhappy old tasks than excess tasks, so we should kill all unhappy old ones, plus a few
# happy ones.
assert old_app_live_unhappy_tasks['app1'].issubset(actual['tasks_to_drain'])
def test_crossover_bounce_mid_bounce_some_happy_old_lots_of_unhappy_old(self):
"""When marathon has the desired app, and there are other copies of the service running, and there are more
unhappy old tasks than excess tasks, we should only kill unhappy tasks.
"""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(3)]
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(2)),
'app2': set(),
}
old_app_live_unhappy_tasks = {
'app1': set(),
'app2': set(mock.Mock() for _ in xrange(5)),
}
actual = bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
)
assert actual['create_app'] is False
# There are as many unhappy old tasks as excess tasks, so all tasks that we kill should be old unhappy ones.
assert len(actual['tasks_to_drain']) == 5
assert actual['tasks_to_drain'] == old_app_live_unhappy_tasks['app2']
def test_crossover_bounce_mid_bounce_no_happy_old_lots_of_unhappy_old(self):
"""When marathon has the desired app, and there are other copies of the service running, but none of the old
tasks are happy, and there are excess tasks, we should kill some (but not all) unhappy old tasks."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(3)]
old_app_live_happy_tasks = {
'app1': set(),
'app2': set(),
}
old_app_live_unhappy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(3)),
}
actual = bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
)
assert actual['create_app'] is False
assert len(actual['tasks_to_drain']) == 4
def test_crossover_bounce_using_margin_factor_big_numbers(self):
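        """With margin_factor=0.95 only 95% of the 500 desired instances (475) need to stay up, so the
        bounce should drain 25 of the 500 live tasks, preferring the unhappy old ones."""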
new_config = {'id': 'foo.bar.12345', 'instances': 500}
happy_tasks = [mock.Mock() for _ in xrange(100)]
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(300)),
'app2': set(),
}
old_app_live_unhappy_tasks = {
'app1': set(),
'app2': set(mock.Mock() for _ in xrange(100)),
}
actual = bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
margin_factor=0.95,
)
assert actual['create_app'] is False
assert len(actual['tasks_to_drain']) == 25
def test_crossover_bounce_using_margin_factor_small_numbers(self):
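        """With margin_factor=0.66 only two of the three desired instances need to stay up, so exactly
        one of the three old happy tasks should be drained."""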
new_config = {'id': 'foo.bar.12345', 'instances': 3}
happy_tasks = []
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(),
}
old_app_live_unhappy_tasks = {
'app1': set(),
'app2': set(),
}
actual = bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
margin_factor=0.66,
)
assert actual['create_app'] is False
assert len(actual['tasks_to_drain']) == 1
def test_crossover_bounce_cleanup(self):
"""When marathon has the desired app, and there are other copies of
the service running, which have no remaining tasks, those apps should
be killed."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(5)]
old_app_live_happy_tasks = {
'app1': set(),
'app2': set(),
}
old_app_live_unhappy_tasks = {
'app1': set(),
'app2': set(),
}
assert bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": False,
"tasks_to_drain": set(),
}
def test_crossover_bounce_done(self):
"""When marathon has the desired app, and there are no other copies of
the service running, crossover bounce should neither start nor stop
anything."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(5)]
old_app_live_happy_tasks = {}
old_app_live_unhappy_tasks = {}
assert bounce_lib.crossover_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": False,
"tasks_to_drain": set(),
}
class TestDownThenUpBounce(object):
def test_downthenup_bounce_no_existing_apps(self):
"""When marathon is unaware of a service, downthenup bounce should try to
create a marathon app."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = []
old_app_live_happy_tasks = {}
old_app_live_unhappy_tasks = {}
assert bounce_lib.downthenup_bounce(
new_config=new_config,
new_app_running=False,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": True,
"tasks_to_drain": set(),
}
def test_downthenup_bounce_old_but_no_new(self):
"""When marathon has only old copies of the service, downthenup_bounce should kill them and not start a new one
yet."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = []
old_app_live_happy_tasks = {
'app1': set(mock.Mock() for _ in xrange(3)),
'app2': set(mock.Mock() for _ in xrange(2)),
}
old_app_live_unhappy_tasks = {
'app1': set(),
'app2': set(mock.Mock() for _ in xrange(1)),
}
assert bounce_lib.downthenup_bounce(
new_config=new_config,
new_app_running=False,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
) == {
"create_app": False,
"tasks_to_drain": set.union(
old_app_live_happy_tasks['app1'],
old_app_live_happy_tasks['app2'],
old_app_live_unhappy_tasks['app2'],
),
}
def test_downthenup_bounce_done(self):
"""When marathon has the desired app, and there are no other copies of the service running, downthenup bounce
should neither start nor stop anything."""
new_config = {'id': 'foo.bar.12345', 'instances': 5}
happy_tasks = [mock.Mock() for _ in xrange(5)]
assert bounce_lib.downthenup_bounce(
new_config=new_config,
new_app_running=True,
happy_new_tasks=happy_tasks,
old_app_live_happy_tasks={},
old_app_live_unhappy_tasks={},
) == {
"create_app": False,
"tasks_to_drain": set(),
}
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
|
"""Map all bindings to PySide2
This module replaces itself with the most desirable binding.
Project goals:
Qt.py was born in the film and visual effects industry to address
the growing need for the development of software capable of running
with more than one flavour of the Qt bindings for Python - PySide,
PySide2, PyQt4 and PyQt5.
1. Build for one, run with all
2. Explicit is better than implicit
3. Support co-existence
Default resolution order:
- PySide2
- PyQt5
- PySide
- PyQt4
Usage:
>> import sys
>> from Qt import QtWidgets
>> app = QtWidgets.QApplication(sys.argv)
>> button = QtWidgets.QPushButton("Hello World")
>> button.show()
>> app.exec_()
"""
import os
import sys
import shutil
self = sys.modules[__name__]
self.__version__ = "0.6.0"
self.__added__ = list() # All unique members of Qt.py
self.__remapped__ = list() # Members copied from elsewhere
self.__modified__ = list() # Existing members modified in some way
# Below members are set dynamically on import relative to the original binding.
self.__qt_version__ = "0.0.0"
self.__binding__ = "None"
self.__binding_version__ = "0.0.0"
self.load_ui = lambda fname: None
self.translate = lambda context, sourceText, disambiguation, n: None
self.setSectionResizeMode = lambda *args, **kwargs: None
def convert(lines):
"""Convert compiled .ui file from PySide2 to Qt.py
Arguments:
lines (list): Each line of of .ui file
Usage:
>> with open("myui.py") as f:
.. lines = convert(f.readlines())
"""
def parse(line):
line = line.replace("from PySide2 import", "from Qt import")
line = line.replace("QtWidgets.QApplication.translate",
"Qt.QtCompat.translate")
return line
parsed = list()
for line in lines:
line = parse(line)
parsed.append(line)
return parsed
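# Hedged usage sketch for convert(): "my_ui.py" is a hypothetical file name for
# a module compiled from a .ui file with the PySide2 tooling, not something this
# module ships. The --convert flag of cli() below wraps the same read/convert/
# write steps (plus a backup copy).
#
#   with open("my_ui.py") as f:
#       converted = convert(f.readlines())
#   with open("my_ui.py", "w") as f:
#       f.write("".join(converted))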
def _remap(object, name, value, safe=True):
"""Prevent accidental assignment of existing members
Arguments:
object (object): Parent of new attribute
name (str): Name of new attribute
value (object): Value of new attribute
safe (bool): Whether or not to guarantee that
the new attribute was not overwritten.
            Can be set to False on the condition that
            it is superseded by extensive testing.
"""
if os.getenv("QT_TESTING") is not None and safe:
# Cannot alter original binding.
if hasattr(object, name):
raise AttributeError("Cannot override existing name: "
"%s.%s" % (object.__name__, name))
# Cannot alter classes of functions
if type(object).__name__ != "module":
raise AttributeError("%s != 'module': Cannot alter "
"anything but modules" % object)
elif hasattr(object, name):
# Keep track of modifications
self.__modified__.append(name)
self.__remapped__.append(name)
setattr(object, name, value)
def _add(object, name, value):
"""Append to self, accessible via Qt.QtCompat"""
self.__added__.append(name)
setattr(self, name, value)
def _pyqt5():
import PyQt5.Qt
from PyQt5 import QtCore, QtWidgets, uic
_remap(QtCore, "Signal", QtCore.pyqtSignal)
_remap(QtCore, "Slot", QtCore.pyqtSlot)
_remap(QtCore, "Property", QtCore.pyqtProperty)
_add(PyQt5, "__binding__", PyQt5.__name__)
_add(PyQt5, "load_ui", lambda fname: uic.loadUi(fname))
_add(PyQt5, "translate", lambda context, sourceText, disambiguation, n: (
QtCore.QCoreApplication(context, sourceText,
disambiguation, n)))
_add(PyQt5,
"setSectionResizeMode",
QtWidgets.QHeaderView.setSectionResizeMode)
_maintain_backwards_compatibility(PyQt5)
return PyQt5
def _pyqt4():
# Attempt to set sip API v2 (must be done prior to importing PyQt4)
import sip
try:
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
sip.setapi("QDate", 2)
sip.setapi("QDateTime", 2)
sip.setapi("QTextStream", 2)
sip.setapi("QTime", 2)
sip.setapi("QUrl", 2)
except AttributeError:
raise ImportError
# PyQt4 < v4.6
except ValueError:
# API version already set to v1
raise ImportError
import PyQt4.Qt
from PyQt4 import QtCore, QtGui, uic
_remap(PyQt4, "QtWidgets", QtGui)
_remap(QtCore, "Signal", QtCore.pyqtSignal)
_remap(QtCore, "Slot", QtCore.pyqtSlot)
_remap(QtCore, "Property", QtCore.pyqtProperty)
_remap(QtCore, "QItemSelection", QtGui.QItemSelection)
_remap(QtCore, "QStringListModel", QtGui.QStringListModel)
_remap(QtCore, "QItemSelectionModel", QtGui.QItemSelectionModel)
_remap(QtCore, "QSortFilterProxyModel", QtGui.QSortFilterProxyModel)
_remap(QtCore, "QAbstractProxyModel", QtGui.QAbstractProxyModel)
try:
from PyQt4 import QtWebKit
_remap(PyQt4, "QtWebKitWidgets", QtWebKit)
except ImportError:
        # QtWebkit is optional in Qt, therefore might not be available
pass
_add(PyQt4, "QtCompat", self)
_add(PyQt4, "__binding__", PyQt4.__name__)
_add(PyQt4, "load_ui", lambda fname: uic.loadUi(fname))
_add(PyQt4, "translate", lambda context, sourceText, disambiguation, n: (
QtCore.QCoreApplication(context, sourceText,
disambiguation, None, n)))
_add(PyQt4, "setSectionResizeMode", QtGui.QHeaderView.setResizeMode)
_maintain_backwards_compatibility(PyQt4)
return PyQt4
def _pyside2():
import PySide2
from PySide2 import QtGui, QtWidgets, QtCore, QtUiTools
_remap(QtCore, "QStringListModel", QtGui.QStringListModel)
_add(PySide2, "__binding__", PySide2.__name__)
_add(PySide2, "load_ui", lambda fname: QtUiTools.QUiLoader().load(fname))
_add(PySide2, "translate", lambda context, sourceText, disambiguation, n: (
QtCore.QCoreApplication(context, sourceText,
disambiguation, None, n)))
_add(PySide2,
"setSectionResizeMode",
QtWidgets.QHeaderView.setSectionResizeMode)
_maintain_backwards_compatibility(PySide2)
return PySide2
def _pyside():
import PySide
from PySide import QtGui, QtCore, QtUiTools
_remap(PySide, "QtWidgets", QtGui)
_remap(QtCore, "QSortFilterProxyModel", QtGui.QSortFilterProxyModel)
_remap(QtCore, "QStringListModel", QtGui.QStringListModel)
_remap(QtCore, "QItemSelection", QtGui.QItemSelection)
_remap(QtCore, "QItemSelectionModel", QtGui.QItemSelectionModel)
_remap(QtCore, "QAbstractProxyModel", QtGui.QAbstractProxyModel)
try:
from PySide import QtWebKit
_remap(PySide, "QtWebKitWidgets", QtWebKit)
except ImportError:
# QtWebkit is optional in Qt, therefore might not be available
pass
_add(PySide, "__binding__", PySide.__name__)
_add(PySide, "load_ui", lambda fname: QtUiTools.QUiLoader().load(fname))
_add(PySide, "translate", lambda context, sourceText, disambiguation, n: (
QtCore.QCoreApplication(context, sourceText,
disambiguation, None, n)))
_add(PySide, "setSectionResizeMode", QtGui.QHeaderView.setResizeMode)
_maintain_backwards_compatibility(PySide)
return PySide
def _log(text, verbose):
if verbose:
sys.stdout.write(text + "\n")
def cli(args):
"""Qt.py command-line interface"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--convert",
help="Path to compiled Python module, e.g. my_ui.py")
parser.add_argument("--compile",
help="Accept raw .ui file and compile with native "
"PySide2 compiler.")
parser.add_argument("--stdout",
help="Write to stdout instead of file",
action="store_true")
parser.add_argument("--stdin",
help="Read from stdin instead of file",
action="store_true")
args = parser.parse_args(args)
if args.stdout:
raise NotImplementedError("--stdout")
if args.stdin:
raise NotImplementedError("--stdin")
if args.compile:
raise NotImplementedError("--compile")
if args.convert:
sys.stdout.write("#\n"
"# WARNING: --convert is an ALPHA feature.\n#\n"
"# See https://github.com/mottosso/Qt.py/pull/132\n"
"# for details.\n"
"#\n")
#
# ------> Read
#
with open(args.convert) as f:
lines = convert(f.readlines())
backup = "%s_backup%s" % os.path.splitext(args.convert)
sys.stdout.write("Creating \"%s\"..\n" % backup)
shutil.copy(args.convert, backup)
#
# <------ Write
#
with open(args.convert, "w") as f:
f.write("".join(lines))
sys.stdout.write("Successfully converted \"%s\"\n" % args.convert)
def init():
"""Try loading each binding in turn
Please note: the entire Qt module is replaced with this code:
sys.modules["Qt"] = binding()
This means no functions or variables can be called after
this has executed.
For debugging and testing, this module may be accessed
through `Qt.__shim__`.
"""
preferred = os.getenv("QT_PREFERRED_BINDING")
verbose = os.getenv("QT_VERBOSE") is not None
bindings = (_pyside2, _pyqt5, _pyside, _pyqt4)
if preferred:
# Internal flag (used in installer)
if preferred == "None":
self.__wrapper_version__ = self.__version__
return
preferred = preferred.split(os.pathsep)
available = {
"PySide2": _pyside2,
"PyQt5": _pyqt5,
"PySide": _pyside,
"PyQt4": _pyqt4
}
try:
bindings = [available[binding] for binding in preferred]
except KeyError:
            raise ImportError(
                "Available preferred Qt bindings: " +
                "\n".join(preferred)
            )
for binding in bindings:
_log("Trying %s" % binding.__name__, verbose)
try:
binding = binding()
except ImportError as e:
_log(" - ImportError(\"%s\")" % e, verbose)
continue
else:
# Reference to this module
binding.__shim__ = self
binding.QtCompat = self
sys.modules.update({
__name__: binding,
# Fix #133, `from Qt.QtWidgets import QPushButton`
__name__ + ".QtWidgets": binding.QtWidgets
})
return
    # If no binding was found, raise this error
    raise ImportError("No Qt binding was found.")
def _maintain_backwards_compatibility(binding):
"""Add members found in prior versions up till the next major release
These members are to be considered deprecated. When a new major
release is made, these members are removed.
"""
for member in ("__binding__",
"__binding_version__",
"__qt_version__",
"__added__",
"__remapped__",
"__modified__",
"convert",
"load_ui",
"translate"):
setattr(binding, member, getattr(self, member))
self.__added__.append(member)
setattr(binding, "__wrapper_version__", self.__version__)
self.__added__.append("__wrapper_version__")
cli(sys.argv[1:]) if __name__ == "__main__" else init()
|
|
# coding=utf-8
# Copyright 2021 RLDSCreator Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RLDS Creator configuration."""
import re
from procgen import env as procgen_env
import robosuite
from robosuite import controllers as robosuite_controllers
# IDs of Procgen environments and their labels.
PROCGEN_IDS = [{
'id': env,
'label': env.capitalize()
} for env in procgen_env.ENV_NAMES]
# See list_games() in third_party/py/atari_py/games.py.
ATARI_GAMES = [
'Alien', 'Amidar', 'Assault', 'Asterix', 'Asteroids', 'Atlantis',
'BankHeist', 'BattleZone', 'BeamRider', 'Berzerk', 'Bowling', 'Boxing',
'Breakout', 'Centipede', 'ChopperCommand', 'CrazyClimber', 'Defender',
'DemonAttack', 'DoubleDunk', 'Enduro', 'FishingDerby', 'Freeway',
'Frostbite', 'Gopher', 'Gravitar', 'Hero', 'IceHockey', 'Jamesbond',
'Kangaroo', 'Krull', 'KungFuMaster', 'MontezumaRevenge', 'MsPacman',
'NameThisGame', 'Phoenix', 'Pitfall', 'Pong', 'PrivateEye', 'Qbert',
'Riverraid', 'RoadRunner', 'Robotank', 'Seaquest', 'Skiing', 'Solaris',
'SpaceInvaders', 'StarGunner', 'Surround', 'Tennis', 'TimePilot',
'Tutankham', 'UpNDown', 'Venture', 'VideoPinball', 'WizardOfWor',
'YarsRevenge', 'Zaxxon'
]
# IDs of Atari environments and their labels.
ATARI_IDS = [{'id': game, 'label': game} for game in ATARI_GAMES]
# See game_scripts/levels/contributed/dmlab30 under deepmind_lab.
DMLAB_LEVELS = [
'rooms_collect_good_objects_train',
'rooms_collect_good_objects_test',
'rooms_exploit_deferred_effects_train',
'rooms_exploit_deferred_effects_test',
'rooms_select_nonmatching_object',
'rooms_watermaze',
'rooms_keys_doors_puzzle',
'language_select_described_object',
'language_select_located_object',
'language_execute_random_task',
'language_answer_quantitative_question',
'lasertag_one_opponent_small',
'lasertag_three_opponents_small',
'lasertag_one_opponent_large',
'lasertag_three_opponents_large',
'natlab_fixed_large_map',
'natlab_varying_map_regrowth',
'natlab_varying_map_randomized',
'skymaze_irreversible_path_hard',
'skymaze_irreversible_path_varied',
'psychlab_arbitrary_visuomotor_mapping',
'psychlab_continuous_recognition',
'psychlab_sequential_comparison',
'psychlab_visual_search',
'explore_object_locations_small',
'explore_object_locations_large',
'explore_obstructed_goals_small',
'explore_obstructed_goals_large',
'explore_goal_locations_small',
'explore_goal_locations_large',
'explore_object_rewards_few',
'explore_object_rewards_many',
]
def dmlab_level_label(level) -> str:
"""Returns the label for a DMLab level."""
return level.replace('_', ' ').title()
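# Illustrative example of the mapping above:
#   dmlab_level_label('rooms_watermaze') -> 'Rooms Watermaze'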
# IDs of DMLab environments and their labels.
DMLAB_IDS = [{
'id': level,
'label': dmlab_level_label(level)
} for level in DMLAB_LEVELS]
NET_HACK_IDS = [
{
'id': 'NetHack-v0',
'label': 'Default'
},
{
'id': 'NetHackScore-v0',
'label': 'Score'
},
{
'id': 'NetHackStaircase-v0',
'label': 'Staircase'
},
{
'id': 'NetHackStaircasePet-v0',
'label': 'Staircase Pet'
},
{
'id': 'NetHackOracle-v0',
'label': 'Oracle'
},
{
'id': 'NetHackGold-v0',
'label': 'Gold'
},
{
'id': 'NetHackEat-v0',
'label': 'Eat'
},
{
'id': 'NetHackScout-v0',
'label': 'Scout'
},
{
'id': 'NetHackChallenge-v0',
'label': 'Challenge'
},
]
ROBODESK_TASKS = [
{
'id': 'open_slide',
'label': 'Open slide'
},
{
'id': 'open_drawer',
'label': 'Open drawer'
},
{
'id': 'push_green',
'label': 'Push green'
},
{
'id': 'stack',
'label': 'Stack'
},
{
'id': 'upright_block_off_table',
'label': 'Upright block off table'
},
{
'id': 'flat_block_in_bin',
'label': 'Flat block in bin'
},
{
'id': 'flat_block_in_shelf',
'label': 'Flat block in shelf'
},
{
'id': 'lift_upright_block',
'label': 'Lift upright block'
},
{
'id': 'lift_ball',
'label': 'Lift ball'
},
]
ROBOSUITE_IDS = [{
'id': env,
'label': re.sub(r'([A-Z])', r' \1', env)
} for env in robosuite.ALL_ENVIRONMENTS]
ROBOSUITE_ROBOTS = [{
'id': robot,
'label': robot
} for robot in robosuite.ALL_ROBOTS]
ROBOSUITE_CONFIGS = [{
'id': 'single-arm-opposed',
'label': 'Single Arms Opposed'
}, {
'id': 'single-arm-parallel',
'label': 'Single Arms Parallel'
}, {
'id': 'bimanual',
'label': 'Bimanual'
}]
ROBOSUITE_ARMS = [{
'id': 'right',
'label': 'Right'
}, {
'id': 'left',
'label': 'Left'
}]
ROBOSUITE_CONTROLLERS = [{
'id': controller,
'label': name
} for controller, name in robosuite_controllers.CONTROLLER_INFO.items()]
ROBOSUITE_CAMERAS = [{
'id': 'agentview',
'label': 'Agent view'
}, {
'id': 'frontview',
'label': 'Front view'
}, {
'id': 'birdview',
'label': 'Bird view'
}, {
'id': 'sideview',
'label': 'Side view'
}, {
'id': 'robot0_robotview',
'label': 'Robot view'
}, {
'id': 'robot0_eye_in_hand',
'label': 'Robot eye in hand view',
}]
CONFIG = {
'atari': {
'ids': ATARI_IDS
},
'dmlab': {
'ids': DMLAB_IDS
},
'net_hack': {
'ids': NET_HACK_IDS
},
'procgen': {
'ids': PROCGEN_IDS
},
'robodesk': {
'tasks': ROBODESK_TASKS
},
'robosuite': {
'ids': ROBOSUITE_IDS,
'robots': ROBOSUITE_ROBOTS,
'configs': ROBOSUITE_CONFIGS,
'arms': ROBOSUITE_ARMS,
'controllers': ROBOSUITE_CONTROLLERS,
'cameras': ROBOSUITE_CAMERAS,
},
}
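# Hedged usage sketch: CONFIG is a plain nested dict, so a UI layer can pull the
# selectable options for one environment family by key. The helper below is
# illustrative only and is not referenced elsewhere in this module.
def _example_labels(family='procgen'):
  """Returns the display labels configured for a family that has an 'ids' entry."""
  return [entry['label'] for entry in CONFIG[family]['ids']]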
|
|
# coding:utf-8
import datetime
from .user import User
from db.mysql_model import BaseModel
from peewee import *
from settings.config import config
class PostCategory(BaseModel):
"""
class
"""
name = CharField(verbose_name='name')
str = CharField(unique=True, verbose_name='str')
class PostTopic(BaseModel):
"""
sub class
"""
category = ForeignKeyField(PostCategory, related_name='topics_category', null=True)
name = CharField(verbose_name='name')
str = CharField(unique=True, verbose_name='str')
hot = BooleanField(default=False, verbose_name='hot topic')
class Post(BaseModel):
"""
post
"""
AUTH_READ = 0
AUTH_MODIFY = 1
AUTH_DELETE = 2
AUTH_ALL = 3
topic = ForeignKeyField(PostTopic, related_name='posts_topic')
title = CharField(verbose_name='post-title')
content = TextField(verbose_name='post-content')
user = ForeignKeyField(User, verbose_name='who post this')
create_time = DateTimeField(default=datetime.datetime.now, verbose_name='post-time')
latest_reply_user = ForeignKeyField(User, null=True, related_name="reply_user", verbose_name='rt')
latest_reply_time = DateTimeField(null=True, verbose_name='rt')
visit_count = IntegerField(default=1, verbose_name='rt')
reply_count = IntegerField(default=0, verbose_name='rt')
collect_count = IntegerField(default=0, verbose_name='rt')
top = BooleanField(default=False, verbose_name='top')
essence = BooleanField(default=False, verbose_name='rt')
is_delete = BooleanField(default=False, verbose_name='rt')
def __str__(self):
return "[%s-%s]" % (self.title, self.user)
def logic_delete(self):
self.is_delete = True
self.save()
def check_auth(self, user):
if user.is_admin() or self.check_own(user):
return True
return False
def check_own(self, user):
if user and self.user == user:
return True
else:
return False
def up_collect(self):
self.collect_count += 1
self.save()
def up_visit(self):
self.visit_count += 1
self.save()
def update_latest_reply(self, postreply):
self.latest_reply_user = postreply.user
self.latest_reply_time = postreply.create_time
self.reply_count += 1
self.save()
@staticmethod
def list_top():
"""
get top posts
:return:
"""
top_posts = Post.select().where(Post.top == True, Post.is_delete == False)
top_posts_count = top_posts.count()
return top_posts, top_posts_count
@staticmethod
def list_recently(page_limit=config.default_page_limit, page_number=1):
"""
get recent posts
        :param page_limit: number of results per page
:param page_number: current page number
:return:
"""
page_number_limit = Post.select().where(Post.is_delete == False).order_by(Post.latest_reply_time.desc()).count()
posts = Post.select().where(Post.is_delete == False).order_by(Post.latest_reply_time.desc()).paginate(page_number, page_limit)
# result = []
# for post in posts:
# result.append({
# 'id': post.id,
# 'topic': post.topic,
# 'title': post.title,
# 'user': post.user,
# 'create_time': TimeUtil.datetime_delta(post.create_time),
# 'lastest_reply': post.lastest_reply,
# 'visit_count': post.visit_count,
# 'reply_count': post.reply_count,
# 'collect_count': post.collect_count
# })
return posts, page_number_limit
@staticmethod
def list_by_topic(topic, page_limit=config.default_page_limit, page_number=1):
"""
get recent posts of specific topic
:param topic:
:param page_limit:
:param page_number:
:return:
"""
page_number_limit = Post.select().where(Post.topic == topic, Post.is_delete == False).order_by(Post.latest_reply_time.desc()).count()
posts = Post.select().where(Post.topic == topic, Post.is_delete == False).order_by(Post.latest_reply_time.desc()).paginate(page_number, page_limit)
return posts, page_number_limit
def detail(self):
"""
get post detail
:return:
"""
result = {}
user = self.user
result['title'] = self.title
result['content'] = self.content
result['topic'] = self.topic.name
result['username'] = user.username
result['nickname'] = user.nickname
result['avatar'] = user.avatar
        result['create_time'] = self.create_time
result['visit_count'] = self.visit_count
result['reply_count'] = self.reply_count
result['collect_count'] = self.collect_count
return result
@staticmethod
def get_detail_and_replys(post_id):
"""
        get a post's detail and its replies
:param post_id:
:return:
"""
try:
post = Post.get(Post.id == post_id, Post.is_delete == False)
except Post.DoesNotExist:
return None, None
post.up_visit()
post_replys = PostReply.list_all(post)
return post, post_replys
@staticmethod
def get_by_id(post_id):
"""
get post by id
:param post_id:
:return:
"""
try:
post = Post.get(Post.id == post_id, Post.is_delete == False)
except Post.DoesNotExist:
return None
return post
class PostReply(BaseModel):
"""
PostReply
"""
post = ForeignKeyField(Post, verbose_name='post')
user = ForeignKeyField(User, verbose_name='who reply this')
content = TextField(verbose_name='content')
create_time = DateTimeField(default=datetime.datetime.now, verbose_name='reply time')
like_count = IntegerField(default=0, verbose_name='count of like')
def __str__(self):
return "[%s-%s]" % (self.user, self.content)
def up_like(self):
self.like_count += 1
self.save()
@staticmethod
def list_all(post):
postreplys = PostReply.select().where(PostReply.post == post)
return postreplys
class CollectPost(BaseModel):
"""
collect post
"""
post = ForeignKeyField(Post, verbose_name='post')
user = ForeignKeyField(User, verbose_name='who collect this')
collect_time = DateTimeField(default=datetime.datetime.now, verbose_name='collect time')
@staticmethod
def is_collect(post, user):
if not user:
return False
try:
CollectPost.get(CollectPost.post==post, CollectPost.user==user)
except DoesNotExist:
return False
else:
return True
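# Hedged usage sketch of the pagination helpers above, roughly as a request
# handler might call them. The page size of 20 is an arbitrary example, and
# `some_topic` stands in for a PostTopic instance.
#
#   posts, total = Post.list_recently(page_limit=20, page_number=1)
#   details = [post.detail() for post in posts]
#   topic_posts, _ = Post.list_by_topic(some_topic, page_number=1)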
|
|
import glob
import os
import py_compile
import stat
import sys
import zipfile
import py
import pytest
ast = pytest.importorskip("ast")
if sys.platform.startswith("java"):
# XXX should be xfail
pytest.skip("assert rewrite does currently not work on jython")
import _pytest._code
from _pytest.assertion import util
from _pytest.assertion.rewrite import rewrite_asserts, PYTEST_TAG, AssertionRewritingHook
from _pytest.main import EXIT_NOTESTSCOLLECTED
def setup_module(mod):
mod._old_reprcompare = util._reprcompare
_pytest._code._reprcompare = None
def teardown_module(mod):
util._reprcompare = mod._old_reprcompare
del mod._old_reprcompare
def rewrite(src):
tree = ast.parse(src)
rewrite_asserts(tree)
return tree
def getmsg(f, extra_ns=None, must_pass=False):
"""Rewrite the assertions in f, run it, and get the failure message."""
src = '\n'.join(_pytest._code.Code(f).source().lines)
mod = rewrite(src)
code = compile(mod, "<test>", "exec")
ns = {}
if extra_ns is not None:
ns.update(extra_ns)
py.builtin.exec_(code, ns)
func = ns[f.__name__]
try:
func()
except AssertionError:
if must_pass:
pytest.fail("shouldn't have raised")
s = str(sys.exc_info()[1])
if not s.startswith("assert"):
return "AssertionError: " + s
return s
else:
if not must_pass:
pytest.fail("function didn't raise at all")
class TestAssertionRewrite:
def test_place_initial_imports(self):
s = """'Doc string'\nother = stuff"""
m = rewrite(s)
assert isinstance(m.body[0], ast.Expr)
assert isinstance(m.body[0].value, ast.Str)
for imp in m.body[1:3]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 2
assert imp.col_offset == 0
assert isinstance(m.body[3], ast.Assign)
s = """from __future__ import with_statement\nother_stuff"""
m = rewrite(s)
assert isinstance(m.body[0], ast.ImportFrom)
for imp in m.body[1:3]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 2
assert imp.col_offset == 0
assert isinstance(m.body[3], ast.Expr)
s = """'doc string'\nfrom __future__ import with_statement\nother"""
m = rewrite(s)
assert isinstance(m.body[0], ast.Expr)
assert isinstance(m.body[0].value, ast.Str)
assert isinstance(m.body[1], ast.ImportFrom)
for imp in m.body[2:4]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 3
assert imp.col_offset == 0
assert isinstance(m.body[4], ast.Expr)
s = """from . import relative\nother_stuff"""
m = rewrite(s)
for imp in m.body[0:2]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 1
assert imp.col_offset == 0
assert isinstance(m.body[3], ast.Expr)
def test_dont_rewrite(self):
s = """'PYTEST_DONT_REWRITE'\nassert 14"""
m = rewrite(s)
assert len(m.body) == 2
assert isinstance(m.body[0].value, ast.Str)
assert isinstance(m.body[1], ast.Assert)
assert m.body[1].msg is None
def test_name(self):
def f():
assert False
assert getmsg(f) == "assert False"
def f():
f = False
assert f
assert getmsg(f) == "assert False"
def f():
assert a_global # noqa
assert getmsg(f, {"a_global" : False}) == "assert False"
def f():
assert sys == 42
assert getmsg(f, {"sys" : sys}) == "assert sys == 42"
def f():
assert cls == 42 # noqa
class X(object):
pass
assert getmsg(f, {"cls" : X}) == "assert cls == 42"
def test_assert_already_has_message(self):
def f():
assert False, "something bad!"
assert getmsg(f) == "AssertionError: something bad!\nassert False"
def test_assertion_message(self, testdir):
testdir.makepyfile("""
def test_foo():
assert 1 == 2, "The failure message"
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError*The failure message*",
"*assert 1 == 2*",
])
def test_assertion_message_multiline(self, testdir):
testdir.makepyfile("""
def test_foo():
assert 1 == 2, "A multiline\\nfailure message"
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError*A multiline*",
"*failure message*",
"*assert 1 == 2*",
])
def test_assertion_message_tuple(self, testdir):
testdir.makepyfile("""
def test_foo():
assert 1 == 2, (1, 2)
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError*%s*" % repr((1, 2)),
"*assert 1 == 2*",
])
def test_assertion_message_expr(self, testdir):
testdir.makepyfile("""
def test_foo():
assert 1 == 2, 1 + 2
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError*3*",
"*assert 1 == 2*",
])
def test_assertion_message_escape(self, testdir):
testdir.makepyfile("""
def test_foo():
assert 1 == 2, 'To be escaped: %'
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError: To be escaped: %",
"*assert 1 == 2",
])
def test_boolop(self):
def f():
f = g = False
assert f and g
assert getmsg(f) == "assert (False)"
def f():
f = True
g = False
assert f and g
assert getmsg(f) == "assert (True and False)"
def f():
f = False
g = True
assert f and g
assert getmsg(f) == "assert (False)"
def f():
f = g = False
assert f or g
assert getmsg(f) == "assert (False or False)"
def f():
f = g = False
assert not f and not g
getmsg(f, must_pass=True)
def x():
return False
def f():
assert x() and x()
assert getmsg(f, {"x" : x}) == """assert (False)
+ where False = x()"""
def f():
assert False or x()
assert getmsg(f, {"x" : x}) == """assert (False or False)
+ where False = x()"""
def f():
assert 1 in {} and 2 in {}
assert getmsg(f) == "assert (1 in {})"
def f():
x = 1
y = 2
assert x in {1 : None} and y in {}
assert getmsg(f) == "assert (1 in {1: None} and 2 in {})"
def f():
f = True
g = False
assert f or g
getmsg(f, must_pass=True)
def f():
f = g = h = lambda: True
assert f() and g() and h()
getmsg(f, must_pass=True)
def test_short_circut_evaluation(self):
def f():
assert True or explode # noqa
getmsg(f, must_pass=True)
def f():
x = 1
assert x == 1 or x == 2
getmsg(f, must_pass=True)
def test_unary_op(self):
def f():
x = True
assert not x
assert getmsg(f) == "assert not True"
def f():
x = 0
assert ~x + 1
assert getmsg(f) == "assert (~0 + 1)"
def f():
x = 3
assert -x + x
assert getmsg(f) == "assert (-3 + 3)"
def f():
x = 0
assert +x + x
assert getmsg(f) == "assert (+0 + 0)"
def test_binary_op(self):
def f():
x = 1
y = -1
assert x + y
assert getmsg(f) == "assert (1 + -1)"
def f():
assert not 5 % 4
assert getmsg(f) == "assert not (5 % 4)"
def test_boolop_percent(self):
def f():
assert 3 % 2 and False
assert getmsg(f) == "assert ((3 % 2) and False)"
def f():
assert False or 4 % 2
assert getmsg(f) == "assert (False or (4 % 2))"
@pytest.mark.skipif("sys.version_info < (3,5)")
def test_at_operator_issue1290(self, testdir):
testdir.makepyfile("""
class Matrix:
def __init__(self, num):
self.num = num
def __matmul__(self, other):
return self.num * other.num
def test_multmat_operator():
assert Matrix(2) @ Matrix(3) == 6""")
testdir.runpytest().assert_outcomes(passed=1)
def test_call(self):
def g(a=42, *args, **kwargs):
return False
ns = {"g" : g}
def f():
assert g()
assert getmsg(f, ns) == """assert False
+ where False = g()"""
def f():
assert g(1)
assert getmsg(f, ns) == """assert False
+ where False = g(1)"""
def f():
assert g(1, 2)
assert getmsg(f, ns) == """assert False
+ where False = g(1, 2)"""
def f():
assert g(1, g=42)
assert getmsg(f, ns) == """assert False
+ where False = g(1, g=42)"""
def f():
assert g(1, 3, g=23)
assert getmsg(f, ns) == """assert False
+ where False = g(1, 3, g=23)"""
def f():
seq = [1, 2, 3]
assert g(*seq)
assert getmsg(f, ns) == """assert False
+ where False = g(*[1, 2, 3])"""
def f():
x = "a"
assert g(**{x : 2})
assert getmsg(f, ns) == """assert False
+ where False = g(**{'a': 2})"""
def test_attribute(self):
class X(object):
g = 3
ns = {"x" : X}
def f():
assert not x.g # noqa
assert getmsg(f, ns) == """assert not 3
+ where 3 = x.g"""
def f():
x.a = False # noqa
assert x.a # noqa
assert getmsg(f, ns) == """assert False
+ where False = x.a"""
def test_comparisons(self):
def f():
a, b = range(2)
assert b < a
assert getmsg(f) == """assert 1 < 0"""
def f():
a, b, c = range(3)
assert a > b > c
assert getmsg(f) == """assert 0 > 1"""
def f():
a, b, c = range(3)
assert a < b > c
assert getmsg(f) == """assert 1 > 2"""
def f():
a, b, c = range(3)
assert a < b <= c
getmsg(f, must_pass=True)
def f():
a, b, c = range(3)
assert a < b
assert b < c
getmsg(f, must_pass=True)
def test_len(self):
def f():
l = list(range(10))
assert len(l) == 11
assert getmsg(f).startswith("""assert 10 == 11
+ where 10 = len([""")
def test_custom_reprcompare(self, monkeypatch):
def my_reprcompare(op, left, right):
return "42"
monkeypatch.setattr(util, "_reprcompare", my_reprcompare)
def f():
assert 42 < 3
assert getmsg(f) == "assert 42"
def my_reprcompare(op, left, right):
return "%s %s %s" % (left, op, right)
monkeypatch.setattr(util, "_reprcompare", my_reprcompare)
def f():
assert 1 < 3 < 5 <= 4 < 7
assert getmsg(f) == "assert 5 <= 4"
def test_assert_raising_nonzero_in_comparison(self):
def f():
class A(object):
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return False
assert myany(A() < 0)
assert "<MY42 object> < 0" in getmsg(f)
def test_formatchar(self):
def f():
assert "%test" == "test"
assert getmsg(f).startswith("assert '%test' == 'test'")
def test_custom_repr(self):
def f():
class Foo(object):
a = 1
def __repr__(self):
return "\n{ \n~ \n}"
f = Foo()
assert 0 == f.a
assert r"where 1 = \n{ \n~ \n}.a" in util._format_lines([getmsg(f)])[0]
class TestRewriteOnImport:
def test_pycache_is_a_file(self, testdir):
testdir.tmpdir.join("__pycache__").write("Hello")
testdir.makepyfile("""
def test_rewritten():
assert "@py_builtins" in globals()""")
assert testdir.runpytest().ret == 0
def test_pycache_is_readonly(self, testdir):
cache = testdir.tmpdir.mkdir("__pycache__")
old_mode = cache.stat().mode
cache.chmod(old_mode ^ stat.S_IWRITE)
testdir.makepyfile("""
def test_rewritten():
assert "@py_builtins" in globals()""")
try:
assert testdir.runpytest().ret == 0
finally:
cache.chmod(old_mode)
def test_zipfile(self, testdir):
z = testdir.tmpdir.join("myzip.zip")
z_fn = str(z)
f = zipfile.ZipFile(z_fn, "w")
try:
f.writestr("test_gum/__init__.py", "")
f.writestr("test_gum/test_lizard.py", "")
finally:
f.close()
z.chmod(256)
testdir.makepyfile("""
import sys
sys.path.append(%r)
import test_gum.test_lizard""" % (z_fn,))
assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED
def test_readonly(self, testdir):
sub = testdir.mkdir("testing")
sub.join("test_readonly.py").write(
py.builtin._totext("""
def test_rewritten():
assert "@py_builtins" in globals()
""").encode("utf-8"), "wb")
old_mode = sub.stat().mode
sub.chmod(320)
try:
assert testdir.runpytest().ret == 0
finally:
sub.chmod(old_mode)
def test_dont_write_bytecode(self, testdir, monkeypatch):
testdir.makepyfile("""
import os
def test_no_bytecode():
assert "__pycache__" in __cached__
assert not os.path.exists(__cached__)
assert not os.path.exists(os.path.dirname(__cached__))""")
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
assert testdir.runpytest_subprocess().ret == 0
def test_orphaned_pyc_file(self, testdir):
if sys.version_info < (3, 0) and hasattr(sys, 'pypy_version_info'):
pytest.skip("pypy2 doesn't run orphaned pyc files")
testdir.makepyfile("""
import orphan
def test_it():
assert orphan.value == 17
""")
testdir.makepyfile(orphan="""
value = 17
""")
py_compile.compile("orphan.py")
os.remove("orphan.py")
# Python 3 puts the .pyc files in a __pycache__ directory, and will
# not import from there without source. It will import a .pyc from
# the source location though.
if not os.path.exists("orphan.pyc"):
pycs = glob.glob("__pycache__/orphan.*.pyc")
assert len(pycs) == 1
os.rename(pycs[0], "orphan.pyc")
assert testdir.runpytest().ret == 0
@pytest.mark.skipif('"__pypy__" in sys.modules')
def test_pyc_vs_pyo(self, testdir, monkeypatch):
testdir.makepyfile("""
import pytest
def test_optimized():
"hello"
assert test_optimized.__doc__ is None"""
)
p = py.path.local.make_numbered_dir(prefix="runpytest-", keep=None,
rootdir=testdir.tmpdir)
tmp = "--basetemp=%s" % p
monkeypatch.setenv("PYTHONOPTIMIZE", "2")
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
assert testdir.runpytest_subprocess(tmp).ret == 0
tagged = "test_pyc_vs_pyo." + PYTEST_TAG
assert tagged + ".pyo" in os.listdir("__pycache__")
monkeypatch.undo()
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
assert testdir.runpytest_subprocess(tmp).ret == 1
assert tagged + ".pyc" in os.listdir("__pycache__")
def test_package(self, testdir):
pkg = testdir.tmpdir.join("pkg")
pkg.mkdir()
pkg.join("__init__.py").ensure()
pkg.join("test_blah.py").write("""
def test_rewritten():
assert "@py_builtins" in globals()""")
assert testdir.runpytest().ret == 0
def test_translate_newlines(self, testdir):
content = "def test_rewritten():\r\n assert '@py_builtins' in globals()"
b = content.encode("utf-8")
testdir.tmpdir.join("test_newlines.py").write(b, "wb")
assert testdir.runpytest().ret == 0
@pytest.mark.skipif(sys.version_info < (3,3),
reason='packages without __init__.py not supported on python 2')
def test_package_without__init__py(self, testdir):
pkg = testdir.mkdir('a_package_without_init_py')
pkg.join('module.py').ensure()
testdir.makepyfile("import a_package_without_init_py.module")
assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED
def test_rewrite_warning(self, pytestconfig, monkeypatch):
hook = AssertionRewritingHook(pytestconfig)
warnings = []
def mywarn(code, msg):
warnings.append((code, msg))
monkeypatch.setattr(hook.config, 'warn', mywarn)
hook.mark_rewrite('_pytest')
assert '_pytest' in warnings[0][1]
def test_rewrite_module_imported_from_conftest(self, testdir):
testdir.makeconftest('''
import test_rewrite_module_imported
''')
testdir.makepyfile(test_rewrite_module_imported='''
def test_rewritten():
assert "@py_builtins" in globals()
''')
assert testdir.runpytest_subprocess().ret == 0
def test_remember_rewritten_modules(self, pytestconfig, testdir, monkeypatch):
"""
AssertionRewriteHook should remember rewritten modules so it
doesn't give false positives (#2005).
"""
monkeypatch.syspath_prepend(testdir.tmpdir)
testdir.makepyfile(test_remember_rewritten_modules='')
warnings = []
hook = AssertionRewritingHook(pytestconfig)
monkeypatch.setattr(hook.config, 'warn', lambda code, msg: warnings.append(msg))
hook.find_module('test_remember_rewritten_modules')
hook.load_module('test_remember_rewritten_modules')
hook.mark_rewrite('test_remember_rewritten_modules')
hook.mark_rewrite('test_remember_rewritten_modules')
assert warnings == []
def test_rewrite_warning_using_pytest_plugins(self, testdir, monkeypatch):
testdir.makepyfile(**{
'conftest.py': "pytest_plugins = ['core', 'gui', 'sci']",
'core.py': "",
'gui.py': "pytest_plugins = ['core', 'sci']",
'sci.py': "pytest_plugins = ['core']",
'test_rewrite_warning_pytest_plugins.py': "def test(): pass",
})
testdir.chdir()
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(['*= 1 passed in *=*'])
assert 'pytest-warning summary' not in result.stdout.str()
class TestAssertionRewriteHookDetails(object):
def test_loader_is_package_false_for_module(self, testdir):
testdir.makepyfile(test_fun="""
def test_loader():
assert not __loader__.is_package(__name__)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"* 1 passed*",
])
def test_loader_is_package_true_for_package(self, testdir):
testdir.makepyfile(test_fun="""
def test_loader():
assert not __loader__.is_package(__name__)
def test_fun():
assert __loader__.is_package('fun')
def test_missing():
assert not __loader__.is_package('pytest_not_there')
""")
testdir.mkpydir('fun')
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'* 3 passed*',
])
@pytest.mark.skipif("sys.version_info[0] >= 3")
@pytest.mark.xfail("hasattr(sys, 'pypy_translation_info')")
def test_assume_ascii(self, testdir):
content = "u'\xe2\x99\xa5\x01\xfe'"
testdir.tmpdir.join("test_encoding.py").write(content, "wb")
res = testdir.runpytest()
assert res.ret != 0
assert "SyntaxError: Non-ASCII character" in res.stdout.str()
@pytest.mark.skipif("sys.version_info[0] >= 3")
def test_detect_coding_cookie(self, testdir):
testdir.makepyfile(test_cookie="""
# -*- coding: utf-8 -*-
u"St\xc3\xa4d"
def test_rewritten():
assert "@py_builtins" in globals()""")
assert testdir.runpytest().ret == 0
@pytest.mark.skipif("sys.version_info[0] >= 3")
def test_detect_coding_cookie_second_line(self, testdir):
testdir.makepyfile(test_cookie="""
# -*- coding: utf-8 -*-
u"St\xc3\xa4d"
def test_rewritten():
assert "@py_builtins" in globals()""")
assert testdir.runpytest().ret == 0
@pytest.mark.skipif("sys.version_info[0] >= 3")
def test_detect_coding_cookie_crlf(self, testdir):
testdir.makepyfile(test_cookie="""
# -*- coding: utf-8 -*-
u"St\xc3\xa4d"
def test_rewritten():
assert "@py_builtins" in globals()""")
assert testdir.runpytest().ret == 0
def test_sys_meta_path_munged(self, testdir):
testdir.makepyfile("""
def test_meta_path():
import sys; sys.meta_path = []""")
assert testdir.runpytest().ret == 0
def test_write_pyc(self, testdir, tmpdir, monkeypatch):
from _pytest.assertion.rewrite import _write_pyc
from _pytest.assertion import AssertionState
try:
import __builtin__ as b
except ImportError:
import builtins as b
config = testdir.parseconfig([])
state = AssertionState(config, "rewrite")
source_path = tmpdir.ensure("source.py")
pycpath = tmpdir.join("pyc").strpath
assert _write_pyc(state, [1], source_path.stat(), pycpath)
def open(*args):
e = IOError()
e.errno = 10
raise e
monkeypatch.setattr(b, "open", open)
assert not _write_pyc(state, [1], source_path.stat(), pycpath)
def test_resources_provider_for_loader(self, testdir):
"""
Attempts to load resources from a package should succeed normally,
even when the AssertionRewriteHook is used to load the modules.
See #366 for details.
"""
pytest.importorskip("pkg_resources")
testdir.mkpydir('testpkg')
contents = {
'testpkg/test_pkg': """
import pkg_resources
import pytest
from _pytest.assertion.rewrite import AssertionRewritingHook
def test_load_resource():
assert isinstance(__loader__, AssertionRewritingHook)
res = pkg_resources.resource_string(__name__, 'resource.txt')
res = res.decode('ascii')
assert res == 'Load me please.'
""",
}
testdir.makepyfile(**contents)
testdir.maketxtfile(**{'testpkg/resource': "Load me please."})
result = testdir.runpytest_subprocess()
result.assert_outcomes(passed=1)
def test_read_pyc(self, tmpdir):
"""
Ensure that the `_read_pyc` can properly deal with corrupted pyc files.
In those circumstances it should just give up instead of generating
an exception that is propagated to the caller.
"""
import py_compile
from _pytest.assertion.rewrite import _read_pyc
source = tmpdir.join('source.py')
pyc = source + 'c'
source.write('def test(): pass')
py_compile.compile(str(source), str(pyc))
contents = pyc.read(mode='rb')
strip_bytes = 20 # header is around 8 bytes, strip a little more
assert len(contents) > strip_bytes
pyc.write(contents[:strip_bytes], mode='wb')
assert _read_pyc(source, str(pyc)) is None # no error
def test_reload_is_same(self, testdir):
# A file that will be picked up during collecting.
testdir.tmpdir.join("file.py").ensure()
testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent("""
[pytest]
python_files = *.py
"""))
testdir.makepyfile(test_fun="""
import sys
try:
from imp import reload
except ImportError:
pass
def test_loader():
import file
assert sys.modules["file"] is reload(file)
""")
result = testdir.runpytest('-s')
result.stdout.fnmatch_lines([
"* 1 passed*",
])
def test_get_data_support(self, testdir):
"""Implement optional PEP302 api (#808).
"""
path = testdir.mkpydir("foo")
path.join("test_foo.py").write(_pytest._code.Source("""
class Test:
def test_foo(self):
import pkgutil
data = pkgutil.get_data('foo.test_foo', 'data.txt')
assert data == b'Hey'
"""))
path.join('data.txt').write('Hey')
result = testdir.runpytest()
result.stdout.fnmatch_lines('*1 passed*')
def test_issue731(testdir):
testdir.makepyfile("""
class LongReprWithBraces(object):
def __repr__(self):
return 'LongReprWithBraces({' + ('a' * 80) + '}' + ('a' * 120) + ')'
def some_method(self):
return False
def test_long_repr():
obj = LongReprWithBraces()
assert obj.some_method()
""")
result = testdir.runpytest()
assert 'unbalanced braces' not in result.stdout.str()
class TestIssue925():
def test_simple_case(self, testdir):
testdir.makepyfile("""
def test_ternary_display():
assert (False == False) == False
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines('*E*assert (False == False) == False')
def test_long_case(self, testdir):
testdir.makepyfile("""
def test_ternary_display():
assert False == (False == True) == True
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines('*E*assert (False == True) == True')
def test_many_brackets(self, testdir):
testdir.makepyfile("""
def test_ternary_display():
assert True == ((False == True) == True)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines('*E*assert True == ((False == True) == True)')
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Self attention models for VQA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import vqa_layers
from tensor2tensor.models.research import vqa_attention
from tensor2tensor.utils import registry
# from tensor2tensor.utils import restore_hook
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import utils
@registry.register_model
class VqaSelfAttention(vqa_attention.VqaAttentionBaseline):
"""Self attention both on image and question."""
# @staticmethod
# def train_hooks():
# restore_resnet_hook = restore_hook.RestoreHook(
# # TODO(zichaoy): hard code the path given static function.
# checkpoint_path="/home/zichaoy/resnet_v1_152.ckpt",
# new_model_scope="vqa_self_attention/body/",
# old_model_scope="resnet_v1_152/",
# )
# return [restore_resnet_hook]
def body(self, features):
hp = self.hparams
# pylint: disable=eval-used
if hp.image_input_type == "image":
image_feat = vqa_layers.image_embedding(
features["inputs"],
model_fn=eval(hp.image_model_fn),
trainable=hp.train_resnet,
is_training=hp.mode == tf.estimator.ModeKeys.TRAIN)
else:
image_feat = features["inputs"]
image_feat = common_layers.flatten4d3d(image_feat)
image_hidden_size = hp.image_hidden_size or hp.hidden_size
if hp.image_feat_preprocess_proj:
image_feat = common_layers.dense(image_feat, image_hidden_size)
utils.collect_named_outputs("norms", "image_feat_after_proj",
tf.norm(image_feat, axis=-1))
else:
assert image_hidden_size == 2048
image_feat = tf.nn.dropout(
image_feat, keep_prob=1.-hp.layer_prepostprocess_dropout)
if hp.image_feat_encode:
image_feat = image_encoder(image_feat, hp)
utils.collect_named_outputs("norms", "image_feat_encoded",
tf.norm(image_feat, axis=-1))
else:
image_feat = common_layers.layer_norm(image_feat)
utils.collect_named_outputs("norms", "image_feat_after_layer",
tf.norm(image_feat, axis=-1))
question = common_layers.flatten4d3d(features["question"])
utils.collect_named_outputs("norms", "question_embedding",
tf.norm(question, axis=-1))
question, question_self_attention_bias = prepare_question_encoder(
question, hp)
question = tf.nn.dropout(
question, keep_prob=1.-hp.layer_prepostprocess_dropout)
query = question_encoder(question, question_self_attention_bias, hp)
utils.collect_named_outputs(
"norms", "query_encode", tf.norm(query, axis=-1))
query = (query + tf.expand_dims(
tf.squeeze(question_self_attention_bias, [1, 2]), axis=2))
query = tf.reduce_max(query, axis=1)
utils.collect_named_outputs(
"norms", "query_maxpool", tf.norm(query, axis=-1))
# query = common_layers.l2_norm(query)
# utils.collect_named_outputs("norms", "query_after_l2",
# tf.norm(query, axis=-1))
image_ave = attn(image_feat, query, hp)
utils.collect_named_outputs("norms", "image_ave",
tf.norm(image_ave, axis=-1))
if hp.multimodal_combine == "concat":
image_question = tf.concat([image_ave, query], axis=1)
elif hp.multimodal_combine == "sum":
image_question = image_ave + query
elif hp.multimodal_combine == "product":
image_question = image_ave * query
utils.collect_named_outputs("norms", "image_question",
tf.norm(image_question, axis=-1))
image_question = tf.nn.dropout(image_question, 1. - hp.dropout)
output = mlp(image_question, hp)
utils.collect_named_outputs("norms", "output",
tf.norm(output, axis=-1))
norm_tensors = utils.convert_collection_to_dict("norms")
vqa_layers.summarize_tensors(norm_tensors, tag="norms/")
# Expand dimension 1 and 2
return tf.expand_dims(tf.expand_dims(output, axis=1), axis=2)
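# Hedged summary of VqaSelfAttention.body above; the shapes are illustrative
# assumptions, not asserted anywhere in this file.
#   image_feat ~ [batch, image_positions, image_hidden_size] after flatten/encode
#   query      ~ [batch, hidden_size]  (max-pooled encoded question)
#   image_ave  ~ [batch, hidden_size]  (attention-pooled image summary)
# With hp.multimodal_combine == "concat" the MLP sees image_ave and query
# concatenated along axis 1; "sum" and "product" keep a single vector.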
@registry.register_model
class VqaCombinedSelfAttention(VqaSelfAttention):
"""Combined Self attention both on image and question."""
# @staticmethod
# def train_hooks():
# restore_resnet_hook = restore_hook.RestoreHook(
# # TODO(zichaoy): hard code the path given static function.
# checkpoint_path="/home/zichaoy/resnet_v1_152.ckpt",
# new_model_scope="vqa_combined_self_attention/body/",
# old_model_scope="resnet_v1_152/",
# )
# return [restore_resnet_hook]
def body(self, features):
hp = self.hparams
# pylint: disable=eval-used
if hp.image_input_type == "image":
image_feat = vqa_layers.image_embedding(
features["inputs"],
model_fn=eval(hp.image_model_fn),
trainable=hp.train_resnet,
is_training=hp.mode == tf.estimator.ModeKeys.TRAIN)
else:
image_feat = features["inputs"]
image_feat = common_layers.flatten4d3d(image_feat)
image_hidden_size = hp.hidden_size
image_feat = common_layers.dense(image_feat, image_hidden_size)
utils.collect_named_outputs("norms", "image_feat_after_proj",
tf.norm(image_feat, axis=-1))
question = common_layers.flatten4d3d(features["question"])
utils.collect_named_outputs("norms", "question_embedding",
tf.norm(question, axis=-1))
(encoder_input, encoder_self_attention_bias,
encoder_decoder_attention_bias) = prepare_image_question_encoder(
image_feat, question, hp)
encoder_input = tf.nn.dropout(
encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout)
encoder_output = image_question_encoder(
encoder_input, encoder_self_attention_bias, hp)
utils.collect_named_outputs(
"norms", "encoder_output", tf.norm(encoder_output, axis=-1))
# scale query by sqrt(hidden_size)
    query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size**0.5
query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0)
batch_size = common_layers.shape_list(encoder_input)[0]
query = tf.tile(query, [batch_size, 1, 1])
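    # "query" is a single learned vector tiled to [batch_size, 1, hidden_size];
    # it is the only decoder input and attends over the encoder output below.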
query = tf.nn.dropout(
query, keep_prob=1.-hp.layer_prepostprocess_dropout)
decoder_output = decoder(
query, encoder_output, None, encoder_decoder_attention_bias, hp)
utils.collect_named_outputs("norms", "decoder_output",
tf.norm(decoder_output, axis=-1))
norm_tensors = utils.convert_collection_to_dict("norms")
vqa_layers.summarize_tensors(norm_tensors, tag="norms/")
    # decoder_output is [batch_size, 1, hidden_size]; expand to a 4-d tensor
    # [batch_size, 1, 1, hidden_size] for the top layer.
    return tf.expand_dims(decoder_output, axis=1)
@registry.register_model
class VqaIterativeCombinedSelfAttention(VqaSelfAttention):
"""Combined Self attention both on image and question."""
# @staticmethod
# def train_hooks():
# restore_resnet_hook = restore_hook.RestoreHook(
# # TODO(zichaoy): hard code the path given static function.
# checkpoint_path="/home/zichaoy/resnet_v1_152.ckpt",
# new_model_scope="vqa_combined_self_attention/body/",
# old_model_scope="resnet_v1_152/",
# )
# return [restore_resnet_hook]
def body(self, features):
hp = self.hparams
# pylint: disable=eval-used
if hp.image_input_type == "image":
image_feat = vqa_layers.image_embedding(
features["inputs"],
model_fn=eval(hp.image_model_fn),
trainable=hp.train_resnet,
is_training=hp.mode == tf.estimator.ModeKeys.TRAIN)
else:
image_feat = features["inputs"]
image_feat = common_layers.flatten4d3d(image_feat)
image_hidden_size = hp.hidden_size
image_feat = common_layers.dense(image_feat, image_hidden_size)
utils.collect_named_outputs("norms", "image_feat_after_proj",
tf.norm(image_feat, axis=-1))
question = common_layers.flatten4d3d(features["question"])
utils.collect_named_outputs("norms", "question_embedding",
tf.norm(question, axis=-1))
(encoder_input, encoder_self_attention_bias,
encoder_decoder_attention_bias) = prepare_image_question_encoder(
image_feat, question, hp)
encoder_input = tf.nn.dropout(
encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout)
# scale query by sqrt(hidden_size)
    query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size**0.5
query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0)
batch_size = common_layers.shape_list(encoder_input)[0]
query = tf.tile(query, [batch_size, 1, 1])
query = tf.nn.dropout(
query, keep_prob=1.-hp.layer_prepostprocess_dropout)
decoder_output = iterative_encoder_decoder(
encoder_input,
encoder_self_attention_bias,
encoder_decoder_attention_bias,
query,
hp)
utils.collect_named_outputs("norms", "decoder_output",
tf.norm(decoder_output, axis=-1))
norm_tensors = utils.convert_collection_to_dict("norms")
vqa_layers.summarize_tensors(norm_tensors, tag="norms/")
    # decoder_output is [batch_size, 1, hidden_size]; expand to a 4-d tensor
    # [batch_size, 1, 1, hidden_size] for the top layer.
    return tf.expand_dims(decoder_output, axis=1)
def image_encoder(image_feat,
hparams,
name="image_encoder",
save_weights_to=None,
make_image_summary=True):
"""A stack of self attention layers."""
x = image_feat
image_hidden_size = hparams.image_hidden_size or hparams.hidden_size
image_filter_size = hparams.image_filter_size or hparams.filter_size
with tf.variable_scope(name):
for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
y = vqa_layers.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
None,
hparams.attention_key_channels or image_hidden_size,
hparams.attention_value_channels or image_hidden_size,
image_hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.image_self_attention_type,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
scale_dotproduct=hparams.scale_dotproduct,
)
utils.collect_named_outputs(
"norms", "image_feat_self_attention_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms", "image_feat_self_attention_postprocess_%d"%(layer),
tf.norm(x, axis=-1))
with tf.variable_scope("ffn"):
y = common_layers.dense_relu_dense(
common_layers.layer_preprocess(x, hparams),
image_filter_size,
image_hidden_size,
dropout=hparams.relu_dropout,
)
utils.collect_named_outputs(
"norms", "image_feat_ffn_%d"%(layer), tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms", "image_feat_ffn_postprocess_%d"%(layer),
tf.norm(x, axis=-1))
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
return common_layers.layer_preprocess(x, hparams)
def prepare_question_encoder(inputs, hparams):
"""Prepare question encoder.
Args:
inputs: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
"""
encoder_input = inputs
# Usual case - not a packed dataset.
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
encoder_self_attention_bias = ignore_padding
if hparams.pos == "timing":
encoder_input = common_attention.add_timing_signal_1d(encoder_input)
elif hparams.pos == "emb":
encoder_input = common_attention.add_positional_embedding(
encoder_input, hparams.max_length, "inputs_positional_embedding",
None)
return (encoder_input, encoder_self_attention_bias)
def question_encoder(question,
question_self_attention_bias,
hparams,
name="question_encoder",
save_weights_to=None,
make_image_summary=True):
"""A stack of self attention layers."""
x = question
with tf.variable_scope(name):
for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
y = vqa_layers.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
question_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.question_self_attention_type,
block_length=hparams.block_length,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
scale_dotproduct=hparams.scale_dotproduct,
)
utils.collect_named_outputs(
"norms", "query_self_attention_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms", "query_self_attention_postprocess_%d"%(layer),
tf.norm(x, axis=-1))
with tf.variable_scope("ffn"):
y = common_layers.dense_relu_dense(
common_layers.layer_preprocess(x, hparams),
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout,
)
utils.collect_named_outputs(
"norms", "query_ffn_%d"%(layer), tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms", "query_ffn_postprocess_%d"%(layer),
tf.norm(x, axis=-1))
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
return common_layers.layer_preprocess(x, hparams)
def attn(image_feat,
query,
hparams,
name="attn",
save_weights_to=None,
make_image_summary=True):
"""Attention on image feature with question as query."""
with tf.variable_scope(name, "attn", values=[image_feat, query]):
total_key_depth = hparams.attention_key_channels or hparams.hidden_size
total_value_depth = hparams.attention_value_channels or hparams.hidden_size
num_heads = hparams.num_heads
query = tf.expand_dims(query, 1)
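    # query arrives as [batch_size, hidden_size]; the added length-1 axis makes
    # it a single attention query over every image feature position (the extra
    # axis is squeezed away again before returning).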
q, k, v = common_attention.compute_qkv(
query,
image_feat,
total_key_depth,
total_value_depth,
)
q = common_attention.split_heads(q, num_heads)
k = common_attention.split_heads(k, num_heads)
v = common_attention.split_heads(v, num_heads)
if hparams.scale_dotproduct:
key_depth_per_head = total_key_depth // num_heads
q *= key_depth_per_head**-0.5
# image_feat is input as v
x = common_attention.dot_product_attention(
q, k, v, None,
dropout_rate=hparams.attention_dropout,
image_shapes=None,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary)
x = common_attention.combine_heads(x)
return tf.squeeze(x, axis=1)
def mlp(feature, hparams, name="mlp"):
"""Multi layer perceptron with dropout and relu activation."""
with tf.variable_scope(name, "mlp", values=[feature]):
num_mlp_layers = hparams.num_mlp_layers
mlp_size = hparams.mlp_size
for _ in range(num_mlp_layers):
feature = common_layers.dense(feature, mlp_size, activation=None)
utils.collect_named_outputs("norms", "mlp_feature",
tf.norm(feature, axis=-1))
feature = common_layers.layer_norm(feature)
feature = tf.nn.relu(feature)
feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout)
return feature
def prepare_image_question_encoder(image_feat, question, hparams):
"""Prepare encoder.
Args:
image_feat: a Tensor.
question: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
    encoder_self_attention_bias: a bias tensor for use in encoder self-attention
    encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
      attention
  """
encoder_input = tf.concat([image_feat, question], axis=1)
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
encoder_self_attention_bias = ignore_padding
encoder_decoder_attention_bias = ignore_padding
# Usual case - not a packed dataset.
if hparams.pos == "timing":
question = common_attention.add_timing_signal_1d(question)
elif hparams.pos == "emb":
question = common_attention.add_positional_embedding(
question, hparams.max_length, "inputs_positional_embedding",
None)
encoder_input = tf.concat([image_feat, question], axis=1)
return (encoder_input, encoder_self_attention_bias,
encoder_decoder_attention_bias)
def image_question_encoder(encoder_inputs,
encoder_self_attention_bias,
hparams,
query=None,
name="image_question_encoder",
save_weights_to=None,
make_image_summary=True):
"""A stack of self attention layers."""
x = encoder_inputs
with tf.variable_scope(name):
for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
y = vqa_layers.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
block_length=hparams.block_length,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
scale_dotproduct=hparams.scale_dotproduct,
)
utils.collect_named_outputs(
"norms", "encoder_self_attention_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms", "encoder_self_attention_postprocess_%d"%(layer),
tf.norm(x, axis=-1))
if query is not None:
with tf.variable_scope("encdec_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
query,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
block_length=hparams.block_length,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
scale_dotproduct=hparams.scale_dotproduct,
)
utils.collect_named_outputs(
"norms",
"encoder_decoder_attention_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms",
"encoder_decoder_attention_post_%d"%(layer),
tf.norm(x, axis=-1))
with tf.variable_scope("ffn"):
y = common_layers.dense_relu_dense(
common_layers.layer_preprocess(x, hparams),
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout,
)
utils.collect_named_outputs(
"norms", "encoder_ffn_%d"%(layer), tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms", "encoder_ffn_postprocess_%d"%(layer),
tf.norm(x, axis=-1))
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
return common_layers.layer_preprocess(x, hparams)
def decoder(decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
name="decoder",
save_weights_to=None,
make_image_summary=True,):
"""A stack of transformer layers.
Args:
decoder_input: a Tensor
encoder_output: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
    y: a Tensor
"""
x = decoder_input
with tf.variable_scope(name):
for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers):
layer_name = "layer_%d" % layer
with tf.variable_scope(layer_name):
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
)
utils.collect_named_outputs("norms",
"decoder_self_attention_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs("norms",
"decoder_self_attention_post_%d"%(layer),
tf.norm(x, axis=-1))
if encoder_output is not None:
with tf.variable_scope("encdec_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
encoder_output,
encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
)
utils.collect_named_outputs(
"norms",
"decoder_encoder_attention_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms",
"decoder_encoder_attention_post_%d"%(layer),
tf.norm(x, axis=-1))
with tf.variable_scope("ffn"):
y = common_layers.dense_relu_dense(
common_layers.layer_preprocess(x, hparams),
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout,
)
utils.collect_named_outputs("norms", "decoder_ffn_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs("norms", "decoder_ffn_post_%d"%(layer),
tf.norm(x, axis=-1))
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
return common_layers.layer_preprocess(x, hparams)
def iterative_encoder_decoder(encoder_input,
encoder_self_attention_bias,
encoder_decoder_attention_bias,
query,
hparams):
"""Iterative encoder decoder."""
for _ in xrange(hparams.num_rec_steps):
with tf.variable_scope("step", reuse=tf.AUTO_REUSE):
encoder_output = image_question_encoder(
encoder_input,
encoder_self_attention_bias,
hparams,
query)
decoder_output = decoder(
query,
encoder_output,
None,
encoder_decoder_attention_bias,
hparams)
encoder_input = encoder_output
query = decoder_output
return decoder_output
@registry.register_hparams
def vqa_self_attention_base():
"""VQA attention baseline hparams."""
hparams = common_hparams.basic_params1()
hparams.batch_size = 128
  hparams.use_fixed_batch_size = True
hparams.optimizer = "Adam"
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.997
hparams.optimizer_adam_epsilon = 1e-9
hparams.weight_decay = 0.
hparams.clip_grad_norm = 0.
hparams.initializer = "xavier"
hparams.learning_rate_schedule = (
"constant*linear_warmup*rsqrt_normalized_decay")
hparams.learning_rate_warmup_steps = 8000
hparams.learning_rate_constant = 1e-3
hparams.learning_rate_decay_rate = 0.5
hparams.learning_rate_decay_steps = 50000
hparams.dropout = 0.5
hparams.summarize_grads = True
hparams.summarize_vars = True
# not used hparams
hparams.label_smoothing = 0.
hparams.multiply_embedding_mode = "sqrt_depth"
# add new hparams
# use raw image as input
hparams.add_hparam("image_input_type", "image")
hparams.add_hparam("image_model_fn", "resnet_v1_152")
hparams.add_hparam("resize_side", 512)
hparams.add_hparam("height", 448)
hparams.add_hparam("width", 448)
hparams.add_hparam("distort", True)
hparams.add_hparam("train_resnet", False)
# image parts
hparams.add_hparam("image_feat_preprocess_proj", True)
hparams.add_hparam("image_feat_preprocess_layernorm", True)
hparams.add_hparam("image_feat_encode", True)
hparams.add_hparam("image_hidden_size", 0) # default to hidden_size
hparams.add_hparam("image_filter_size", 0) # defaults to filter_size
# question hidden size
hparams.hidden_size = 512
hparams.filter_size = 1024
hparams.num_hidden_layers = 4
hparams.add_hparam("multimodal_combine", "concat")
hparams.add_hparam("num_mlp_layers", 1)
hparams.add_hparam("mlp_size", 1024)
# self attention parts
hparams.norm_type = "layer"
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
hparams.add_hparam("pos", "timing")
hparams.add_hparam("num_encoder_layers", 0)
hparams.add_hparam("num_decoder_layers", 0)
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("self_attention_type", "dot_product")
hparams.add_hparam("image_self_attention_type", "dot_product")
hparams.add_hparam("question_self_attention_type", "dot_product")
hparams.add_hparam("block_length", 1)
hparams.add_hparam("scale_dotproduct", True)
# iterative part
hparams.add_hparam("num_rec_steps", 3)
return hparams
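# Illustrative usage sketch (assumption, not part of this file): a registered
# model and hparams set are selected by name through the tensor2tensor trainer,
# for example:
#   t2t-trainer --model=vqa_self_attention \
#     --hparams_set=vqa_self_attention_feature_batch1024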
@registry.register_hparams
def vqa_self_attention_feature():
hparams = vqa_self_attention_base()
hparams.image_input_type = "feature"
return hparams
@registry.register_hparams
def vqa_self_attention_feature_batch1024():
hparams = vqa_self_attention_feature()
hparams.batch_size = 1024
return hparams
@registry.register_hparams
def vqa_self_attention_feature_batch1024_big():
"""Big model."""
hparams = vqa_self_attention_feature_batch1024()
hparams.learning_rate_constant = 7e-4
hparams.batch_size = 256
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
hparams.attention_dropout = 0.3
hparams.relu_dropout = 0.3
return hparams
@registry.register_hparams
def vqa_self_attention_feature_batch1024_exp():
hparams = vqa_self_attention_feature_batch1024()
hparams.learning_rate_schedule = (
"constant*linear_warmup*exp_decay")
hparams.learning_rate_decay_steps = 4000
return hparams
@registry.register_hparams
def vqa_self_attention_feature_batch1024_hidden6():
hparams = vqa_self_attention_feature_batch1024()
hparams.num_hidden_layers = 6
return hparams
@registry.register_hparams
def vqa_self_attention_feature_batch1024_hidden6_big():
hparams = vqa_self_attention_feature_batch1024_hidden6()
hparams.batch_size = 256
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def vqa_self_attention_feature_batch1024_drop03():
hparams = vqa_self_attention_feature_batch1024()
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def vqa_self_attention_feature_lr5():
hparams = vqa_self_attention_feature()
hparams.learning_rate_constant = 5e-4
return hparams
|
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.utils import settings as setting_utils
LOG = logging.getLogger(__name__)
# Predefined provider network types.
# You can add or override these entries by extra_provider_types
# in the settings.
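# Illustrative sketch (assumption, not part of this module; 'awesome_type' is a
# made-up example name): an entry added via the OPENSTACK_NEUTRON_NETWORK
# setting mirrors the structure below, e.g.
#
# OPENSTACK_NEUTRON_NETWORK = {
#     'extra_provider_types': {
#         'awesome_type': {
#             'display_name': 'Awesome New Type',
#             'require_physical_network': False,
#             'require_segmentation_id': True,
#         },
#     },
# }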
PROVIDER_TYPES = {
'local': {
'display_name': _('Local'),
'require_physical_network': False,
'require_segmentation_id': False,
},
'flat': {
'display_name': _('Flat'),
'require_physical_network': True,
'require_segmentation_id': False,
},
'vlan': {
'display_name': _('VLAN'),
'require_physical_network': True,
'require_segmentation_id': True,
},
'gre': {
'display_name': _('GRE'),
'require_physical_network': False,
'require_segmentation_id': True,
},
'vxlan': {
'display_name': _('VXLAN'),
'require_physical_network': False,
'require_segmentation_id': True,
},
'geneve': {
'display_name': _('Geneve'),
'require_physical_network': False,
'require_segmentation_id': True,
},
'midonet': {
'display_name': _('MidoNet'),
'require_physical_network': False,
'require_segmentation_id': False,
},
'uplink': {
'display_name': _('MidoNet Uplink'),
'require_physical_network': False,
'require_segmentation_id': False,
},
}
# Predefined valid segmentation ID range per network type.
# You can add or override these entries by segmentation_id_range
# in the settings.
SEGMENTATION_ID_RANGE = {
'vlan': (1, 4094),
'gre': (1, (2 ** 32) - 1),
'vxlan': (1, (2 ** 24) - 1),
'geneve': (1, (2 ** 24) - 1),
}
# DEFAULT_PROVIDER_TYPES is used when ['*'] is specified
# in supported_provider_types. This list contains network types
# supported by Neutron ML2 plugin reference implementation.
# You can control enabled network types by
# supported_provider_types setting.
DEFAULT_PROVIDER_TYPES = ['local', 'flat', 'vlan', 'gre', 'vxlan', 'geneve']
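# Illustrative settings sketch (assumption, not part of this module; values are
# examples only): the overrides described above also live under
# OPENSTACK_NEUTRON_NETWORK, e.g.
#
# OPENSTACK_NEUTRON_NETWORK = {
#     'supported_provider_types': ['flat', 'vlan', 'vxlan'],
#     'segmentation_id_range': {'vlan': (1, 4094), 'vxlan': (1, 65535)},
# }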
class CreateNetwork(forms.SelfHandlingForm):
name = forms.CharField(max_length=255,
label=_("Name"),
required=False)
tenant_id = forms.ThemableChoiceField(label=_("Project"))
network_type = forms.ChoiceField(
label=_("Provider Network Type"),
help_text=_("The physical mechanism by which the virtual "
"network is implemented."),
widget=forms.ThemableSelectWidget(attrs={
'class': 'switchable',
'data-slug': 'network_type'
}))
physical_network = forms.CharField(
max_length=255,
label=_("Physical Network"),
help_text=_("The name of the physical network over which the "
"virtual network is implemented. Specify one of the "
"physical networks defined in your neutron deployment."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'network_type',
}))
segmentation_id = forms.IntegerField(
label=_("Segmentation ID"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'network_type',
}))
admin_state = forms.BooleanField(
label=_("Enable Admin State"),
initial=True,
required=False,
help_text=_("If checked, the network will be enabled."))
shared = forms.BooleanField(label=_("Shared"),
initial=False, required=False)
external = forms.BooleanField(label=_("External Network"),
initial=False, required=False)
with_subnet = forms.BooleanField(label=_("Create Subnet"),
widget=forms.CheckboxInput(attrs={
'class': 'switchable',
'data-slug': 'with_subnet',
'data-hide-tab': 'create_network__'
'createsubnetinfo'
'action,'
'create_network__'
'createsubnetdetail'
'action',
'data-hide-on-checked': 'false'
}),
initial=True,
required=False)
az_hints = forms.MultipleChoiceField(
label=_("Availability Zone Hints"),
required=False,
help_text=_("Availability zones where the DHCP agents may be "
"scheduled. Leaving this unset is equivalent to "
"selecting all availability zones"))
mtu = forms.IntegerField(
label=_("MTU"), required=False, min_value=68,
help_text=_("Maximum Transmission Unit. "
"Minimum is 68 bytes for the IPv4 subnet "
"and 1280 bytes for the IPv6 subnet."))
@classmethod
def _instantiate(cls, request, *args, **kwargs):
return cls(request, *args, **kwargs)
def __init__(self, request, *args, **kwargs):
super().__init__(request, *args, **kwargs)
tenant_choices = [('', _("Select a project"))]
tenants, has_more = api.keystone.tenant_list(request)
for tenant in tenants:
if tenant.enabled:
tenant_choices.append((tenant.id, tenant.name))
self.fields['tenant_id'].choices = tenant_choices
try:
is_extension_supported = \
api.neutron.is_extension_supported(request, 'provider')
except Exception:
msg = _("Unable to verify Neutron service providers")
exceptions.handle(self.request, msg)
self._hide_provider_network_type()
is_extension_supported = False
if is_extension_supported:
self.seg_id_range = SEGMENTATION_ID_RANGE.copy()
seg_id_range = setting_utils.get_dict_config(
'OPENSTACK_NEUTRON_NETWORK', 'segmentation_id_range')
if seg_id_range:
self.seg_id_range.update(seg_id_range)
self.provider_types = PROVIDER_TYPES.copy()
extra_provider_types = setting_utils.get_dict_config(
'OPENSTACK_NEUTRON_NETWORK', 'extra_provider_types')
if extra_provider_types:
self.provider_types.update(extra_provider_types)
self.nettypes_with_seg_id = [
net_type for net_type in self.provider_types
if self.provider_types[net_type]['require_segmentation_id']]
self.nettypes_with_physnet = [
net_type for net_type in self.provider_types
if self.provider_types[net_type]['require_physical_network']]
supported_provider_types = setting_utils.get_dict_config(
'OPENSTACK_NEUTRON_NETWORK', 'supported_provider_types')
if supported_provider_types == ['*']:
supported_provider_types = DEFAULT_PROVIDER_TYPES
undefined_provider_types = [
net_type for net_type in supported_provider_types
if net_type not in self.provider_types]
if undefined_provider_types:
LOG.error('Undefined provider network types are found: %s',
undefined_provider_types)
seg_id_help = [
_("For %(type)s networks, valid IDs are %(min)s to %(max)s.")
% {'type': net_type,
'min': self.seg_id_range[net_type][0],
'max': self.seg_id_range[net_type][1]}
for net_type in self.nettypes_with_seg_id]
self.fields['segmentation_id'].help_text = ' '.join(seg_id_help)
# Register network types which require segmentation ID
attrs = dict(('data-network_type-%s' % network_type,
_('Segmentation ID'))
for network_type in self.nettypes_with_seg_id)
self.fields['segmentation_id'].widget.attrs.update(attrs)
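            # With the default provider types this yields attrs such as
            # {'data-network_type-vlan': 'Segmentation ID',
            #  'data-network_type-gre': 'Segmentation ID', ...}; the "switched"
            # widget uses these to show and label the field only when the
            # selected network type requires a segmentation ID.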
physical_networks = setting_utils.get_dict_config(
'OPENSTACK_NEUTRON_NETWORK', 'physical_networks')
if physical_networks:
self.fields['physical_network'] = forms.ThemableChoiceField(
label=_("Physical Network"),
choices=[(net, net) for net in physical_networks],
widget=forms.ThemableSelectWidget(attrs={
'class': 'switched',
'data-switch-on': 'network_type',
}),
help_text=_("The name of the physical network over "
"which the virtual network is implemented."),)
# Register network types which require physical network
attrs = dict(('data-network_type-%s' % network_type,
_('Physical Network'))
for network_type in self.nettypes_with_physnet)
self.fields['physical_network'].widget.attrs.update(attrs)
network_type_choices = [
(net_type, self.provider_types[net_type]['display_name'])
for net_type in supported_provider_types]
if not network_type_choices:
self._hide_provider_network_type()
else:
self.fields['network_type'].choices = network_type_choices
try:
if api.neutron.is_extension_supported(request,
'network_availability_zone'):
zones = api.neutron.list_availability_zones(
self.request, 'network', 'available')
self.fields['az_hints'].choices = [(zone['name'], zone['name'])
for zone in zones]
else:
del self.fields['az_hints']
except Exception:
msg = _('Failed to get availability zone list.')
messages.warning(request, msg)
del self.fields['az_hints']
def _hide_provider_network_type(self):
self.fields['network_type'].widget = forms.HiddenInput()
self.fields['physical_network'].widget = forms.HiddenInput()
self.fields['segmentation_id'].widget = forms.HiddenInput()
self.fields['network_type'].required = False
self.fields['physical_network'].required = False
self.fields['segmentation_id'].required = False
def handle(self, request, data):
try:
params = {'name': data['name'],
'tenant_id': data['tenant_id'],
'admin_state_up': data['admin_state'],
'shared': data['shared'],
'router:external': data['external']}
if api.neutron.is_extension_supported(request, 'provider'):
network_type = data['network_type']
params['provider:network_type'] = network_type
if network_type in self.nettypes_with_physnet:
params['provider:physical_network'] = (
data['physical_network'])
if network_type in self.nettypes_with_seg_id:
params['provider:segmentation_id'] = (
data['segmentation_id'])
if 'az_hints' in data and data['az_hints']:
params['availability_zone_hints'] = data['az_hints']
if data['mtu']:
params['mtu'] = data['mtu']
network = api.neutron.network_create(request, **params)
LOG.debug('Network %s was successfully created.', data['name'])
return network
except Exception:
redirect = reverse('horizon:admin:networks:index')
msg = _('Failed to create network %s') % data['name']
exceptions.handle(request, msg, redirect=redirect)
def clean(self):
cleaned_data = super().clean()
if api.neutron.is_extension_supported(self.request, 'provider'):
self._clean_physical_network(cleaned_data)
self._clean_segmentation_id(cleaned_data)
return cleaned_data
def _clean_physical_network(self, data):
network_type = data.get('network_type')
if ('physical_network' in self._errors and
network_type not in self.nettypes_with_physnet):
# In this case the physical network is not required, so we can
# ignore any errors.
del self._errors['physical_network']
def _clean_segmentation_id(self, data):
network_type = data.get('network_type')
if 'segmentation_id' in self._errors:
if (network_type not in self.nettypes_with_seg_id and
not self.data.get("segmentation_id")):
# In this case the segmentation ID is not required, so we can
# ignore the field is required error.
del self._errors['segmentation_id']
elif network_type in self.nettypes_with_seg_id:
seg_id = data.get('segmentation_id')
seg_id_range = {'min': self.seg_id_range[network_type][0],
'max': self.seg_id_range[network_type][1]}
if seg_id < seg_id_range['min'] or seg_id > seg_id_range['max']:
msg = (_('For a %(network_type)s network, valid segmentation '
'IDs are %(min)s through %(max)s.')
% {'network_type': network_type,
'min': seg_id_range['min'],
'max': seg_id_range['max']})
self._errors['segmentation_id'] = self.error_class([msg])
class UpdateNetwork(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"), required=False)
admin_state = forms.BooleanField(
label=_("Enable Admin State"),
required=False,
help_text=_("If checked, the network will be enabled."))
shared = forms.BooleanField(label=_("Shared"), required=False)
external = forms.BooleanField(label=_("External Network"), required=False)
failure_url = 'horizon:admin:networks:index'
def handle(self, request, data):
try:
params = {'name': data['name'],
'admin_state_up': data['admin_state'],
'shared': data['shared'],
'router:external': data['external']}
network = api.neutron.network_update(request,
self.initial['network_id'],
**params)
msg = (_('Network %s was successfully updated.') %
network.name_or_id)
messages.success(request, msg)
return network
except Exception as e:
LOG.info('Failed to update network %(id)s: %(exc)s',
{'id': self.initial['network_id'], 'exc': e})
name_or_id = data['name'] or self.initial['network_id']
msg = _('Failed to update network %s') % name_or_id
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import main
from labman.db.exceptions import LabmanUnknownIdError
from labman.db.testing import LabmanTestCase
from labman.db.container import Tube, Well
from labman.db.study import Study
from labman.db.plate import Plate
from labman.db.process import (
ReagentCreationProcess, GDNAExtractionProcess, SamplePlatingProcess)
from labman.db.composition import (
Composition, ReagentComposition, SampleComposition, GDNAComposition,
LibraryPrep16SComposition, PoolComposition, PrimerComposition,
PrimerSetComposition, NormalizedGDNAComposition, ShotgunPrimerSet,
LibraryPrepShotgunComposition, PrimerSet, CompressedGDNAComposition)
# Most of the tests in this file do not modify the database, and the ones that
# do make sure to revert their changes. To avoid unnecessary DB resets we are
# going to create a single Test class, reducing the running time of the tests
# from ~12 minutes to ~2 minutes.
class TestsComposition(LabmanTestCase):
def test_composition_factory(self):
self.assertEqual(Composition.factory(3073), ReagentComposition(1))
self.assertEqual(Composition.factory(1537), PrimerComposition(1))
self.assertEqual(Composition.factory(1), PrimerSetComposition(1))
self.assertEqual(Composition.factory(3081), SampleComposition(1))
self.assertEqual(Composition.factory(3082), GDNAComposition(1))
self.assertEqual(Composition.factory(3083),
LibraryPrep16SComposition(1))
self.assertEqual(Composition.factory(3084),
CompressedGDNAComposition(1))
self.assertEqual(Composition.factory(3085),
NormalizedGDNAComposition(1))
self.assertEqual(Composition.factory(3086),
LibraryPrepShotgunComposition(1))
self.assertEqual(Composition.factory(3078), PoolComposition(1))
def test_reagent_composition_list_reagents(self):
obs = ReagentComposition.list_reagents()
exp = ['157022406', '443912', 'KHP1', 'RNBF7110', 'STUBS1']
self.assertEqual(obs, exp)
obs = ReagentComposition.list_reagents(term='39')
exp = ['443912']
self.assertEqual(obs, exp)
obs = ReagentComposition.list_reagents(reagent_type='extraction kit')
exp = ['157022406']
self.assertEqual(obs, exp)
obs = ReagentComposition.list_reagents(reagent_type='water', term='BF')
exp = ['RNBF7110']
self.assertEqual(obs, exp)
obs = ReagentComposition.list_reagents(reagent_type='water', term='22')
exp = []
self.assertEqual(obs, exp)
def test_reagent_composition_from_external_id(self):
self.assertEqual(ReagentComposition.from_external_id('157022406'),
ReagentComposition(1))
with self.assertRaises(LabmanUnknownIdError):
ReagentComposition.from_external_id('Does not exist')
def test_reagent_composition_attributes(self):
obs = ReagentComposition(1)
self.assertEqual(obs.upstream_process, ReagentCreationProcess(5))
self.assertEqual(obs.container, Tube(1))
self.assertEqual(obs.total_volume, 10)
self.assertIsNone(obs.notes)
self.assertEqual(obs.composition_id, 3073)
self.assertEqual(obs.external_lot_id, '157022406')
self.assertEqual(obs.reagent_type, 'extraction kit')
self.assertIsNone(obs.study)
def test_primer_composition_attributes(self):
obs = PrimerComposition(1)
self.assertEqual(obs.container, Well(1537))
self.assertEqual(obs.total_volume, 10)
self.assertIsNone(obs.notes)
self.assertEqual(obs.composition_id, 1537)
self.assertEqual(obs.primer_set_composition, PrimerSetComposition(1))
self.assertIsNone(obs.study)
def test_primer_set_composition_attributes(self):
obs = PrimerSetComposition(1)
self.assertEqual(obs.container, Well(1))
self.assertEqual(obs.total_volume, 0)
self.assertIsNone(obs.notes)
self.assertEqual(obs.composition_id, 1)
self.assertEqual(obs.barcode, 'TCCCTTGTCTCC')
self.assertIsNone(obs.study)
def test_sample_composition_get_control_samples(self):
self.assertEqual(
SampleComposition.get_control_samples(),
['blank', 'empty', 'vibrio.positive.control', 'zymo.mock'])
self.assertEqual(SampleComposition.get_control_samples('l'),
['blank', 'vibrio.positive.control'])
self.assertEqual(SampleComposition.get_control_samples('bla'),
['blank'])
self.assertEqual(SampleComposition.get_control_samples('posit'),
['vibrio.positive.control'])
self.assertEqual(SampleComposition.get_control_samples('vib'),
['vibrio.positive.control'])
self.assertEqual(SampleComposition.get_control_samples('TrOL'),
['vibrio.positive.control'])
def test_sample_composition_get_control_sample_types_description(self):
obs = SampleComposition.get_control_sample_types_description()
exp = [
{'external_id': 'blank',
'description': 'gDNA extraction blanks. Represents an empty '
'extraction well.'},
{'external_id': 'empty',
'description': 'Empty well. Represents an empty well that should '
'not be included in library preparation.'},
{'external_id': 'vibrio.positive.control',
'description': 'Bacterial isolate control (Vibrio fischeri ES114)'
'. Represents an extraction well loaded with '
'Vibrio.'},
{'external_id': 'zymo.mock',
'description': 'Bacterial community control (Zymo Mock D6306). '
'Represents an extraction well loaded with Zymo '
'Mock community.'}]
self.assertEqual(obs, exp)
def test_sample_composition_attributes(self):
# Test a sample
obs = SampleComposition(1)
self.assertEqual(obs.sample_composition_type, 'experimental sample')
self.assertEqual(obs.sample_id, '1.SKB1.640202')
self.assertEqual(obs.content, '1.SKB1.640202.21.A1')
self.assertEqual(obs.upstream_process, SamplePlatingProcess(10))
self.assertEqual(obs.container, Well(3073))
self.assertEqual(obs.total_volume, 10)
self.assertIsNone(obs.notes)
obs.notes = 'New Notes'
self.assertEqual(obs.notes, 'New Notes')
obs.notes = None
self.assertIsNone(obs.notes)
self.assertEqual(obs.composition_id, 3081)
self.assertEqual(obs.study, Study(1))
# Test a control sample
obs = SampleComposition(85)
self.assertEqual(obs.sample_composition_type, 'blank')
self.assertIsNone(obs.sample_id)
self.assertEqual(obs.content, 'blank.21.H1')
self.assertEqual(obs.upstream_process, SamplePlatingProcess(10))
self.assertEqual(obs.container, Well(4333))
self.assertEqual(obs.total_volume, 10)
self.assertIsNone(obs.notes)
self.assertEqual(obs.composition_id, 4341)
self.assertIsNone(obs.study)
def test_sample_composition_get_sample_composition_type_id(self):
self.assertEqual(
SampleComposition._get_sample_composition_type_id(
'experimental sample'), 1)
self.assertEqual(
SampleComposition._get_sample_composition_type_id('blank'), 2)
self.assertEqual(
SampleComposition._get_sample_composition_type_id(
'vibrio.positive.control'), 3)
def test_sample_composition_update(self):
tester = SampleComposition(85)
# Make sure that the sample composition that we are working with
# is a control sample
self.assertEqual(tester.sample_composition_type, 'blank')
self.assertIsNone(tester.sample_id)
self.assertEqual(tester.content, 'blank.21.H1')
# Update a well from CONTROL -> EXPERIMENTAL SAMPLE
self.assertEqual(tester.update('1.SKM8.640201'),
('1.SKM8.640201', True))
self.assertEqual(tester.sample_composition_type, 'experimental sample')
self.assertEqual(tester.sample_id, '1.SKM8.640201')
self.assertEqual(tester.content, '1.SKM8.640201')
        # This checks that the code automatically detects when a sample is
        # duplicated in the plate and adds the plate ID and well ID to all
        # duplicates.
t2 = SampleComposition(86)
self.assertEqual(t2.update('1.SKM8.640201'),
('1.SKM8.640201.21.H2', True))
self.assertEqual(t2.sample_composition_type, 'experimental sample')
self.assertEqual(t2.sample_id, '1.SKM8.640201')
self.assertEqual(t2.content, '1.SKM8.640201.21.H2')
self.assertEqual(tester.sample_composition_type, 'experimental sample')
self.assertEqual(tester.sample_id, '1.SKM8.640201')
self.assertEqual(tester.content, '1.SKM8.640201.21.H1')
        # This checks that the code automatically detects when a sample is no
        # longer duplicated in the plate and removes the plate ID and well ID
        # from the sample content.
self.assertEqual(t2.update('blank'), ('blank.21.H2', True))
self.assertEqual(tester.content, '1.SKM8.640201')
# Update a well from EXPERIMENTAL SAMPLE -> EXPERIMENTAL SAMPLE
self.assertEqual(tester.update('1.SKB6.640176'),
('1.SKB6.640176.21.H1', True))
self.assertEqual(tester.sample_composition_type, 'experimental sample')
self.assertEqual(tester.sample_id, '1.SKB6.640176')
self.assertEqual(tester.content, '1.SKB6.640176.21.H1')
# Update a well from EXPERIMENTAL SAMPLE -> CONTROL
self.assertEqual(tester.update('vibrio.positive.control'),
('vibrio.positive.control.21.H1', True))
self.assertEqual(tester.sample_composition_type,
'vibrio.positive.control')
self.assertIsNone(tester.sample_id)
self.assertEqual(tester.content, 'vibrio.positive.control.21.H1')
        # Update a well from CONTROL -> CONTROL
self.assertEqual(tester.update('blank'), ('blank.21.H1', True))
self.assertEqual(tester.sample_composition_type, 'blank')
self.assertIsNone(tester.sample_id)
self.assertEqual(tester.content, 'blank.21.H1')
        # Update a well from CONTROL -> Unknown
self.assertEqual(tester.update('Unknown'), ('Unknown', False))
self.assertEqual(tester.sample_composition_type, 'experimental sample')
self.assertIsNone(tester.sample_id)
self.assertEqual(tester.content, 'Unknown')
# Update a well from Unknown -> CONTROL
self.assertEqual(tester.update('blank'), ('blank.21.H1', True))
self.assertEqual(tester.sample_composition_type, 'blank')
self.assertIsNone(tester.sample_id)
self.assertEqual(tester.content, 'blank.21.H1')
def test_gDNA_composition_attributes(self):
obs = GDNAComposition(1)
self.assertEqual(obs.sample_composition, SampleComposition(1))
self.assertEqual(obs.upstream_process, GDNAExtractionProcess(1))
self.assertEqual(obs.container, Well(3074))
self.assertEqual(obs.total_volume, 10)
self.assertIsNone(obs.notes)
self.assertEqual(obs.composition_id, 3082)
self.assertEqual(obs.study, Study(1))
def test_library_prep_16S_composition_attributes(self):
obs = LibraryPrep16SComposition(1)
self.assertEqual(obs.container, Well(3075))
self.assertEqual(obs.total_volume, 10)
self.assertIsNone(obs.notes)
self.assertEqual(obs.gdna_composition, GDNAComposition(1))
self.assertEqual(obs.primer_composition, PrimerComposition(1))
self.assertEqual(obs.composition_id, 3083)
self.assertEqual(obs.study, Study(1))
def test_compressed_gDNA_composition_attributes(self):
obs = CompressedGDNAComposition(1)
self.assertEqual(obs.container, Well(3076))
self.assertEqual(obs.total_volume, 10)
self.assertIsNone(obs.notes)
self.assertEqual(obs.gdna_composition, GDNAComposition(1))
def test_normalized_gDNA_composition_attributes(self):
obs = NormalizedGDNAComposition(1)
self.assertEqual(obs.container, Well(3077))
self.assertEqual(obs.total_volume, 3500)
self.assertIsNone(obs.notes)
self.assertEqual(obs.compressed_gdna_composition,
CompressedGDNAComposition(1))
self.assertEqual(obs.dna_volume, 415)
self.assertEqual(obs.water_volume, 3085)
self.assertEqual(obs.composition_id, 3085)
self.assertEqual(obs.study, Study(1))
def test_library_prep_shotgun_composition_attributes(self):
obs = LibraryPrepShotgunComposition(1)
self.assertEqual(obs.container, Well(3078))
        self.assertEqual(obs.total_volume, 4000)
self.assertIsNone(obs.notes)
self.assertEqual(obs.normalized_gdna_composition,
NormalizedGDNAComposition(1))
self.assertEqual(obs.i5_composition, PrimerComposition(769))
self.assertEqual(obs.i7_composition, PrimerComposition(770))
self.assertEqual(obs.composition_id, 3086)
self.assertEqual(obs.study, Study(1))
def test_pool_composition_pools(self):
obs = PoolComposition.list_pools()
exp = [{'pool_composition_id': 1,
'external_id': 'Test Pool from Plate 1'},
{'pool_composition_id': 2,
'external_id': 'Test sequencing pool 1'},
{'pool_composition_id': 3,
'external_id': 'Test pool from Shotgun plate 1'}]
self.assertEqual(obs, exp)
def test_pool_composition_attributes(self):
obs = PoolComposition(1)
self.assertEqual(obs.container, Tube(6))
self.assertEqual(obs.total_volume, 96)
self.assertIsNone(obs.notes)
self.assertEqual(obs.composition_id, 3078)
obs_comp = obs.components
self.assertEqual(len(obs_comp), 95)
exp = {'composition': LibraryPrep16SComposition(1),
'input_volume': 1.0, 'percentage_of_output': 0}
self.assertEqual(obs_comp[0], exp)
self.assertEqual(obs.raw_concentration, 1.5)
def test_primer_set_attributes(self):
obs = PrimerSet(1)
self.assertEqual(obs.external_id, 'EMP 16S V4 primer set')
self.assertEqual(obs.target_name, 'Amplicon')
self.assertIsNone(obs.notes)
self.assertEqual(obs.plates, [Plate(1), Plate(2), Plate(3), Plate(4),
Plate(5), Plate(6), Plate(7), Plate(8)])
def test_primer_set_list(self):
obs = PrimerSet.list_primer_sets()
exp = [{'primer_set_id': 1, 'external_id': 'EMP 16S V4 primer set',
'target_name': 'Amplicon'},
{'primer_set_id': 2, 'external_id': 'iTru shotgun primer set',
'target_name': 'Shotgun'}]
self.assertEqual(obs, exp)
# These tests do modify the database in a way that can't be easily reverted,
# hence they live in their own class so the DB gets reset afterwards.
class TestShotgunPrimerSet(LabmanTestCase):
def test_attributes(self):
tester = ShotgunPrimerSet(1)
self.assertEqual(tester.external_id, 'iTru combos December 2017')
def test_get_next_combos(self):
tester = ShotgunPrimerSet(1)
        # NOTE: 380 instead of 384 because the test sample plate contains 1
        # empty well. When the plate is collapsed 4 times into a 384-well
        # plate, this results in 4 empty wells not included in library prep.
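        # (4 x 96 wells = 384 positions, minus the 4 empties = 380 combos
        # already consumed, matching current_combo_index below.)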
self.assertEqual(tester.current_combo_index, 380)
with self.assertRaises(ValueError):
tester.get_next_combos(0)
with self.assertRaises(ValueError):
tester.get_next_combos(150000)
obs = tester.get_next_combos(5)
self.assertEqual(tester.current_combo_index, 385)
self.assertEqual(len(obs), 5)
exp = [(PrimerSetComposition(1146), PrimerSetComposition(1530)),
(PrimerSetComposition(1148), PrimerSetComposition(1532)),
(PrimerSetComposition(1150), PrimerSetComposition(1534)),
(PrimerSetComposition(1152), PrimerSetComposition(1536)),
(PrimerSetComposition(769), PrimerSetComposition(1155))]
self.assertEqual(obs, exp)
class TestCreateControlSample(LabmanTestCase):
def test_create_control_sample_type(self):
SampleComposition.create_control_sample_type(
'testing.control', 'A test')
obs = SampleComposition.get_control_sample_types_description()
exp = [
{'external_id': 'blank',
'description': 'gDNA extraction blanks. Represents an empty '
'extraction well.'},
{'external_id': 'empty',
'description': 'Empty well. Represents an empty well that should '
'not be included in library preparation.'},
{'external_id': 'testing.control',
'description': 'A test'},
{'external_id': 'vibrio.positive.control',
'description': 'Bacterial isolate control (Vibrio fischeri ES114)'
'. Represents an extraction well loaded with '
'Vibrio.'},
{'external_id': 'zymo.mock',
'description': 'Bacterial community control (Zymo Mock D6306). '
'Represents an extraction well loaded with Zymo '
'Mock community.'}]
self.assertEqual(obs, exp)
if __name__ == '__main__':
main()
|
|
import collections
import sys
from django.conf import settings
from django.core.management.color import color_style
from django.utils.encoding import force_str
from django.utils.itercompat import is_iterable
from django.utils import six
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR(force_str("%s: %s\n" % (context, error))))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app, include_swapped=True):
opts = cls._meta
# Check swappable attribute.
if opts.swapped:
try:
app_label, model_name = opts.swapped.split('.')
except ValueError:
e.add(opts, "%s is not of the form 'app_label.app_name'." % opts.swappable)
continue
if not models.get_model(app_label, model_name):
e.add(opts, "Model has been swapped out for '%s' which has not been installed or is abstract." % opts.swapped)
# No need to perform any other validation checks on a swapped model.
continue
# If this is the current User model, check known validation problems with User models
if settings.AUTH_USER_MODEL == '%s.%s' % (opts.app_label, opts.object_name):
# Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS.
if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS:
e.add(opts, 'The field named as the USERNAME_FIELD should not be included in REQUIRED_FIELDS on a swappable User model.')
# Check that the username field is unique
if not opts.get_field(cls.USERNAME_FIELD).unique:
e.add(opts, 'The USERNAME_FIELD must be unique. Add unique=True to the field parameters.')
# Model isn't swapped; do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if (f.primary_key and f.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
e.add(opts, '"%s": Primary key fields cannot have null=True.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
decimalp_msg = '"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places > max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
try:
from django.utils.image import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install Pillow. Get it at https://pypi.python.org/pypi/Pillow.' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if isinstance(f, models.FilePathField) and not (f.allow_files or f.allow_folders):
e.add(opts, '"%s": FilePathFields must have either allow_files or allow_folders set to True.' % f.name)
if f.choices:
if isinstance(f.choices, six.string_types) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if not isinstance(c, (list, tuple)) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Make sure the related field specified by a ForeignKey is unique
if f.requires_unique_target:
if len(f.foreign_related_fields) > 1:
has_unique_field = False
for rel_field in f.foreign_related_fields:
has_unique_field = has_unique_field or rel_field.unique
if not has_unique_field:
e.add(opts, "Field combination '%s' under model '%s' must have a unique=True constraint" % (','.join([rel_field.name for rel_field in f.foreign_related_fields]), f.rel.to.__name__))
else:
if not f.foreign_related_fields[0].unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.foreign_related_fields[0].name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = f.related.get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, six.string_types):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, six.string_types):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
rel_opts = f.rel.to._meta
rel_name = f.related.get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?':
continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
if field_name == 'pk':
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
validate_local_fields(e, opts, "unique_together", ut)
if not isinstance(opts.index_together, collections.Sequence):
e.add(opts, '"index_together" must a sequence')
else:
for it in opts.index_together:
validate_local_fields(e, opts, "index_together", it)
return len(e.errors)
def validate_local_fields(e, opts, field_name, fields):
from django.db import models
if not isinstance(fields, collections.Sequence):
e.add(opts, 'all %s elements must be sequences' % field_name)
else:
for field in fields:
try:
f = opts.get_field(field, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"%s" refers to %s, a field that doesn\'t exist.' % (field_name, field))
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"%s" refers to %s. ManyToManyFields are not supported in %s.' % (field_name, f.name, field_name))
if f not in opts.local_fields:
e.add(opts, '"%s" refers to %s. This is not in the same model as the %s statement.' % (field_name, f.name, field_name))
|
|
import rospy
import mongodb_store_msgs.srv as dc_srv
import mongodb_store.util as dc_util
from mongodb_store_msgs.msg import StringPair, StringPairList, SerialisedMessage, Insert
from bson import json_util
from bson.objectid import ObjectId
import json
import copy
class MessageStoreProxy:
"""
A class that provides functions for storage and retrieval of ROS Message
objects in the mongodb_store. This is achieved by acting as a proxy to the
services provided by the MessageStore ROS node, and therefore requires the message
store node to be running in addition to the datacentre:
`rosrun mongodb_store message_store_node.py`
    >>> from geometry_msgs.msg import Pose, Point, Quaternion
    >>> msg_store = MessageStoreProxy()
    >>> p = Pose(Point(0, 1, 2), Quaternion(0, 0, 0, 1))
>>> msg_store.insert_named("my favourite pose", p)
>>> retrieved = msg_store.query_named("my favourite pose", Pose._type)
For usage examples, please see `example_message_store_client.py` within the scripts
folder of mongodb_store.
"""
def __init__(self, service_prefix='/message_store', database='message_store', collection='message_store', queue_size=100):
"""
:Args:
            | service_prefix (str): The prefix to the *insert*, *update*, *delete* and
              *query_messages* ROS services.
| database (str): The MongoDB database that this object works with.
| collection (str): The MongoDB collection that this object works with.
"""
self.database = database
self.collection = collection
insert_service = service_prefix + '/insert'
update_service = service_prefix + '/update'
delete_service = service_prefix + '/delete'
query_service = service_prefix + '/query_messages'
# try and get the mongo service, block until available
found_services_first_try = True # if found straight away
while not rospy.is_shutdown():
try:
rospy.wait_for_service(insert_service,5)
rospy.wait_for_service(update_service,5)
rospy.wait_for_service(query_service,5)
rospy.wait_for_service(delete_service,5)
break
            except rospy.ROSException as e:
found_services_first_try = False
rospy.logerr("Could not get message store services. Maybe the message "
"store has not been started? Retrying..")
if not found_services_first_try:
rospy.loginfo("Message store services found.")
self.insert_srv = rospy.ServiceProxy(insert_service, dc_srv.MongoInsertMsg)
self.update_srv = rospy.ServiceProxy(update_service, dc_srv.MongoUpdateMsg)
self.query_srv = rospy.ServiceProxy(query_service, dc_srv.MongoQueryMsg)
self.delete_srv = rospy.ServiceProxy(delete_service, dc_srv.MongoDeleteMsg)
insert_topic = service_prefix + '/insert'
self.pub_insert = rospy.Publisher(insert_topic, Insert, queue_size=queue_size)
def insert_named(self, name, message, meta = {}, wait=True):
"""
Inserts a ROS message into the message storage, giving it a name for convenient
later retrieval.
.. note:: Multiple messages can be stored with the same name.
:Args:
            | name (str): The name to refer to this message as.
            | message (ROS Message): An instance of a ROS message type to store
            | meta (dict): A dictionary of additional meta data to store in association
              with this message.
| wait (bool): If true, waits until database returns object id after insert
:Returns:
| (str) the ObjectId of the MongoDB document containing the stored message.
"""
# create a copy as we're modifying it
meta_copy = copy.copy(meta)
meta_copy["name"] = name
return self.insert(message, meta_copy, wait=wait)
def insert(self, message, meta = {}, wait=True):
"""
Inserts a ROS message into the message storage.
:Args:
| message (ROS Message): An instance of a ROS message type to store
| meta (dict): A dictionary of additional meta data to store in association
              with this message.
| wait (bool): If true, waits until database returns object id after insert
:Returns:
| (str) the ObjectId of the MongoDB document containing the stored message.
"""
# assume meta is a dict, convert k/v to tuple pairs
meta_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta, default=json_util.default)),)
serialised_msg = dc_util.serialise_message(message)
if wait:
return self.insert_srv(self.database, self.collection, serialised_msg, StringPairList(meta_tuple)).id
else:
msg = Insert(self.database, self.collection, serialised_msg, StringPairList(meta_tuple))
self.pub_insert.publish(msg)
return True
def query_id(self, id, type):
"""
Finds and returns the message with the given ID.
:Parameters:
| id (str): The ObjectID of the MongoDB document holding the message.
            | type (str): The ROS message type of the stored message to retrieve.
:Returns:
| message (ROS message), meta (dict): The retrieved message and associated metadata
or *None* if the named message could not be found.
"""
return self.query(type, {'_id': ObjectId(id)}, {}, True)
def delete(self, message_id):
"""
Delete the message with the given ID.
:Parameters:
| message_id (str) : The ObjectID of the MongoDB document holding the message.
:Returns:
| bool : was the object successfully deleted.
"""
return self.delete_srv(self.database, self.collection, message_id)
def query_named(self, name, type, single = True, meta = {}, limit = 0):
"""
Finds and returns the message(s) with the given name.
:Args:
| name (str): The name of the stored messages to retrieve.
| type (str): The type of the stored message.
| single (bool): Should only one message be returned?
| meta (dict): Extra queries on the meta data of the message.
| limit (int): Limit number of return documents
:Return:
| message (ROS message), meta (dict): The retrieved message and associated metadata
or *None* if the named message could not be found.
"""
# create a copy as we're modifying it
meta_copy = copy.copy(meta)
meta_copy["name"] = name
return self.query(type, {}, meta_copy, single, [], limit)
def update_named(self, name, message, meta = {}, upsert = False):
"""
Updates a named message.
:Args:
| name (str): The name of the stored messages to update.
| message (ROS Message): The updated ROS message
| meta (dict): Updated meta data to store with the message.
            | upsert (bool): If True, insert the named message if it doesn't exist.
:Return:
| str, bool: The MongoDB ObjectID of the document, and whether it was altered by
the update.
"""
meta_query = {}
meta_query["name"] = name
# make sure the name goes into the meta info after update
meta_copy = copy.copy(meta)
meta_copy["name"] = name
return self.update(message, meta_copy, {}, meta_query, upsert)
def update_id(self, id, message, meta = {}, upsert = False):
"""
Updates a message by MongoDB ObjectId.
:Args:
            | id (str): The MongoDB ObjectId of the document storing the message.
| message (ROS Message): The updated ROS message
| meta (dict): Updated meta data to store with the message.
            | upsert (bool): If True, insert the named message if it doesn't exist.
:Return:
| str, bool: The MongoDB ObjectID of the document, and whether it was altered by
the update.
"""
msg_query = {'_id': ObjectId(id)}
meta_query = {}
return self.update(message, meta, msg_query, meta_query, upsert)
def update(self, message, meta = {}, message_query = {}, meta_query = {}, upsert = False):
"""
Updates a message.
:Args:
| message (ROS Message): The updated ROS message
| meta (dict): Updated meta data to store with the message.
| message_query (dict): A query to match the ROS message that is to be updated.
| meta_query (dict): A query to match against the meta data of the message to be updated
            | upsert (bool): If True, insert the named message if it doesn't exist.
:Return:
| str, bool: The MongoDB ObjectID of the document, and whether it was altered by
the update.
"""
# serialise the json queries to strings using json_util.dumps
message_query_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(message_query, default=json_util.default)),)
meta_query_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta_query, default=json_util.default)),)
meta_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta, default=json_util.default)),)
return self.update_srv(self.database, self.collection, upsert, StringPairList(message_query_tuple), StringPairList(meta_query_tuple), dc_util.serialise_message(message), StringPairList(meta_tuple))
"""
Returns [message, meta] where message is the queried message and meta a dictionary of meta information. If single is false returns a list of these lists.
"""
def query(self, type, message_query = {}, meta_query = {}, single = False, sort_query = [], projection_query = {}, limit=0):
"""
Finds and returns message(s) matching the message and meta data queries.
:Parameters:
            | type (str): The ROS message type of the stored message to retrieve.
| message_query (dict): A query to match the actual ROS message
| meta_query (dict): A query to match against the meta data of the message
| sort_query (list of tuple): A query to request sorted list to mongodb module
| projection_query (dict): A query to request desired fields to be returned or excluded
| single (bool): Should only one message be returned?
| limit (int): Limit number of return documents
:Returns:
| [message, meta] where message is the queried message and meta a dictionary of
meta information. If single is false returns a list of these lists.
"""
# assume meta is a dict, convert k/v to tuple pairs for ROS msg type
# serialise the json queries to strings using json_util.dumps
message_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(message_query, default=json_util.default)),)
meta_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta_query, default=json_util.default)),)
        projection_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(projection_query, default=json_util.default)),)
if len(sort_query) > 0:
sort_tuple = [StringPair(str(k), str(v)) for k, v in sort_query]
else:
sort_tuple = []
response = self.query_srv(
self.database, self.collection, type, single, limit,
StringPairList(message_tuple),
StringPairList(meta_tuple),
StringPairList(sort_tuple),
StringPairList(projection_tuple))
if response.messages is None:
messages = []
metas = []
else:
messages = map(dc_util.deserialise_message, response.messages)
metas = map(dc_util.string_pair_list_to_dictionary, response.metas)
if single:
if len(messages) > 0:
return [messages[0], metas[0]]
else:
return [None, None]
else:
            return zip(messages, metas)
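# Illustrative usage sketch (not part of the API above): querying by meta data,
# assuming a running message_store_node and previously stored Pose messages.
#
#     from geometry_msgs.msg import Pose
#     msg_store = MessageStoreProxy()
#     results = msg_store.query(Pose._type,
#                               meta_query={"name": "my favourite pose"},
#                               limit=10)
#     for message, meta in results:
#         print(meta["name"], message.position)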
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
==================
3D Transform class
==================
A class containing a transformation matrix and providing several methods
to use/alter it. Transform uses Vector objects for most of its methods
arguments.
"""
from math import *
from Vector import Vector
# =============================
# Transform: for generating transformation matrices
# =============================
class Transform:
"""\
Transform([matrix]) -> new Transform object.
Keyword arguments:
- m -- A matrix containing values to be initially set
"""
def __init__(self, m = None):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
# load identity
if m is not None:
self.m = m
else:
self.m = [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]
def getMatrix(self):
""" Returns the transformation matrix. """
return self.m
def reset(self):
""" Resets to identity matrix. """
self.m = [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]
def applyRotation(self, xyzangle):
"""\
Apply a rotation.
Arguments:
- xyzangle -- A Vector containing the amount of rotation around each axis.
"""
global pi
t = Transform()
        # convert degrees to radians
xyzangle *= pi/180.0
#rotation around x axis
if xyzangle.x != 0:
t.m[5] = cos(xyzangle.x)
t.m[6] = sin(xyzangle.x)
t.m[9] = -sin(xyzangle.x)
t.m[10] = cos(xyzangle.x)
self.m = (self*t).m
#rotation around y axis
t.reset()
if xyzangle.y != 0:
t.m[0] = cos(xyzangle.y)
t.m[2] = -sin(xyzangle.y)
t.m[8] = sin(xyzangle.y)
t.m[10] = cos(xyzangle.y)
self.m = (self*t).m
#rotation around z axis
t.reset()
if xyzangle.z != 0:
t.m[0] = cos(xyzangle.z)
t.m[1] = sin(xyzangle.z)
t.m[4] = -sin(xyzangle.z)
t.m[5] = cos(xyzangle.z)
self.m = (self*t).m
def applyTranslation(self, vector):
"""\
Apply a translation.
Arguments:
- vector -- A Vector containing the amount of translation.
"""
t = Transform()
if (vector.x != 0 or vector.y != 0 or vector.z != 0):
t.m[12] = vector.x
t.m[13] = vector.y
t.m[14] = vector.z
self.m = (self*t).m
def applyScaling(self, vector):
"""\
Apply scaling.
Arguments:
- vector -- A Vector containing the amount of scaling for each axis.
"""
t = Transform()
if (vector.x != 0 or vector.y != 0 or vector.z != 0):
t.m[0] = vector.x
t.m[5] = vector.y
t.m[10] = vector.z
self.m = (self*t).m
def setLookAtRotation(self, eye, center, up):
"""\
Generates a "look at" transform.
Arguments:
- eye -- A Vector with viewer origin.
- center -- A Vector with point the viewer looks at.
        - up -- The viewer's up Vector.
"""
# apply rotation
z = -(center-eye).norm()
x = up.cross(z).norm()
y = z.cross(x)
t = Transform()
t.m[0] = x.x
t.m[1] = y.x
t.m[2] = z.x
t.m[3] = 0.0
t.m[4] = x.y
t.m[5] = y.y
t.m[6] = z.y
t.m[7] = 0.0
t.m[8] = x.z
t.m[9] = y.z
t.m[10] = z.z
t.m[11] = 0.0
t.m[12] = 0.0
t.m[13] = 0.0
t.m[14] = 0.0
t.m[15] = 1.0
self.m = (self*t).m
# Vector multiplication
def transformVector(self, v):
"""\
Transforms a Vector v.
Returns the transformed Vector.
"""
return Vector(self.m[0]*v.x + self.m[4]*v.y + self.m[8]*v.z + self.m[12],
self.m[1]*v.x + self.m[5]*v.y + self.m[9]*v.z + self.m[13],
self.m[2]*v.x + self.m[6]*v.y + self.m[10]*v.z + self.m[14])
# Matrix multiplication
def __mul__(self,other):
x = Transform()
x.m[0] = self.m[0]*other.m[0] + self.m[1]*other.m[4] + self.m[2]*other.m[8] + self.m[3]*other.m[12];
x.m[1] = self.m[0]*other.m[1] + self.m[1]*other.m[5] + self.m[2]*other.m[9] + self.m[3]*other.m[13];
x.m[2] = self.m[0]*other.m[2] + self.m[1]*other.m[6] + self.m[2]*other.m[10] + self.m[3]*other.m[14];
x.m[3] = self.m[0]*other.m[3] + self.m[1]*other.m[7] + self.m[2]*other.m[11] + self.m[3]*other.m[15];
x.m[4] = self.m[4]*other.m[0] + self.m[5]*other.m[4] + self.m[6]*other.m[8] + self.m[7]*other.m[12];
x.m[5] = self.m[4]*other.m[1] + self.m[5]*other.m[5] + self.m[6]*other.m[9] + self.m[7]*other.m[13];
x.m[6] = self.m[4]*other.m[2] + self.m[5]*other.m[6] + self.m[6]*other.m[10] + self.m[7]*other.m[14];
x.m[7] = self.m[4]*other.m[3] + self.m[5]*other.m[7] + self.m[6]*other.m[11] + self.m[7]*other.m[15];
x.m[8] = self.m[8]*other.m[0] + self.m[9]*other.m[4] + self.m[10]*other.m[8] + self.m[11]*other.m[12];
x.m[9] = self.m[8]*other.m[1] + self.m[9]*other.m[5] + self.m[10]*other.m[9] + self.m[11]*other.m[13];
x.m[10] = self.m[8]*other.m[2] + self.m[9]*other.m[6] + self.m[10]*other.m[10] + self.m[11]*other.m[14];
x.m[11] = self.m[8]*other.m[3] + self.m[9]*other.m[7] + self.m[10]*other.m[11] + self.m[11]*other.m[15];
x.m[12] = self.m[12]*other.m[0] + self.m[13]*other.m[4] + self.m[14]*other.m[8] + self.m[15]*other.m[12];
x.m[13] = self.m[12]*other.m[1] + self.m[13]*other.m[5] + self.m[14]*other.m[9] + self.m[15]*other.m[13];
x.m[14] = self.m[12]*other.m[2] + self.m[13]*other.m[6] + self.m[14]*other.m[10] + self.m[15]*other.m[14];
x.m[15] = self.m[12]*other.m[3] + self.m[13]*other.m[7] + self.m[14]*other.m[11] + self.m[15]*other.m[15];
return x
# Immediate matrix multiplication
def __imul__(self,other):
x = Transform()
x.m[0] = self.m[0]*other.m[0] + self.m[1]*other.m[4] + self.m[2]*other.m[8] + self.m[3]*other.m[12];
x.m[1] = self.m[0]*other.m[1] + self.m[1]*other.m[5] + self.m[2]*other.m[9] + self.m[3]*other.m[13];
x.m[2] = self.m[0]*other.m[2] + self.m[1]*other.m[6] + self.m[2]*other.m[10] + self.m[3]*other.m[14];
x.m[3] = self.m[0]*other.m[3] + self.m[1]*other.m[7] + self.m[2]*other.m[11] + self.m[3]*other.m[15];
x.m[4] = self.m[4]*other.m[0] + self.m[5]*other.m[4] + self.m[6]*other.m[8] + self.m[7]*other.m[12];
x.m[5] = self.m[4]*other.m[1] + self.m[5]*other.m[5] + self.m[6]*other.m[9] + self.m[7]*other.m[13];
x.m[6] = self.m[4]*other.m[2] + self.m[5]*other.m[6] + self.m[6]*other.m[10] + self.m[7]*other.m[14];
x.m[7] = self.m[4]*other.m[3] + self.m[5]*other.m[7] + self.m[6]*other.m[11] + self.m[7]*other.m[15];
x.m[8] = self.m[8]*other.m[0] + self.m[9]*other.m[4] + self.m[10]*other.m[8] + self.m[11]*other.m[12];
x.m[9] = self.m[8]*other.m[1] + self.m[9]*other.m[5] + self.m[10]*other.m[9] + self.m[11]*other.m[13];
x.m[10] = self.m[8]*other.m[2] + self.m[9]*other.m[6] + self.m[10]*other.m[10] + self.m[11]*other.m[14];
x.m[11] = self.m[8]*other.m[3] + self.m[9]*other.m[7] + self.m[10]*other.m[11] + self.m[11]*other.m[15];
x.m[12] = self.m[12]*other.m[0] + self.m[13]*other.m[4] + self.m[14]*other.m[8] + self.m[15]*other.m[12];
x.m[13] = self.m[12]*other.m[1] + self.m[13]*other.m[5] + self.m[14]*other.m[9] + self.m[15]*other.m[13];
x.m[14] = self.m[12]*other.m[2] + self.m[13]*other.m[6] + self.m[14]*other.m[10] + self.m[15]*other.m[14];
x.m[15] = self.m[12]*other.m[3] + self.m[13]*other.m[7] + self.m[14]*other.m[11] + self.m[15]*other.m[15];
        self.m = x.m
        return self
# Licensed to the BBC under a Contributor Agreement: THF
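# Minimal usage sketch: compose a rotation and a translation, then transform a
# point. Assumes the accompanying Vector class from this package; the printed
# values are not asserted here.
if __name__ == "__main__":
    t = Transform()
    t.applyRotation(Vector(0, 0, 90))    # 90 degrees around the z axis
    t.applyTranslation(Vector(1, 2, 3))
    p = t.transformVector(Vector(1, 0, 0))
    print(p.x, p.y, p.z)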
|
|
# -*- coding: utf-8 -*-
"""
Various dependencies that are required for file-metadata which need some
special handling.
"""
from __future__ import (division, absolute_import, unicode_literals,
print_function)
import ctypes.util
import hashlib
import os
import subprocess
import sys
from distutils import sysconfig
from distutils.errors import DistutilsSetupError
try:
from urllib.request import urlopen
except ImportError: # Python 2
from urllib2 import urlopen
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
def data_path():
name = os.path.join(PROJECT_PATH, 'file_metadata', 'datafiles')
if not os.path.exists(name):
os.makedirs(name)
return name
def which(cmd):
try:
from shutil import which
return which(cmd)
except ImportError: # For python 3.2 and lower
try:
output = subprocess.check_output(["which", cmd],
stderr=subprocess.STDOUT)
except (OSError, subprocess.CalledProcessError):
return None
else:
output = output.decode(sys.getfilesystemencoding())
return output.strip()
def setup_install(packages):
"""
Install packages using pip to the current folder. Useful to import
packages during setup itself.
"""
packages = list(packages)
if not packages:
return True
try:
        subprocess.check_call([sys.executable, "-m", "pip", "install",
                               "-t", PROJECT_PATH] + packages)
return True
except subprocess.CalledProcessError:
return False
def download(url, filename, overwrite=False, sha1=None):
"""
Download the given URL to the given filename. If the file exists,
it won't be downloaded unless asked to overwrite. Both, text data
like html, txt, etc. or binary data like images, audio, etc. are
acceptable.
:param url: A URL to download.
:param filename: The file to store the downloaded file to.
:param overwrite: Set to True if the file should be downloaded even if it
already exists.
:param sha1: The sha1 checksum to verify the file using.
"""
blocksize = 16 * 1024
_hash = hashlib.sha1()
if os.path.exists(filename) and not overwrite:
# Do a pass for the hash if it already exists
with open(filename, "rb") as downloaded_file:
while True:
block = downloaded_file.read(blocksize)
if not block:
break
_hash.update(block)
else:
# If it doesn't exist, or overwrite=True, find hash while downloading
response = urlopen(url)
with open(filename, 'wb') as out_file:
while True:
block = response.read(blocksize)
if not block:
break
out_file.write(block)
_hash.update(block)
return _hash.hexdigest() == sha1
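# Illustrative sketch: fetch a file once and verify it. The URL and checksum
# below are hypothetical placeholders, not real project data.
#
#     ok = download('https://example.org/tool.jar',
#                   os.path.join(data_path(), 'tool.jar'),
#                   sha1='0000000000000000000000000000000000000000')
#     if not ok:
#         raise ValueError('tool.jar failed checksum verification')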
class CheckFailed(Exception):
"""
Exception thrown when a ``SetupPackage.check()`` fails.
"""
pass
class SetupPackage(object):
name = None
optional = False
pkg_names = {
"apt-get": None,
"yum": None,
"dnf": None,
"pacman": None,
"zypper": None,
"brew": None,
"port": None,
"windows_url": None
}
def check(self):
"""
Check whether the dependencies are met. Should raise a ``CheckFailed``
exception if the dependency was not found.
"""
pass
def get_install_requires(self):
"""
Return a list of Python packages that are required by the package.
pip / easy_install will attempt to download and install this
package if it is not installed.
"""
return []
def get_setup_requires(self):
"""
Return a list of Python packages that are required by the setup.py
itself. pip / easy_install will attempt to download and install this
package if it is not installed on top of the setup.py script.
"""
return []
def get_data_files(self):
"""
Perform required actions to add the data files into the directory
given by ``data_path()``.
"""
pass
def install_help_msg(self):
"""
The help message to show if the package is not installed. The help
message shown depends on whether some class variables are present.
"""
def _try_managers(*managers):
for manager in managers:
pkg_name = self.pkg_names.get(manager, None)
if pkg_name and which(manager) is not None:
pkg_note = None
if isinstance(pkg_name, (tuple, list)):
pkg_name, pkg_note = pkg_name
msg = ('Try installing {0} with `{1} install {2}`.'
.format(self.name, manager, pkg_name))
if pkg_note:
msg += ' Note: ' + pkg_note
return msg
message = ""
if sys.platform == "win32":
url = self.pkg_names.get("windows_url", None)
if url:
return ('Please check {0} for instructions to install {1}'
.format(url, self.name))
elif sys.platform == "darwin":
manager_message = _try_managers("brew", "port")
return manager_message or message
elif sys.platform.startswith("linux"):
try:
import distro
except ImportError:
setup_install(['distro'])
import distro
release = distro.id()
if release in ('debian', 'ubuntu', 'linuxmint', 'raspbian'):
manager_message = _try_managers('apt-get')
if manager_message:
return manager_message
elif release in ('centos', 'rhel', 'redhat', 'fedora',
'scientific', 'amazon', ):
manager_message = _try_managers('dnf', 'yum')
if manager_message:
return manager_message
elif release in ('sles', 'opensuse'):
manager_message = _try_managers('zypper')
if manager_message:
return manager_message
            elif release in ('arch',):
manager_message = _try_managers('pacman')
if manager_message:
return manager_message
return message
class PkgConfig(SetupPackage):
"""
This is a class for communicating with pkg-config.
"""
name = "pkg-config"
pkg_names = {
"apt-get": 'pkg-config',
"yum": None,
"dnf": None,
"pacman": None,
"zypper": None,
"brew": 'pkg-config',
"port": None,
"windows_url": None
}
def __init__(self):
if sys.platform == 'win32':
self.has_pkgconfig = False
else:
self.pkg_config = os.environ.get('PKG_CONFIG', 'pkg-config')
self.set_pkgconfig_path()
try:
                with open(os.devnull, 'w') as nul:
subprocess.check_call([self.pkg_config, "--help"],
stdout=nul, stderr=nul)
self.has_pkgconfig = True
except (subprocess.CalledProcessError, OSError):
self.has_pkgconfig = False
raise DistutilsSetupError("pkg-config is not installed. "
"Please install it to continue.\n" +
self.install_help_msg())
def set_pkgconfig_path(self):
pkgconfig_path = sysconfig.get_config_var('LIBDIR')
if pkgconfig_path is None:
return
pkgconfig_path = os.path.join(pkgconfig_path, 'pkgconfig')
if not os.path.isdir(pkgconfig_path):
return
os.environ['PKG_CONFIG_PATH'] = ':'.join(
[os.environ.get('PKG_CONFIG_PATH', ""), pkgconfig_path])
def get_version(self, package):
"""
Get the version of the package from pkg-config.
"""
if not self.has_pkgconfig:
return None
try:
output = subprocess.check_output(
[self.pkg_config, package, "--modversion"],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return None
else:
output = output.decode(sys.getfilesystemencoding())
return output.strip()
# The PkgConfig class should be used through this singleton
pkg_config = PkgConfig()
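# Illustrative: dependency checks below query this singleton, e.g.
#     pkg_config.get_version("MagickWand")   # version string, or None if unavailable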
class Distro(SetupPackage):
name = "distro"
def check(self):
return 'Will be installed with pip.'
def get_setup_requires(self):
try:
import distro # noqa (unused import)
return []
except ImportError:
return ['distro']
class SetupTools(SetupPackage):
name = 'setuptools'
def check(self):
return 'Will be installed with pip.'
def get_setup_requires(self):
try:
import setuptools # noqa (unused import)
return []
except ImportError:
return ['setuptools']
class PathLib(SetupPackage):
name = 'pathlib'
def check(self):
if sys.version_info < (3, 4):
return 'Backported pathlib2 will be installed with pip.'
else:
return 'Already installed in python 3.4+'
def get_install_requires(self):
if sys.version_info < (3, 4):
return ['pathlib2']
else:
return []
class AppDirs(SetupPackage):
name = 'appdirs'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['appdirs']
class LibMagic(SetupPackage):
name = 'libmagic'
pkg_names = {
"apt-get": 'libmagic-dev',
"yum": 'file',
"dnf": 'file',
"pacman": None,
"zypper": None,
"brew": 'libmagic',
"port": None,
"windows_url": None
}
def check(self):
file_path = which('file')
if file_path is None:
raise CheckFailed('Needs to be installed manually.')
else:
return 'Found "file" utility at {0}.'.format(file_path)
class PythonMagic(SetupPackage):
name = 'python-magic'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['python-magic']
class Six(SetupPackage):
name = 'six'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['six>=1.8.0']
class ExifTool(SetupPackage):
name = 'exiftool'
pkg_names = {
"apt-get": 'exiftool',
"yum": 'perl-Image-ExifTool',
"dnf": 'perl-Image-ExifTool',
"pacman": None,
"zypper": None,
"brew": 'exiftool',
"port": 'p5-image-exiftool',
"windows_url": 'http://www.sno.phy.queensu.ca/~phil/exiftool/'
}
def check(self):
exiftool_path = which('exiftool')
if exiftool_path is None:
raise CheckFailed('Needs to be installed manually.')
else:
return 'Found at {0}.'.format(exiftool_path)
class Pillow(SetupPackage):
name = 'pillow'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['pillow>=2.5.0']
class Numpy(SetupPackage):
name = 'numpy'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['numpy>=1.7.2']
class Dlib(SetupPackage):
name = 'dlib'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['dlib']
class ScikitImage(SetupPackage):
name = 'scikit-image'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
# For some reason some dependencies of scikit-image aren't installed
# by pip: https://github.com/scikit-image/scikit-image/issues/2155
return ['scipy', 'matplotlib', 'scikit-image>=0.12']
class MagickWand(SetupPackage):
name = 'magickwand'
pkg_names = {
"apt-get": 'libmagickwand-dev',
"yum": 'ImageMagick-devel',
"dnf": 'ImageMagick-devel',
"pacman": None,
"zypper": None,
"brew": 'imagemagick',
"port": 'imagemagick',
"windows_url": ("http://docs.wand-py.org/en/latest/guide/"
"install.html#install-imagemagick-on-windows")
}
def check(self):
# `wand` already checks for magickwand, but only when importing, not
# during installation. See https://github.com/dahlia/wand/issues/293
magick_wand = pkg_config.get_version("MagickWand")
if magick_wand is None:
raise CheckFailed('Needs to be installed manually.')
else:
return 'Found with pkg-config.'
class Wand(SetupPackage):
name = 'wand'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['wand']
class PyColorName(SetupPackage):
name = 'pycolorname'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['pycolorname']
class LibZBar(SetupPackage):
name = 'libzbar'
pkg_names = {
"apt-get": 'libzbar-dev',
"yum": 'zbar-devel',
"dnf": 'zbar-devel',
"pacman": None,
"zypper": None,
"brew": 'zbar',
"port": None,
"windows_url": None
}
def check(self):
libzbar = ctypes.util.find_library('zbar')
if libzbar is None:
raise CheckFailed('Needs to be installed manually.')
else:
return 'Found {0}.'.format(libzbar)
class ZBar(SetupPackage):
name = 'zbar'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['zbar']
class JavaJRE(SetupPackage):
name = 'java'
pkg_names = {
"apt-get": 'default-jre',
"yum": 'java',
"dnf": 'java',
"pacman": None,
"zypper": None,
"brew": None,
"port": None,
"windows_url": "https://java.com/download/"
}
def check(self):
java_path = which('java')
if java_path is None:
raise CheckFailed('Needs to be installed manually.')
else:
return 'Found at {0}.'.format(java_path)
class ZXing(SetupPackage):
name = 'zxing'
def check(self):
return 'Will be downloaded from their maven repositories.'
@staticmethod
def download_jar(data_folder, path, name, ver, **kwargs):
data = {'name': name, 'ver': ver, 'path': path}
fname = os.path.join(data_folder, '{name}-{ver}.jar'.format(**data))
url = ('http://central.maven.org/maven2/{path}/{name}/{ver}/'
'{name}-{ver}.jar'.format(**data))
download(url, fname, **kwargs)
return fname
def get_data_files(self):
msg = 'Unable to download "{0}" correctly.'
if not self.download_jar(
data_path(), 'com/google/zxing', 'core', '3.2.1',
sha1='2287494d4f5f9f3a9a2bb6980e3f32053721b315'):
return msg.format('zxing-core')
if not self.download_jar(
data_path(), 'com/google/zxing', 'javase', '3.2.1',
sha1='78e98099b87b4737203af1fcfb514954c4f479d9'):
return msg.format('zxing-javase')
if not self.download_jar(
data_path(), 'com/beust', 'jcommander', '1.48',
sha1='bfcb96281ea3b59d626704f74bc6d625ff51cbce'):
return msg.format('jcommander')
return 'Successfully downloaded zxing-javase, zxing-core, jcommander.'
class FFProbe(SetupPackage):
name = 'ffprobe'
pkg_names = {
"apt-get": 'libav-tools',
"yum": ('ffmpeg', 'This requires the RPMFusion repo to be enabled.'),
"dnf": ('ffmpeg', 'This requires the RPMFusion repo to be enabled.'),
"pacman": None,
"zypper": None,
"brew": 'ffmpeg',
"port": None,
"windows_url": None
}
def check(self):
ffprobe_path = which('ffprobe') or which('avprobe')
if ffprobe_path is None:
raise CheckFailed('Needs to be installed manually.')
else:
return 'Found at {0}.'.format(ffprobe_path)
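# Minimal sketch (assumed usage, not the project's actual setup.py): walk the
# SetupPackage subclasses defined above and collect their status and the pip
# requirements they report.
if __name__ == '__main__':
    install_requires = []
    for pkg_cls in SetupPackage.__subclasses__():
        pkg = pkg_cls()
        try:
            print('{0}: {1}'.format(pkg.name, pkg.check()))
        except CheckFailed as err:
            print('{0}: not found ({1}) {2}'.format(
                pkg.name, err, pkg.install_help_msg()))
        install_requires.extend(pkg.get_install_requires())
    print('pip requirements: {0}'.format(sorted(set(install_requires))))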
|
|
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import signal
import sys
import time
from neutron.agent.common import polling
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as n_const
from neutron.common import topics
from neutron.common import utils
from neutron import context
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from networking_vsphere.agent.firewalls import dvs_securitygroup_rpc as dvs_rpc
from networking_vsphere.common import constants as dvs_const
from networking_vsphere.common import dvs_agent_rpc_api
from networking_vsphere.common import exceptions
from networking_vsphere.utils import dvs_util
from networking_vsphere._i18n import _, _LE, _LI
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('DVS_AGENT',
'networking_vsphere.common.vmware_conf')
class DVSPluginApi(agent_rpc.PluginApi):
pass
class DVSAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
dvs_agent_rpc_api.ExtendAPI):
target = oslo_messaging.Target(version='1.2')
def __init__(self, vsphere_hostname, vsphere_login, vsphere_password,
bridge_mappings, polling_interval, quitting_rpc_timeout=None):
super(DVSAgent, self).__init__()
self.agent_state = {
'binary': 'neutron-dvs-agent',
'host': cfg.CONF.host,
'topic': n_const.L2_AGENT_TOPIC,
'configurations': {'bridge_mappings': bridge_mappings,
'vsphere_hostname': vsphere_hostname},
'agent_type': 'DVS agent',
'start_flag': True}
report_interval = cfg.CONF.DVS_AGENT.report_interval
self.polling_interval = polling_interval
# Security group agent support
self.context = context.get_admin_context_without_session()
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = dvs_rpc.DVSSecurityGroupRpc(
self.context, self.sg_plugin_rpc, defer_refresh_firewall=True)
self.setup_rpc()
self.run_daemon_loop = True
self.iter_num = 0
self.quitting_rpc_timeout = quitting_rpc_timeout
self.network_map = dvs_util.create_network_map_from_config(
cfg.CONF.ML2_VMWARE, pg_cache=True)
uplink_map = dvs_util.create_uplink_map_from_config(
cfg.CONF.ML2_VMWARE, self.network_map)
        for phys, dvs in self.network_map.items():
if phys in uplink_map:
dvs.load_uplinks(phys, uplink_map[phys])
self.updated_ports = set()
self.deleted_ports = set()
self.known_ports = set()
self.added_ports = set()
self.booked_ports = set()
LOG.info(_LI("Agent out of sync with plugin!"))
connected_ports = self._get_dvs_ports()
self.added_ports = connected_ports
if cfg.CONF.DVS.clean_on_restart:
self._clean_up_vsphere_extra_resources(connected_ports)
self.fullsync = False
# The initialization is complete; we can start receiving messages
self.connection.consume_in_threads()
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
@dvs_util.wrap_retry
def create_network_precommit(self, current, segment):
try:
dvs = self._lookup_dvs_for_context(segment)
except exceptions.NoDVSForPhysicalNetwork as e:
LOG.info(_LI('Network %(id)s not created. Reason: %(reason)s') % {
'id': current['id'],
'reason': e.message})
except exceptions.InvalidNetwork:
pass
else:
dvs.create_network(current, segment)
@dvs_util.wrap_retry
def delete_network_postcommit(self, current, segment):
try:
dvs = self._lookup_dvs_for_context(segment)
except exceptions.NoDVSForPhysicalNetwork as e:
LOG.info(_LI('Network %(id)s not deleted. Reason: %(reason)s') % {
'id': current['id'],
'reason': e.message})
except exceptions.InvalidNetwork:
pass
else:
dvs.delete_network(current)
@dvs_util.wrap_retry
def update_network_precommit(self, current, segment, original):
try:
dvs = self._lookup_dvs_for_context(segment)
except exceptions.NoDVSForPhysicalNetwork as e:
LOG.info(_LI('Network %(id)s not updated. Reason: %(reason)s') % {
'id': current['id'],
'reason': e.message})
except exceptions.InvalidNetwork:
pass
else:
dvs.update_network(current, original)
@dvs_util.wrap_retry
def book_port(self, current, network_segments, network_current):
physnet = network_current['provider:physical_network']
dvs = None
dvs_segment = None
for segment in network_segments:
if segment['physical_network'] == physnet:
dvs = self._lookup_dvs_for_context(segment)
dvs_segment = segment
if dvs:
port = dvs.book_port(network_current, current['id'],
dvs_segment, current.get('portgroup_name'))
self.booked_ports.add(current['id'])
return port
return None
@dvs_util.wrap_retry
def update_port_postcommit(self, current, original, segment):
try:
dvs = self._lookup_dvs_for_context(segment)
if current['id'] in self.booked_ports:
self.added_ports.add(current['id'])
self.booked_ports.discard(current['id'])
except exceptions.NoDVSForPhysicalNetwork:
raise exceptions.InvalidSystemState(details=_(
'Port %(port_id)s belong to VMWare VM, but there is '
'no mapping from network to DVS.') % {'port_id': current['id']}
)
else:
self._update_admin_state_up(dvs, original, current)
def delete_port_postcommit(self, current, original, segment):
try:
dvs = self._lookup_dvs_for_context(segment)
except exceptions.NoDVSForPhysicalNetwork:
raise exceptions.InvalidSystemState(details=_(
'Port %(port_id)s belong to VMWare VM, but there is '
'no mapping from network to DVS.') % {'port_id': current['id']}
)
else:
if sg_rpc.is_firewall_enabled():
key = current.get(
'binding:vif_details', {}).get('dvs_port_key')
if key:
dvs.remove_block(key)
else:
dvs.release_port(current)
def _lookup_dvs_for_context(self, segment):
physical_network = segment['physical_network']
try:
return self.network_map[physical_network]
except KeyError:
LOG.debug('No dvs mapped for physical '
'network: %s' % physical_network)
raise exceptions.NoDVSForPhysicalNetwork(
physical_network=physical_network)
def _update_admin_state_up(self, dvs, original, current):
try:
original_admin_state_up = original['admin_state_up']
except KeyError:
pass
else:
current_admin_state_up = current['admin_state_up']
perform = current_admin_state_up != original_admin_state_up
if perform:
dvs.switch_port_blocked_state(current)
def _report_state(self):
try:
agent_status = self.state_rpc.report_state(self.context,
self.agent_state,
True)
if agent_status == n_const.AGENT_REVIVED:
LOG.info(_LI('Agent has just revived. Do a full sync.'))
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def setup_rpc(self):
self.agent_id = 'dvs-agent-%s' % cfg.CONF.host
self.topic = topics.AGENT
self.plugin_rpc = DVSPluginApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
# Handle updates from service
self.endpoints = [self]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.PORT, topics.DELETE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE],
[dvs_const.DVS, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers,
start_listening=False)
def _handle_sigterm(self, signum, frame):
LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop."))
self.sg_agent.firewall.stop_all()
self.run_daemon_loop = False
@dvs_util.wrap_retry
def _clean_up_vsphere_extra_resources(self, connected_ports):
LOG.debug("Cleanup vsphere extra ports and networks...")
vsphere_not_connected_ports_maps = {}
network_with_active_ports = {}
network_with_known_not_active_ports = {}
for phys_net, dvs in self.network_map.items():
phys_net_active_network = \
network_with_active_ports.setdefault(phys_net, set())
phys_net_not_active_network = \
network_with_known_not_active_ports.setdefault(phys_net, {})
for port in dvs.get_ports(False):
port_name = getattr(port.config, 'name', None)
if not port_name:
continue
if port_name not in connected_ports:
vsphere_not_connected_ports_maps[port_name] = {
'phys_net': phys_net,
'port_key': port.key
}
phys_net_not_active_network[port_name] = port.portgroupKey
else:
phys_net_active_network.add(port.portgroupKey)
if vsphere_not_connected_ports_maps:
devices_details_list = (
self.plugin_rpc.get_devices_details_list_and_failed_devices(
self.context,
vsphere_not_connected_ports_maps.keys(),
self.agent_id,
cfg.CONF.host))
neutron_ports = set([
p['port_id'] for p in itertools.chain(
devices_details_list['devices'],
devices_details_list['failed_devices']) if p.get('port_id')
])
for port_id, port_data in vsphere_not_connected_ports_maps.items():
phys_net = port_data['phys_net']
if port_id not in neutron_ports:
dvs = self.network_map[phys_net]
dvs.release_port({
'id': port_id,
'binding:vif_details': {
'dvs_port_key': port_data['port_key']
}
})
else:
network_with_active_ports[phys_net].add(
network_with_known_not_active_ports[phys_net][port_id])
for phys_net, dvs in self.network_map.items():
dvs.delete_networks_without_active_ports(
network_with_active_ports.get(phys_net, []))
def daemon_loop(self):
with polling.get_polling_manager() as pm:
self.rpc_loop(polling_manager=pm)
def rpc_loop(self, polling_manager=None):
if not polling_manager:
polling_manager = polling.get_polling_manager(
minimize_polling=False)
while self.run_daemon_loop:
start = time.time()
port_stats = {'regular': {'added': 0,
'updated': 0,
'removed': 0}}
if self.fullsync:
LOG.info(_LI("Agent out of sync with plugin!"))
connected_ports = self._get_dvs_ports()
self.added_ports = connected_ports - self.known_ports
if cfg.CONF.DVS.clean_on_restart:
self._clean_up_vsphere_extra_resources(connected_ports)
self.fullsync = False
polling_manager.force_polling()
if self._agent_has_updates(polling_manager):
LOG.debug("Agent rpc_loop - update")
self.process_ports()
port_stats['regular']['added'] = len(self.added_ports)
port_stats['regular']['updated'] = len(self.updated_ports)
port_stats['regular']['removed'] = len(self.deleted_ports)
polling_manager.polling_completed()
self.loop_count_and_wait(start)
def _agent_has_updates(self, polling_manager):
return (polling_manager.is_polling_required or
self.sg_agent.firewall_refresh_needed() or
self.updated_ports or self.deleted_ports)
def loop_count_and_wait(self, start_time):
# sleep till end of polling interval
elapsed = time.time() - start_time
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d "
"completed. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': elapsed})
if elapsed < self.polling_interval:
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
def process_ports(self):
LOG.debug("Process ports")
if self.deleted_ports:
deleted_ports = self.deleted_ports.copy()
self.deleted_ports = self.deleted_ports - deleted_ports
self.sg_agent.remove_devices_filter(deleted_ports)
if self.added_ports:
possible_ports = self.added_ports
self.added_ports = set()
else:
possible_ports = set()
upd_ports = self.updated_ports.copy()
self.sg_agent.setup_port_filters(possible_ports,
upd_ports)
self.updated_ports = self.updated_ports - upd_ports
self.known_ports |= possible_ports
def port_update(self, context, **kwargs):
port = kwargs.get('port')
if port['id'] in self.known_ports:
self.updated_ports.add(port['id'])
LOG.debug("port_update message processed for port %s", port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
if port_id in self.known_ports:
self.deleted_ports.add(port_id)
self.known_ports.discard(port_id)
if port_id in self.added_ports:
self.added_ports.discard(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def _get_dvs_ports(self):
ports = set()
dvs_list = self.network_map.values()
for dvs in dvs_list:
LOG.debug("Take port ids for dvs %s", dvs)
ports.update(dvs._get_ports_ids())
return ports
def create_agent_config_map(config):
"""Create a map of agent config parameters.
:param config: an instance of cfg.CONF
:returns: a map of agent configuration parameters
"""
try:
bridge_mappings = utils.parse_mappings(config.ML2_VMWARE.network_maps,
unique_values=False)
except ValueError as e:
raise ValueError(_("Parsing network_maps failed: %s.") % e)
kwargs = dict(
vsphere_hostname=config.ML2_VMWARE.vsphere_hostname,
vsphere_login=config.ML2_VMWARE.vsphere_login,
vsphere_password=config.ML2_VMWARE.vsphere_password,
bridge_mappings=bridge_mappings,
polling_interval=config.DVS_AGENT.polling_interval,
quitting_rpc_timeout=config.DVS_AGENT.quitting_rpc_timeout,
)
return kwargs
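# Illustrative config sketch: the option names are the ones read above; the
# values are placeholders, not a real deployment.
#
#     [ML2_VMWARE]
#     network_maps = physnet1:dvSwitch1
#     vsphere_hostname = vcenter.example.org
#     vsphere_login = administrator
#     vsphere_password = secret
#
#     [DVS_AGENT]
#     polling_interval = 2
#     quitting_rpc_timeout = 10
#     report_interval = 30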
def main():
# cfg.CONF.register_opts(ip_lib.OPTS)
common_config.init(sys.argv[1:])
common_config.setup_logging()
utils.log_opt_values(LOG)
try:
agent_config = create_agent_config_map(cfg.CONF)
except ValueError as e:
LOG.error(_LE('%s Agent terminated!'), e)
sys.exit(1)
try:
agent = DVSAgent(**agent_config)
except RuntimeError as e:
LOG.error(_LE("%s Agent terminated!"), e)
sys.exit(1)
signal.signal(signal.SIGTERM, agent._handle_sigterm)
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
agent.daemon_loop()
if __name__ == "__main__":
main()
|
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Actions to take on resources
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import base64
from datetime import datetime
import jmespath
import logging
import zlib
import six
from botocore.exceptions import ClientError
from c7n.executor import ThreadPoolExecutor
from c7n.registry import PluginRegistry
from c7n.resolver import ValuesFrom
from c7n import utils
from c7n.version import version as VERSION
def average(numbers):
return float(sum(numbers)) / max(len(numbers), 1)
def distinct_count(values):
return float(len(set(values)))
METRIC_OPS = {
'count': len,
'distinct_count': distinct_count,
'sum': sum,
'average': average,
}
METRIC_UNITS = [
# Time
'Seconds',
'Microseconds',
'Milliseconds',
# Bytes and Bits
'Bytes',
'Kilobytes',
'Megabytes',
'Gigabytes',
'Terabytes',
'Bits',
'Kilobits',
'Megabits',
'Gigabits',
'Terabits',
# Rates
'Bytes/Second',
'Kilobytes/Second',
'Megabytes/Second',
'Gigabytes/Second',
'Terabytes/Second',
'Bits/Second',
'Kilobits/Second',
'Megabits/Second',
'Gigabits/Second',
'Terabits/Second',
'Count/Second',
# Other Scalars
'Percent',
'Count',
'None'
]
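# METRIC_OPS maps an aggregation name to a reducer over raw values, e.g.
#     METRIC_OPS['average']([2, 4, 9])               # -> 5.0
#     METRIC_OPS['distinct_count'](['a', 'b', 'a'])  # -> 2.0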
class ActionRegistry(PluginRegistry):
def __init__(self, *args, **kw):
super(ActionRegistry, self).__init__(*args, **kw)
self.register('notify', Notify)
self.register('invoke-lambda', LambdaInvoke)
self.register('put-metric', PutMetric)
def parse(self, data, manager):
results = []
for d in data:
results.append(self.factory(d, manager))
return results
def factory(self, data, manager):
if isinstance(data, dict):
action_type = data.get('type')
if action_type is None:
raise ValueError(
"Invalid action type found in %s" % (data))
else:
action_type = data
data = {}
action_class = self.get(action_type)
if action_class is None:
raise ValueError(
"Invalid action type %s, valid actions %s" % (
action_type, list(self.keys())))
# Construct a ResourceManager
return action_class(data, manager).validate()
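# Illustrative: factory() accepts either a bare registered name or a mapping
# carrying a 'type' key (any other keys become the action's data), e.g.
#     registry.factory('notify', manager)
#     registry.factory({'type': 'invoke-lambda', 'function': 'my-func'}, manager)
# where `registry` is an ActionRegistry instance and 'my-func' is a hypothetical
# value. A mapping without 'type', or an unregistered type, raises ValueError.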
class Action(object):
permissions = ()
metrics = ()
log = logging.getLogger("custodian.actions")
executor_factory = ThreadPoolExecutor
schema = {'type': 'object'}
def __init__(self, data=None, manager=None, log_dir=None):
self.data = data or {}
self.manager = manager
self.log_dir = log_dir
def get_permissions(self):
return self.permissions
def validate(self):
return self
@property
def name(self):
return self.__class__.__name__.lower()
def process(self, resources):
raise NotImplementedError(
"Base action class does not implement behavior")
def _run_api(self, cmd, *args, **kw):
try:
return cmd(*args, **kw)
except ClientError as e:
if (e.response['Error']['Code'] == 'DryRunOperation' and
e.response['ResponseMetadata']['HTTPStatusCode'] == 412 and
                    'would have succeeded' in str(e)):
return self.log.info(
"Dry run operation %s succeeded" % (
self.__class__.__name__.lower()))
raise
BaseAction = Action
class ModifyVpcSecurityGroupsAction(Action):
"""Common actions for modifying security groups on a resource
Can target either physical groups as a list of group ids or
symbolic groups like 'matched' or 'all'. 'matched' uses
the annotations of the 'security-group' interface filter.
Note an interface always gets at least one security group, so we
mandate the specification of an isolation/quarantine group to fall
back on if there would otherwise be no groups.
type: modify-security-groups
add: []
remove: [] | matched
isolation-group: sg-xyz
"""
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'type': {'enum': ['modify-security-groups']},
'add': {'oneOf': [
{'type': 'string', 'pattern': '^sg-*'},
{'type': 'array', 'items': {
'pattern': '^sg-*',
'type': 'string'}}]},
'remove': {'oneOf': [
{'type': 'array', 'items': {
'type': 'string', 'pattern': '^sg-*'}},
{'enum': [
'matched', 'all',
{'type': 'string', 'pattern': '^sg-*'}]}]},
'isolation-group': {'oneOf': [
{'type': 'string', 'pattern': '^sg-*'},
{'type': 'array', 'items': {
'type': 'string', 'pattern': '^sg-*'}}]}},
'oneOf': [
{'required': ['isolation-group', 'remove']},
{'required': ['add', 'remove']},
{'required': ['add']}]
}
def get_groups(self, resources, metadata_key=None):
"""Parse policies to get lists of security groups to attach to each resource
For each input resource, parse the various add/remove/isolation-
group policies for 'modify-security-groups' to find the resulting
set of VPC security groups to attach to that resource.
The 'metadata_key' parameter can be used for two purposes at
the moment. The first use is for resources' APIs that return a
list of security group IDs but use a different metadata key
than 'Groups' or 'SecurityGroups'.
The second use is for when there are richer objects in the 'Groups' or
'SecurityGroups' lists. The custodian actions need to act on lists of
just security group IDs, so the metadata_key can be used to select IDs
from the richer objects in the provided lists.
Returns a list of lists containing the resulting VPC security groups
that should end up on each resource passed in.
:param resources: List of resources containing VPC Security Groups
:param metadata_key: Metadata key for security groups list
:return: List of lists of security groups per resource
"""
# parse the add, remove, and isolation group params to return the
# list of security groups that will end up on the resource
# target_group_ids = self.data.get('groups', 'matched')
add_target_group_ids = self.data.get('add', None)
remove_target_group_ids = self.data.get('remove', None)
isolation_group = self.data.get('isolation-group')
add_groups = []
remove_groups = []
return_groups = []
for idx, r in enumerate(resources):
if r.get('Groups'):
if metadata_key and isinstance(r['Groups'][0], dict):
rgroups = [g[metadata_key] for g in r['Groups']]
else:
rgroups = [g['GroupId'] for g in r['Groups']]
elif r.get('SecurityGroups'):
if metadata_key and isinstance(r['SecurityGroups'][0], dict):
rgroups = [g[metadata_key] for g in r['SecurityGroups']]
else:
rgroups = [g for g in r['SecurityGroups']]
elif r.get('VpcSecurityGroups'):
if metadata_key and isinstance(r['VpcSecurityGroups'][0], dict):
rgroups = [g[metadata_key] for g in r['VpcSecurityGroups']]
else:
rgroups = [g for g in r['VpcSecurityGroups']]
# use as substitution for 'Groups' or '[Vpc]SecurityGroups'
# unsure if necessary - defer to coverage report
elif metadata_key and r.get(metadata_key):
rgroups = [g for g in r[metadata_key]]
# Parse remove_groups
if remove_target_group_ids == 'matched':
remove_groups = r.get('c7n.matched-security-groups', ())
elif remove_target_group_ids == 'all':
remove_groups = rgroups
elif isinstance(remove_target_group_ids, list):
remove_groups = remove_target_group_ids
elif isinstance(remove_target_group_ids, six.string_types):
remove_groups = [remove_target_group_ids]
# Parse add_groups
if isinstance(add_target_group_ids, list):
add_groups = add_target_group_ids
elif isinstance(add_target_group_ids, six.string_types):
add_groups = [add_target_group_ids]
# seems extraneous with list?
# if not remove_groups and not add_groups:
# continue
for g in remove_groups:
if g in rgroups:
rgroups.remove(g)
for g in add_groups:
if g not in rgroups:
rgroups.append(g)
if not rgroups:
rgroups.append(isolation_group)
return_groups.append(rgroups)
return return_groups
class EventAction(BaseAction):
"""Actions which receive lambda event if present
"""
class LambdaInvoke(EventAction):
""" Invoke an arbitrary lambda
serialized invocation parameters
- resources / collection of resources
- policy / policy that is invoking the lambda
- action / action that is invoking the lambda
- event / cloud trail event if any
- version / version of custodian invoking the lambda
We automatically batch into sets of 250 for invocation. We try to
utilize async invocation by default; this imposes a payload size
limit of 128kb, which is another reason we batch invoke.
Example::
- type: invoke-lambda
function: my-function
"""
schema = utils.type_schema(
'invoke-lambda',
function={'type': 'string'},
async={'type': 'boolean'},
qualifier={'type': 'string'},
batch_size={'type': 'integer'},
required=('function',))
def get_permissions(self):
if self.data.get('async', True):
return ('lambda:InvokeAsync',)
return ('lambda:Invoke',)
permissions = ('lambda:InvokeFunction',)
def process(self, resources, event=None):
client = utils.local_session(
self.manager.session_factory).client('lambda')
params = dict(FunctionName=self.data['function'])
if self.data.get('qualifier'):
params['Qualifier'] = self.data['qualifier']
if self.data.get('async', True):
params['InvocationType'] = 'Event'
payload = {
'version': VERSION,
'event': event,
'action': self.data,
'policy': self.manager.data}
results = []
for resource_set in utils.chunks(resources, self.data.get('batch_size', 250)):
payload['resources'] = resource_set
params['Payload'] = utils.dumps(payload)
result = client.invoke(**params)
result['Payload'] = result['Payload'].read()
results.append(result)
return results
class Notify(EventAction):
"""
Flexible notifications require quite a bit of implementation support
on pluggable transports, templates, address resolution, variable
extraction, batch periods, etc.
For expedience and flexibility, we instead send the data to
an SQS queue for processing, i.e. the actual communication is DIY at the moment.
Example::
policies:
- name: ec2-bad-instance-kill
resource: ec2
filters:
- Name: bad-instance
actions:
- terminate
- type: notify
to:
- event-user
- resource-creator
- email@address
# which template for the email should we use
template: policy-template
transport:
type: sqs
region: us-east-1
queue: xyz
"""
C7N_DATA_MESSAGE = "maidmsg/1.0"
schema = {
'type': 'object',
'anyOf': [
{'required': ['type', 'transport', 'to']},
{'required': ['type', 'transport', 'to_from']}],
'properties': {
'type': {'enum': ['notify']},
'to': {'type': 'array', 'items': {'type': 'string'}},
'to_from': ValuesFrom.schema,
'cc': {'type': 'array', 'items': {'type': 'string'}},
'cc_from': ValuesFrom.schema,
'cc_manager': {'type': 'boolean'},
'from': {'type': 'string'},
'subject': {'type': 'string'},
'template': {'type': 'string'},
'transport': {
'oneOf': [
{'type': 'object',
'required': ['type', 'queue'],
'properties': {
'queue': {'type': 'string'},
'type': {'enum': ['sqs']}}},
{'type': 'object',
'required': ['type', 'topic'],
'properties': {
'topic': {'type': 'string'},
'type': {'enum': ['sns']},
}}]
},
'assume_role': {'type': 'boolean'}
}
}
batch_size = 250
def __init__(self, data=None, manager=None, log_dir=None):
super(Notify, self).__init__(data, manager, log_dir)
self.assume_role = data.get('assume_role', True)
def get_permissions(self):
if self.data.get('transport', {}).get('type') == 'sns':
return ('sns:Publish',)
if self.data.get('transport', {'type': 'sqs'}).get('type') == 'sqs':
return ('sqs:SendMessage',)
return ()
def expand_variables(self, message):
"""expand any variables in the action to_from/cc_from fields.
"""
p = self.data.copy()
if 'to_from' in self.data:
to_from = self.data['to_from'].copy()
to_from['url'] = to_from['url'].format(**message)
if 'expr' in to_from:
to_from['expr'] = to_from['expr'].format(**message)
p.setdefault('to', []).extend(ValuesFrom(to_from, self.manager).get_values())
if 'cc_from' in self.data:
cc_from = self.data['cc_from'].copy()
cc_from['url'] = cc_from['url'].format(**message)
if 'expr' in cc_from:
cc_from['expr'] = cc_from['expr'].format(**message)
p.setdefault('cc', []).extend(ValuesFrom(cc_from, self.manager).get_values())
return p
def process(self, resources, event=None):
aliases = self.manager.session_factory().client(
'iam').list_account_aliases().get('AccountAliases', ())
account_name = aliases and aliases[0] or ''
message = {
'event': event,
'account_id': self.manager.config.account_id,
'account': account_name,
'region': self.manager.config.region,
'policy': self.manager.data}
message['action'] = self.expand_variables(message)
for batch in utils.chunks(resources, self.batch_size):
message['resources'] = batch
receipt = self.send_data_message(message)
self.log.info("sent message:%s policy:%s template:%s count:%s" % (
receipt, self.manager.data['name'],
self.data.get('template', 'default'), len(batch)))
def send_data_message(self, message):
if self.data['transport']['type'] == 'sqs':
return self.send_sqs(message)
elif self.data['transport']['type'] == 'sns':
return self.send_sns(message)
def pack(self, message):
dumped = utils.dumps(message)
compressed = zlib.compress(dumped.encode('utf8'))
b64encoded = base64.b64encode(compressed)
return b64encoded.decode('ascii')
def send_sns(self, message):
topic = self.data['transport']['topic']
if topic.startswith('arn:aws:sns'):
region = topic.split(':', 5)[3]
topic_arn = topic
else:
region = message['region']
topic_arn = "arn:aws:sns:%s:%s:%s" % (
message['region'], message['account_id'], topic)
client = self.manager.session_factory(
region=region, assume=self.assume_role).client('sns')
client.publish(TopicArn=topic_arn, Message=self.pack(message))
def send_sqs(self, message):
queue = self.data['transport']['queue']
if queue.startswith('https://queue.amazonaws.com'):
region = 'us-east-1'
queue_url = queue
elif queue.startswith('https://sqs.'):
region = queue.split('.', 2)[1]
queue_url = queue
elif queue.startswith('arn:'):
queue_arn_split = queue.split(':', 5)
region = queue_arn_split[3]
owner_id = queue_arn_split[4]
queue_name = queue_arn_split[5]
queue_url = "https://sqs.%s.amazonaws.com/%s/%s" % (
region, owner_id, queue_name)
else:
region = self.manager.config.region
owner_id = self.manager.config.account_id
queue_name = queue
queue_url = "https://sqs.%s.amazonaws.com/%s/%s" % (
region, owner_id, queue_name)
client = self.manager.session_factory(
region=region, assume=self.assume_role).client('sqs')
attrs = {
'mtype': {
'DataType': 'String',
'StringValue': self.C7N_DATA_MESSAGE,
},
}
result = client.send_message(
QueueUrl=queue_url,
MessageBody=self.pack(message),
MessageAttributes=attrs)
return result['MessageId']
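# The consumer side of these messages is intentionally left "DIY" (see the
# Notify docstring). A minimal sketch of reversing ``Notify.pack`` -- the
# helper name is illustrative and assumes the packed payload is plain JSON:
def _example_unpack_notify_body(body):
    import json
    # reverse of pack(): base64 decode, zlib decompress, then JSON load
    return json.loads(zlib.decompress(base64.b64decode(body)).decode('utf8'))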
class AutoTagUser(EventAction):
"""Tag a resource with the user who created/modified it.
.. code-block:: yaml
policies:
- name: ec2-auto-tag-owner
resource: ec2
mode:
type: cloudtrail
role: arn:aws:iam::123456789000:role/custodian-auto-tagger
events:
- RunInstances
filters:
- tag:Owner: absent
actions:
- type: auto-tag-user
tag: OwnerContact
There are a number of caveats to usage. Resources which don't
include tagging as part of their api may have some delay before
automation kicks in to create a tag. Real world delay may be several
minutes, with worst case into hours[0]. This creates a race condition
between auto tagging and automation.
In practice this window is on the order of a fraction of a second, as
we fetch the resource and evaluate the presence of the tag before
attempting to tag it.
References
- AWS Config (see REQUIRED_TAGS caveat) - http://goo.gl/oDUXPY
- CloudTrail User - http://goo.gl/XQhIG6
"""
schema = utils.type_schema(
'auto-tag-user',
required=['tag'],
**{'user-type': {
'type': 'array',
'items': {'type': 'string',
'enum': [
'IAMUser',
'AssumedRole',
'FederatedUser'
]}},
'update': {'type': 'boolean'},
'tag': {'type': 'string'},
'principal_id_tag': {'type': 'string'}
}
)
def get_permissions(self):
return self.manager.action_registry.get(
'tag')({}, self.manager).get_permissions()
def validate(self):
if self.manager.data.get('mode', {}).get('type') != 'cloudtrail':
raise ValueError("Auto tag owner requires an event")
if self.manager.action_registry.get('tag') is None:
raise ValueError("Resource does not support tagging")
return self
def process(self, resources, event):
if event is None:
return
event = event['detail']
utype = event['userIdentity']['type']
if utype not in self.data.get('user-type', ['AssumedRole', 'IAMUser']):
return
user = None
if utype == "IAMUser":
user = event['userIdentity']['userName']
principal_id_value = event['userIdentity'].get('principalId', '')
elif utype == "AssumedRole":
user = event['userIdentity']['arn']
prefix, user = user.rsplit('/', 1)
principal_id_value = event['userIdentity'].get('principalId', '').split(':')[0]
# instance role
if user.startswith('i-'):
return
# lambda function (old style)
elif user.startswith('awslambda'):
return
if user is None:
return
# if the auto-tag-user policy sets update to False (or leaves it unset) then we
# will skip writing the user tag and not overwrite pre-existing values
if not self.data.get('update', False):
untagged_resources = []
# iterating over all the resources the user spun up in this event
for resource in resources:
tag_already_set = False
for tag in resource.get('Tags', ()):
if tag['Key'] == self.data['tag']:
tag_already_set = True
break
if not tag_already_set:
untagged_resources.append(resource)
# if update is set to True, we will overwrite the userName tag even if
# the user already set a value
else:
untagged_resources = resources
tag_action = self.manager.action_registry.get('tag')
new_tags = {
self.data['tag']: user
}
# if principal_id_tag is set and we have a principal id value, also set the principalId tag.
principal_id_key = self.data.get('principal_id_tag', None)
if principal_id_key and principal_id_value:
new_tags[principal_id_key] = principal_id_value
for key, value in six.iteritems(new_tags):
tag_action({'key': key, 'value': value}, self.manager).process(untagged_resources)
return new_tags
class PutMetric(BaseAction):
"""Action to put metrics based on an expression into CloudWatch metrics
:example:
.. code-block:: yaml
policies:
- name: track-attached-ebs
resource: ec2
comment: |
Put the count of the number of EBS attached disks to an instance
filters:
- Name: tracked-ec2-instance
actions:
- type: put-metric
key: Reservations[].Instances[].BlockDeviceMappings[].DeviceName
namespace: Usage Metrics
metric_name: Attached Disks
op: count
units: Count
op and units are optional and will default to simple Counts.
"""
# permissions are typically lowercase servicename:TitleCaseActionName
permissions = {'cloudwatch:PutMetricData', }
schema = {
'type': 'object',
'required': ['type', 'key', 'namespace', 'metric_name'],
'properties': {
'type': {'enum': ['put-metric', ]},
'key': {'type': 'string'}, # jmes path
'namespace': {'type': 'string'},
'metric_name': {'type': 'string'},
'dimensions': {'type': 'array', 'items': {'type': 'object'}},
'op': {'enum': list(METRIC_OPS.keys())},
'units': {'enum': METRIC_UNITS}
}
}
def process(self, resources):
ns = self.data['namespace']
metric_name = self.data['metric_name']
key_expression = self.data.get('key', 'Resources[]')
operation = self.data.get('op', 'count')
units = self.data.get('units', 'Count')
# dimensions are passed as a list of dicts
dimensions = self.data.get('dimensions', [])
now = datetime.utcnow()
# reduce the resources by the key expression, and apply the operation to derive the value
values = []
self.log.debug("searching for %s in %s", key_expression, resources)
try:
values = jmespath.search("Resources[]." + key_expression,
{'Resources': resources})
# I had to wrap resources in a dict like this in order not to have jmespath expressions
# start with [] in the yaml files. It fails to parse otherwise.
except TypeError as oops:
self.log.error(oops.message)
value = 0
try:
f = METRIC_OPS[operation]
value = f(values)
except KeyError:
self.log.error("Bad op for put-metric action: %s", operation)
# for demo purposes
# from math import sin, pi
# value = sin((now.minute * 6 * 4 * pi) / 180) * ((now.hour + 1) * 4.0)
metrics_data = [
{
'MetricName': metric_name,
'Dimensions': [{'Name': i[0], 'Value': i[1]}
for d in dimensions
for i in d.items()],
'Timestamp': now,
'Value': value,
# TODO: support an operation of 'stats' to include this
# structure instead of a single Value
# Value and StatisticValues are mutually exclusive.
# 'StatisticValues': {
# 'SampleCount': 1,
# 'Sum': 123.0,
# 'Minimum': 123.0,
# 'Maximum': 123.0
# },
'Unit': units,
},
]
client = self.manager.session_factory().client('cloudwatch')
client.put_metric_data(Namespace=ns, MetricData=metrics_data)
return resources
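# An AWS-free sketch of the reduction performed above: the ``key`` expression
# is evaluated against ``{'Resources': resources}`` and the result is folded
# with one of METRIC_OPS. The sample resources and expression are hypothetical.
def _example_put_metric_value():
    resources = [{'State': 'running'}, {'State': 'stopped'}, {'State': 'running'}]
    values = jmespath.search('Resources[].State', {'Resources': resources})
    return METRIC_OPS['distinct_count'](values)  # -> 2.0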
|
|
from __future__ import division
from misc_scripts.r_factor_calc import *
from iotbx.pdb.multimer_reconstruction import multimer
import multiprocessing as mp
from iotbx import pdb
import cPickle as pickle
import os
'''
Read a list of PDB file names with more than one good BIOMT record
Read a list of PDB file names with more than one good MTRIX record
Get corresponding structure factor files
@author: Youval Dar
'''
# global variable for parallel process results collection
results = []
def collect_results(x):
'''
Collect the results from the parallel process
'''
results.append(x)
def full_pdb_file_paths(file_type=1):
'''
read all names of
1: PDB files
2: structure factor files
return:
{file_name: file_path,...}
'''
if file_type == 1: # PDB files
files_dir = os.environ["PDB_MIRROR_PDB"]
else: # structure factor files
files_dir = '/net/cci/pdb_mirror/structure_factors'
file_names = open(os.path.join(files_dir, "INDEX"), "r").readlines()
result = {}
for file_path in file_names:
file_path = os.path.join(files_dir, file_path.strip())
file_name = file_path.split('/')[-1]
file_name = file_name.split('.')[0]
result[file_name] = file_path
return result
def make_dict(index_file_name,data_dir=''):
'''
Read all file names from the PDB mirror folder, structure factor files
or another file containing file names.
For PDB files check the correct folder using os.environ["PDB_MIRROR_PDB"]
and the file name 'INDEX'.
For structure factor files use os.environ["PDB_MIRROR_STRUCTURE_FACTORS"]
and the file name 'INDEX'.
input:
data_dir : the directory containing a file with the names of files we want to extract
index_file_name : file names list
Output:
a dictionary
{file_name: file_path,...}
'''
file_names = open(os.path.join(data_dir, index_file_name), "r").readlines()
result = {}
for file_path in file_names:
# file_path looks like: '/net/chevy/raid1/pdb_mirror/pdb/00/pdb100d.ent.gz'
# file_name should look like 'pdb100d'
file_path = file_path.strip()
file_name = file_path.split('/')[-1]
file_name = file_name.split('.')[0]
if file_name.startswith('pdb'):
# pdb file names in INDEX are like 'pdb2vpf', the file_name should be '2vpf'
file_name = file_name[3:]
elif file_name.startswith('r'):
# structure factor file names in INDEX are like 'r1oqjsf', it should be '1oqj'
file_name = file_name[1:-2]
else:
print 'File naming problems!!!'
print file_name
break
if file_path.startswith('/net'):
result[file_name] = file_path
else:
result[file_name] = data_dir+file_path
return result
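# Illustrative (hypothetical paths) result of make_dict(): the PDB entry
# 'pdb100d.ent.gz' is keyed as '100d' and the structure factor entry
# 'r1oqjsf.ent.gz' is keyed as '1oqj', each mapped to its full path.
EXAMPLE_MAKE_DICT_RESULT = {
    '100d': '/net/chevy/raid1/pdb_mirror/pdb/00/pdb100d.ent.gz',
    '1oqj': '/net/cci/pdb_mirror/structure_factors/r1oqjsf.ent.gz',
}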
def start_multiprocessing():
while True:
try:
np = int(raw_input('Number of processors available is {}. How many would you like to use? '.format(mp.cpu_count())))
if np>mp.cpu_count():
raise Exception
break
except ValueError:
print 'Please enter an integer \n'
except Exception:
print 'Please choose a smaller number of processors \n'
# set number of CPUs
p = mp.Pool(processes=np)
return p
def run(recon_test=False,build_new_dictinaries=False):
'''
good_MTRIX_pdb_files, good_BIOMT_pdb_files and structure_factors_files
are dictionaries. The keys are PDB record names and the values are the
corresponding full file paths
'''
if build_new_dictinaries:
# Do the following only if there were changes to the files lists
good_MTRIX_pdb_files = make_dict('mtrix_ok_run.txt')
good_BIOMT_pdb_files = make_dict('biomt_ok_run.txt')
structure_factors_files = make_dict('INDEX','/net/cci/pdb_mirror/structure_factors/')
pickle.dump(good_MTRIX_pdb_files,open('dict_good_MTRIX_pdb_files','w'))
pickle.dump(good_BIOMT_pdb_files,open('dict_good_BIOMT_pdb_files','w'))
pickle.dump(structure_factors_files,open('dict_structure_factors_files','w'))
print 'Dictionaries Created...'
else:
# If you already have the dictionaries use:
good_MTRIX_pdb_files = pickle.load(open('dict_good_MTRIX_pdb_files','r'))
good_BIOMT_pdb_files = pickle.load(open('dict_good_BIOMT_pdb_files','r'))
structure_factors_files = pickle.load(open('dict_structure_factors_files','r'))
MTRIX_with_Straucture_Factor = pickle.load(open('MTRIX_with_Straucture_Factor_file_list','r'))
print 'Dictionaries are loaded...'
# When changing the file lists
if build_new_dictinaries:
MTRIX_with_Straucture_Factor = []
for x in good_MTRIX_pdb_files:
if structure_factors_files.has_key(x):
MTRIX_with_Straucture_Factor.extend([x,good_MTRIX_pdb_files[x],structure_factors_files[x]])
l = len(good_MTRIX_pdb_files)
i = len(MTRIX_with_Straucture_Factor)
print 'The number of both structure factors and good MTRIX with the same name: {} from a total of {}'.format(i,l)
pickle.dump(MTRIX_with_Straucture_Factor,open('MTRIX_with_Straucture_Factor_file_list','w'))
i = 0
for x in good_BIOMT_pdb_files:
if structure_factors_files.has_key(x):
i += 1
l = len(good_BIOMT_pdb_files)
print 'The number of both structure factors and good BIOMT with the same name: {} from a total of {}'.format(i,l)
#f1 = open('dict_good_MTRIX_pdb_files.txt','w')
#f1.writelines([x+'\n' for x in dict_good_MTRIX_pdb_files])
#f1.close()
# run test - compare R-work from reconstructed pdb file to that of the mtz data
if recon_test:
p = start_multiprocessing()
print '*'*50
print 'Start testing MTRIX reconstruction testing'
print '*'*50
# Load previous results
reconstruction_test_dict = pickle.load(open('reconstruction_test_dict','r'))
reconstruction_test_list = pickle.load(open('reconstruction_test_list','r'))
# iterate over files and calculate quality of R-work of reconstructed pdb file
# Test of all files in MTRIX_with_Straucture_Factor
# collect all good results and save them on a file so that
# not to repeat them
#tested_files = open('Collect_tested_files',"r").readlines()
tested_files = open('Collect_tested_files',"r").read().splitlines()
files_with_problems = open('files_with_problems',"r").read().splitlines()
# Clean the remarks - use only protein name
files_with_problems = [x[:4] for x in files_with_problems]
# append results from this run
f = open('Collect_tested_files',"a")
g = open('files_with_problems',"a")
for file_name in MTRIX_with_Straucture_Factor:
if (file_name not in tested_files) and (file_name not in files_with_problems):
print file_name
pdb_file = good_MTRIX_pdb_files[file_name]
sf_file = structure_factors_files[file_name]
# calculate the percent of difference of R-work reconstructed vs mtz data
p.apply_async(r_factor_calc,([pdb_file,sf_file],),
{'eps':2e-3,'file_name':file_name,'strOut':True,'fromRCSB':False},
callback=collect_results)
# close the parallel process
p.close()
p.join()
# The multiprocessing pool does not catch all the raised errors and Sorry exceptions
# need to run the regular version after this is done
print '*'*50
print 'Done with calculating'
print '*'*50
for x in results:
# x is of the form 'pdbname:r_score'
[file_name,r] = x.split(':')
r = float(r)
reconstruction_test_dict[file_name] = r
reconstruction_test_list.append(r)
outString = '{}:OK\n'.format(x)
if r<1:
f.write(outString)
else:
g.write(outString)
f.close()
g.close()
## Test of a single file
##file_name = '4kn2' # have both IOBS and FOBS
#file_name = '4aun' # have issues running phenix.cif_as_mtz
#file_name = '2wws'
#print file_name
#pdb_file = good_MTRIX_pdb_files[file_name]
#sf_file = structure_factors_files[file_name]
## calculate the percent of difference of R-work reconstructed vs mtz data
#t = r_factor_calc([pdb_file,sf_file],eps=1e-3)
#reconstruction_test_dict[file_name] = t
#reconstruction_test_list.append(t)
# save the results
pickle.dump(reconstruction_test_dict,open('reconstruction_test_dict','w'))
pickle.dump(reconstruction_test_list,open('reconstruction_test_list','w'))
print 'Done...'
if __name__=='__main__':
# move to working directory
os.chdir('/net/cci-filer2/raid1/home/youval/Work/work')
#os.chdir('c:\\Phenix\\Dev\\Work\\work')
# check how many processors are available
run(recon_test=True)
|
|
import inspect
from graceful.validators import min_validator, max_validator
class BaseField:
"""Base field class for subclassing.
To create new field type subclass :any:`BaseField` and implement following
methods:
* ``from_representation()``: converts representation (used in
request/response body) to internal value.
* ``to_representation()``: converts internal value to representation
that will be used in response body.
Args:
details (str): human readable description of field (it will be used
for describing resource on OPTIONS requests).
label (str): human readable label of a field (it will be used for
describing resource on OPTIONS requests).
*Note: it is recommended to use field names that are
self-explanatory instead of relying on field labels.*
source (str): name of internal object key/attribute that will be
passed to field on ``.to_representation()`` call. Special ``'*'``
value is allowed that will pass whole object to field when making
representation. If not set then default source will
be a field name used as a serializer's attribute.
validators (list): list of validator callables.
many (bool): set to True if field is in fact a list of given type
objects.
read_only (bool): True if field is read-only and cannot be set/modified
via POST, PUT, or PATCH requests.
write_only (bool): True if field is write-only and cannot be retrieved
via GET requests.
allow_null (bool): True if field can have intentional `null` values
which will be interpreted as `None` afterwards.
.. versionadded:: 0.5.0
Example:
.. code-block:: python
class BoolField(BaseField):
def from_representation(self, data):
if data in {'true', 'True', 'yes', '1', 'Y'}:
return True
elif data in {'false', 'False', 'no', '0', 'N'}:
return False
else:
raise ValueError(
"{data} is not valid boolean field".format(
data=data
)
)
def to_representation(self, value):
return ["True", "False"][value]
"""
#: Two-tuple ``(label, url)`` pointing to represented type specification
#: (for documentation).
spec = None
#: String label of represented type (for documentation).
type = None
def __init__(
self,
details,
label=None,
source=None,
validators=None,
many=False,
read_only=False,
write_only=False,
allow_null=False,
):
"""Initialize field definition."""
self.label = label
self.source = source
self.details = details
self.validators = validators or []
self.many = many
self.read_only = read_only
self.write_only = write_only
self.allow_null = allow_null
if self.write_only and self.read_only:
raise ValueError(
"Field cannot be read-only and write-only at the same time."
)
def from_representation(self, data):
"""Convert representation value to internal value.
Note:
This is a method handler stub and should be redefined in the
``BaseField`` subclasses.
"""
raise NotImplementedError(
"{cls}.from_representation() method not implemented".format(
cls=self.__class__.__name__
)
)
def to_representation(self, value):
"""Convert representation value to internal value.
Note:
This is a method handler stub and should be redefined in the
``BaseField`` subclasses.
"""
raise NotImplementedError(
"{cls}.to_representation() method not implemented".format(
cls=self.__class__.__name__
)
)
def describe(self, **kwargs):
"""
Describe this field instance for purpose of self-documentation.
Args:
kwargs (dict): dictionary of additional description items for
extending default description
Returns:
dict: dictionary of description items
The suggested way of overriding description fields or extending them with
additional items is calling the super class method with new/overridden
fields passed as keyword arguments, like the following:
.. code-block:: python
class DummyField(BaseField):
def describe(self, **kwargs):
return super().describe(is_dummy=True, **kwargs)
"""
description = {
'label': self.label,
'details': inspect.cleandoc(self.details),
'type': "list of {}".format(self.type) if self.many else self.type,
'spec': self.spec,
'read_only': self.read_only,
'write_only': self.write_only,
'allow_null': self.allow_null,
}
description.update(kwargs)
return description
def validate(self, value):
"""Perform validation on value by running all field validators.
Single validator is a callable that accepts one positional argument
and raises :any:`ValidationError` when validation fails.
The error message included in the exception will be included in the HTTP
error response.
Args:
value: internal value to validate
Returns:
None
Note:
Concept of validation for fields is understood here as a process
of checking if data of valid type (successfully parsed/processed by
``.from_representation`` handler) does meet some other constraints
(length, bounds, uniqueness, etc.). So this method is always called
with result of ``.from_representation()`` passed as its argument.
"""
for validator in self.validators:
validator(value)
class RawField(BaseField):
"""Represents raw field subtype.
Any value from resource object will be returned as is without any
conversion and no control over serialized value type is provided. Can be
used only with very simple data types like int, float, str etc. but can
eventually cause problems if value provided in representation has type
that is not accepted in application.
Effect of using this can differ between various content-types.
"""
type = 'raw'
def from_representation(self, data):
"""Return representation value as-is (note: content-type dependent)."""
return data
def to_representation(self, value):
"""Return internal value as-is (note: content-type dependent)."""
return value
class StringField(BaseField):
"""Represents string field subtype without any extensive validation."""
type = 'string'
def from_representation(self, data):
"""Convert representation value to ``str``."""
return str(data)
def to_representation(self, value):
"""Convert representation value to ``str``."""
return str(value)
class BoolField(BaseField):
"""Represents boolean type of field.
By default accepts a wide range of incoming True/False representations:
* False: ``['False', 'false', 'FALSE', 'F', 'f', '0', 0, 0.0, False]``
* True: ``['True', 'true', 'TRUE', 'T', 't', '1', 1, True]``
By default, the output representations of internal object's value are
Python's False/True values that will be later serialized to form that
is native for content-type of use.
This behavior can be changed using the ``representations`` field argument.
Note that when using the ``representations`` parameter you need to make
a strict decision; there is no ability to accept multiple options for
true/false representations. In any case, strictly defining these values
is the recommended approach.
Args:
representations (tuple): two-tuple with representations for
(False, True) values, that will be used instead of default values
"""
type = 'bool'
_TRUE_VALUES = {'True', 'true', 'TRUE', 'T', 't', '1', 1, True}
_FALSE_VALUES = {'False', 'false', 'FALSE', 'F', 'f', '0', 0, 0.0, False}
_DEFAULT_REPRESENTATIONS = (False, True)
def __init__(
self,
details,
representations=None,
**kwargs
):
"""Initialize field definition and set preffered representations."""
super().__init__(details, **kwargs)
if representations:
# could not resist...
self._FALSE_VALUES = {representations[False]}
self._TRUE_VALUES = {representations[True]}
self.representations = representations or self._DEFAULT_REPRESENTATIONS
def from_representation(self, data):
"""Convert representation value to ``bool`` if it has expected form."""
if data in self._TRUE_VALUES:
return True
elif data in self._FALSE_VALUES:
return False
else:
raise ValueError(
"{type} type value must be one of {values}".format(
type=self.type,
values=self._TRUE_VALUES.union(self._FALSE_VALUES)
)
)
def to_representation(self, value):
"""Convert internal boolean value to one of defined representations."""
return self.representations[value]
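# Usage sketch (not part of graceful): custom representations restrict the
# accepted values to exactly the provided pair and are echoed back by
# ``to_representation``. The field below is hypothetical.
def _example_bool_field():
    flag = BoolField("is the feature enabled", representations=('no', 'yes'))
    return flag.to_representation(flag.from_representation('yes'))  # -> 'yes'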
class IntField(BaseField):
"""Represents integer type of field.
Field of this type accepts both integers and strings as an incoming
integer representation and always returns int as a representation of
internal object's value that will be later serialized to form that is
native for content-type of use.
This field accepts optional arguments that simply add new `max` and `min`
value validation.
Args:
max_value (int): optional max value for validation
min_value (int): optional min value for validation
"""
type = 'int'
def __init__(
self,
details,
max_value=None,
min_value=None,
**kwargs
):
"""Initialize field definition and attach default validators."""
super().__init__(details, **kwargs)
self.max_value = max_value
self.min_value = min_value
if max_value is not None:
self.validators.append(max_validator(max_value))
if min_value is not None:
self.validators.append(min_validator(min_value))
def to_representation(self, value):
"""Convert internal value to ``int``."""
return int(value)
def from_representation(self, data):
"""Convert representation value to ``int``."""
return int(data)
class FloatField(BaseField):
"""Represents float type of field.
Accepts both floats and strings as an incoming float number
representation and always returns float as a representation of internal
object's value that will be later serialized to form that is native for
content-type of use.
This field accepts optional arguments that simply add new `max` and `min`
value validation.
Args:
max_value (int): optional max value for validation
min_value (int): optional min value for validation
"""
type = 'float'
def __init__(
self,
details,
max_value=None,
min_value=None,
**kwargs
):
"""Initialize field definition and attach default validators."""
super().__init__(details, **kwargs)
self.max_value = max_value
self.min_value = min_value
if max_value is not None:
self.validators.append(max_validator(max_value))
if min_value is not None:
self.validators.append(min_validator(min_value))
def to_representation(self, value):
"""Convert internal value to ``float``."""
return float(value)
def from_representation(self, data):
"""Convert representation value to ``float``."""
return float(data)
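# Usage sketch (not part of graceful): bounds attach min/max validators, so
# ``validate()`` raises a validation error for out-of-range values. The field
# below is hypothetical.
def _example_int_field_roundtrip():
    score = IntField("player score", min_value=0, max_value=100)
    value = score.from_representation("42")  # -> 42
    score.validate(value)                    # passes: 0 <= 42 <= 100
    return score.to_representation(value)    # -> 42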
|
|
try:
from xml.etree import cElementTree as etree
except ImportError:
from xml.etree import ElementTree as etree
class TagDescriptor(object):
def __get__(_self, _instance, cls):
return getattr(cls, '__tag__', None) or cls.__name__
class TypeNameDescriptor(object):
def __get__(_self, instance, cls):
return instance.attributes.get('name') if instance else cls.tag
def resolve_type(etype):
if isinstance(etype, TypeAlias) or (type(etype) is type and issubclass(etype, TypeAlias)):
return etype.get_real_type()
else:
return etype
class Node(object):
tag = TagDescriptor()
type_name = TypeNameDescriptor()
def __init__(self, **attributes):
self.attributes = attributes.copy()
self.children = []
def __call__(self, *children):
self.children.extend(children)
return self
def get_node(self, creator):
attributes = self.attributes
if 'type' in attributes:
attributes = attributes.copy()
etype = resolve_type(attributes['type'])
attributes['type'] = creator.get_prefixed_tag(etype.namespace, etype.type_name)
if 'base' in attributes:
attributes = attributes.copy()
etype = resolve_type(attributes['base'])
attributes['base'] = creator.get_prefixed_tag(etype.namespace, etype.type_name)
node = creator(self.__class__.namespace, self.tag, attributes)
for child in self.children:
node.append(child.get_node(creator))
return node
class Type(Node):
class InstanceClassDescriptor(object):
def __init__(self):
self.cache = {}
def __get__(self, _instance, cls):
try:
return self.cache[cls]
except KeyError:
pass
result = self.cache[cls] = create_instance_class(cls)
return result
instance_class = InstanceClassDescriptor()
@classmethod
def get_name(cls):
return cls.__name__
@classmethod
def instance(cls, *args, **kwargs):
return TypeInstance(cls, *args, **kwargs)
@staticmethod
def alias(element, name):
fields = {}
fields['_element'] = element
fields['_name'] = name
name = name + 'Alias'
return type(name, (TypeAlias,), fields)
class TypeAlias(Type):
@classmethod
def get_real_type(cls):
return cls._element.schema.types[cls._name]
@classmethod
def fill_node(cls, node, instance, creator):
cls.get_real_type().fill_node(node, instance, creator)
@classmethod
def init(cls, instance, *args, **kwargs):
cls.get_real_type().init(instance, *args, **kwargs)
@classmethod
def from_node(cls, node):
return cls.get_real_type().from_node(node)
class Namespace(object):
def __init__(self, namespace, abbr=None):
self.namespace = namespace
self.abbr = abbr
def __str__(self):
return self.namespace
class Instance(object):
def __init__(self, _element, *args, **kwargs):
self._element = _element
self._type.init(self, *args, **kwargs)
def get_node(self, creator):
node = self._element.create_node(creator)
self._type.fill_node(node, self, creator)
return node
@property
def tag(self):
return self._element.name
class TypeInstance(object):
def __init__(self, type, *args, **kwargs):
self.inferior_instance = type.instance_class(None, *args, **kwargs)
def create(self, element):
self.inferior_instance._element = element
return self.inferior_instance
def __getattr__(self, name):
return getattr(self.inferior_instance, name)
def __setattr__(self, name, value):
if name == 'inferior_instance':
object.__setattr__(self, name, value)
else:
setattr(self.inferior_instance, name, value)
class ElementInstance(Instance):
def __init__(self, tree):
self.value = tree
def get_node(self, _creator):
return self.value
class BareInstance(object):
def __init__(self, args, kwargs):
self.args = args
self.kwargs = kwargs
def create(self, element):
return element.instance(*self.args, **self.kwargs)
def create_instance_class(etype):
fields = {}
name = etype.get_name() + 'Instance'
fields['_type'] = etype
return type(name, (Instance,), fields)
def get_root(node_getter):
creator = ElementCreator()
node = node_getter.get_node(creator)
for uri, prefix in creator.prefixes.iteritems():
node.attrib['xmlns:%s' % prefix] = uri
return node
class ElementCreator(object):
def __init__(self):
self.ns_counter = 0
self.prefixes = {}
def get_prefix(self, namespace):
try:
return self.prefixes[namespace.namespace]
except KeyError:
pass
if namespace.abbr:
prefix = namespace.abbr
else:
prefix = 'ns%d' % self.ns_counter
self.ns_counter += 1
self.prefixes[namespace.namespace] = prefix
return prefix
def get_prefixed_tag(self, namespace, tag):
return '{}:{}'.format(self.get_prefix(namespace), tag)
def __call__(self, namespace, tag, *args, **kwargs):
return etree.Element(self.get_prefixed_tag(namespace, tag), *args, **kwargs)
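# A minimal sketch (hypothetical namespace and element names) of building a
# prefixed tree with these helpers: the tag comes from __tag__/the class name,
# the prefix from ElementCreator, and get_root() attaches the collected xmlns
# declarations to the returned element.
XSD = Namespace('http://www.w3.org/2001/XMLSchema', 'xsd')
class ExampleElement(Node):
    namespace = XSD
    __tag__ = 'element'
def _example_schema_root():
    root = ExampleElement(name='person')(ExampleElement(name='age'))
    return etree.tostring(get_root(root))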
|
|
# Copyright 2015 Alex Meade
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client
from oslo_log import log
from oslo_utils import uuidutils
import webob
from webob import exc
from manila.api import common
from manila.api.openstack import wsgi
import manila.api.views.share_group_snapshots as share_group_snapshots_views
from manila import db
from manila import exception
from manila.i18n import _
import manila.share_group.api as share_group_api
LOG = log.getLogger(__name__)
SG_GRADUATION_VERSION = '2.55'
class ShareGroupSnapshotController(wsgi.Controller, wsgi.AdminActionsMixin):
"""The share group snapshots API controller for the OpenStack API."""
resource_name = 'share_group_snapshot'
_view_builder_class = (
share_group_snapshots_views.ShareGroupSnapshotViewBuilder)
def __init__(self):
super(ShareGroupSnapshotController, self).__init__()
self.share_group_api = share_group_api.API()
def _get_share_group_snapshot(self, context, sg_snapshot_id):
try:
return self.share_group_api.get_share_group_snapshot(
context, sg_snapshot_id)
except exception.NotFound:
msg = _("Share group snapshot %s not found.") % sg_snapshot_id
raise exc.HTTPNotFound(explanation=msg)
@wsgi.Controller.authorize('get')
def _show(self, req, id):
"""Return data about the given share group snapshot."""
context = req.environ['manila.context']
sg_snapshot = self._get_share_group_snapshot(context, id)
return self._view_builder.detail(req, sg_snapshot)
@wsgi.Controller.api_version('2.31', '2.54', experimental=True)
def show(self, req, id):
return self._show(req, id)
@wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa
def show(self, req, id): # pylint: disable=function-redefined # noqa F811
return self._show(req, id)
@wsgi.Controller.authorize('delete')
def _delete_group_snapshot(self, req, id):
"""Delete a share group snapshot."""
context = req.environ['manila.context']
LOG.info("Delete share group snapshot with id: %s",
id, context=context)
sg_snapshot = self._get_share_group_snapshot(context, id)
try:
self.share_group_api.delete_share_group_snapshot(
context, sg_snapshot)
except exception.InvalidShareGroupSnapshot as e:
raise exc.HTTPConflict(explanation=e.message)
return webob.Response(status_int=http_client.ACCEPTED)
@wsgi.Controller.api_version('2.31', '2.54', experimental=True)
def delete(self, req, id):
return self._delete_group_snapshot(req, id)
@wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa
def delete(self, req, id): # pylint: disable=function-redefined # noqa F811
return self._delete_group_snapshot(req, id)
@wsgi.Controller.api_version('2.31', '2.54', experimental=True)
def index(self, req):
"""Returns a summary list of share group snapshots."""
return self._get_share_group_snaps(req, is_detail=False)
@wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa
def index(self, req): # pylint: disable=function-redefined # noqa F811
"""Returns a summary list of share group snapshots."""
return self._get_share_group_snaps(req, is_detail=False)
@wsgi.Controller.api_version('2.31', '2.54', experimental=True)
def detail(self, req):
"""Returns a detailed list of share group snapshots."""
return self._get_share_group_snaps(req, is_detail=True)
@wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa
def detail(self, req): # pylint: disable=function-redefined # noqa F811
"""Returns a detailed list of share group snapshots."""
return self._get_share_group_snaps(req, is_detail=True)
@wsgi.Controller.authorize('get_all')
def _get_share_group_snaps(self, req, is_detail):
"""Returns a list of share group snapshots."""
context = req.environ['manila.context']
search_opts = {}
search_opts.update(req.GET)
# Remove keys that are not related to group attrs
search_opts.pop('limit', None)
search_opts.pop('offset', None)
sort_key = search_opts.pop('sort_key', 'created_at')
sort_dir = search_opts.pop('sort_dir', 'desc')
snaps = self.share_group_api.get_all_share_group_snapshots(
context, detailed=is_detail, search_opts=search_opts,
sort_dir=sort_dir, sort_key=sort_key)
limited_list = common.limited(snaps, req)
if is_detail:
snaps = self._view_builder.detail_list(req, limited_list)
else:
snaps = self._view_builder.summary_list(req, limited_list)
return snaps
@wsgi.Controller.authorize('update')
def _update_group_snapshot(self, req, id, body):
"""Update a share group snapshot."""
context = req.environ['manila.context']
key = 'share_group_snapshot'
if not self.is_valid_body(body, key):
msg = _("'%s' is missing from the request body.") % key
raise exc.HTTPBadRequest(explanation=msg)
sg_snapshot_data = body[key]
valid_update_keys = {
'name',
'description',
}
invalid_fields = set(sg_snapshot_data.keys()) - valid_update_keys
if invalid_fields:
msg = _("The fields %s are invalid or not allowed to be updated.")
raise exc.HTTPBadRequest(explanation=msg % invalid_fields)
sg_snapshot = self._get_share_group_snapshot(context, id)
sg_snapshot = self.share_group_api.update_share_group_snapshot(
context, sg_snapshot, sg_snapshot_data)
return self._view_builder.detail(req, sg_snapshot)
@wsgi.Controller.api_version('2.31', '2.54', experimental=True)
def update(self, req, id, body):
return self._update_group_snapshot(req, id, body)
@wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa
def update(self, req, id, body): # pylint: disable=function-redefined # noqa F811
return self._update_group_snapshot(req, id, body)
@wsgi.Controller.authorize('create')
def _create(self, req, body):
"""Creates a new share group snapshot."""
context = req.environ['manila.context']
if not self.is_valid_body(body, 'share_group_snapshot'):
msg = _("'share_group_snapshot' is missing from the request body.")
raise exc.HTTPBadRequest(explanation=msg)
share_group_snapshot = body.get('share_group_snapshot', {})
share_group_id = share_group_snapshot.get('share_group_id')
if not share_group_id:
msg = _("Must supply 'share_group_id' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
if not uuidutils.is_uuid_like(share_group_id):
msg = _("The 'share_group_id' attribute must be a uuid.")
raise exc.HTTPBadRequest(explanation=msg)
kwargs = {"share_group_id": share_group_id}
if 'name' in share_group_snapshot:
kwargs['name'] = share_group_snapshot.get('name')
if 'description' in share_group_snapshot:
kwargs['description'] = share_group_snapshot.get('description')
try:
new_snapshot = self.share_group_api.create_share_group_snapshot(
context, **kwargs)
except exception.ShareGroupNotFound as e:
raise exc.HTTPBadRequest(explanation=e.message)
except exception.InvalidShareGroup as e:
raise exc.HTTPConflict(explanation=e.message)
return self._view_builder.detail(req, dict(new_snapshot.items()))
@wsgi.Controller.api_version('2.31', '2.54', experimental=True)
@wsgi.response(202)
def create(self, req, body):
return self._create(req, body)
@wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa
@wsgi.response(202)
def create(self, req, body): # pylint: disable=function-redefined # noqa F811
return self._create(req, body)
@wsgi.Controller.authorize('get')
def _members(self, req, id):
"""Returns a list of share group snapshot members."""
context = req.environ['manila.context']
snaps = self.share_group_api.get_all_share_group_snapshot_members(
context, id)
limited_list = common.limited(snaps, req)
snaps = self._view_builder.member_list(req, limited_list)
return snaps
@wsgi.Controller.api_version('2.31', '2.54', experimental=True)
def members(self, req, id):
return self._members(req, id)
@wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa
def members(self, req, id): # pylint: disable=function-redefined # noqa F811
return self._members(req, id)
def _update(self, *args, **kwargs):
db.share_group_snapshot_update(*args, **kwargs)
def _get(self, *args, **kwargs):
return self.share_group_api.get_share_group_snapshot(*args, **kwargs)
def _delete(self, context, resource, force=True):
db.share_group_snapshot_destroy(context.elevated(), resource['id'])
@wsgi.Controller.api_version('2.31', '2.54', experimental=True)
@wsgi.action('reset_status')
def share_group_snapshot_reset_status(self, req, id, body):
return self._reset_status(req, id, body)
# pylint: disable=function-redefined
@wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa
@wsgi.action('reset_status')
def share_group_snapshot_reset_status(self, req, id, body): # noqa F811
return self._reset_status(req, id, body)
# pylint: enable=function-redefined
@wsgi.Controller.api_version('2.31', '2.54', experimental=True)
@wsgi.action('force_delete')
def share_group_snapshot_force_delete(self, req, id, body):
return self._force_delete(req, id, body)
# pylint: disable=function-redefined
@wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa
@wsgi.action('force_delete')
def share_group_snapshot_force_delete(self, req, id, body): # noqa F811
return self._force_delete(req, id, body)
def create_resource():
return wsgi.Resource(ShareGroupSnapshotController())
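# Illustrative request body (hypothetical values) accepted by
# ShareGroupSnapshotController._create(): only 'share_group_id' is required
# and it must be a UUID; 'name' and 'description' are optional.
EXAMPLE_CREATE_BODY = {
    'share_group_snapshot': {
        'share_group_id': 'f1b4260d-1b47-4b63-8f43-2a8c43e4b63a',
        'name': 'daily-snapshot',
        'description': 'created by the nightly snapshot policy',
    }
}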
|
|
"""Tests for pytest-cov.
Known issues:
- If py 2 then can have tx for any py 2, but problems if tx for py 3.
- If py 3.0 then can have tx for py 3.0 / 3.1, but problems if tx for py 2.
- If py 3.1 then can have tx for py 3.1, but problems if tx for py 2 or py 3.0.
- For py 3.0 coverage seems to give incorrect results, it reports all
covered except the one line which it should have actually covered.
Issue reported upstream, also only problem with pass statement and
is fine with simple assignment statement.
"""
import sys
import py
import pytest
pytest_plugins = 'pytester', 'cov'
SCRIPT = '''
import sys
def pytest_generate_tests(metafunc):
for i in range(10):
metafunc.addcall()
def test_foo():
assert True
if sys.version_info[0] > 5:
assert False
'''
SCRIPT_CHILD = '''
import sys
idx = int(sys.argv[1])
if idx == 0:
pass
if idx == 1:
pass
'''
SCRIPT_PARENT = '''
import subprocess
import sys
def pytest_generate_tests(metafunc):
for i in range(2):
metafunc.addcall(funcargs=dict(idx=i))
def test_foo(idx):
out, err = subprocess.Popen(
[sys.executable, 'child_script.py', str(idx)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
# there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_FUNCARG = '''
import coverage
def test_foo(cov):
assert isinstance(cov, coverage.control.coverage)
'''
SCRIPT_FUNCARG_NOT_ACTIVE = '''
def test_foo(cov):
assert cov is None
'''
MULTIPROCESSING_SCRIPT = '''
import multiprocessing
def target_fn():
a = True
return a
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
p.join()
'''
SCRIPT_FAIL = '''
def test_fail():
assert False
'''
SCRIPT_RESULT = '8 * 88%'
CHILD_SCRIPT_RESULT = '6 * 100%'
PARENT_SCRIPT_RESULT = '8 * 100%'
def test_central(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central * %s *' % SCRIPT_RESULT,
'*10 passed*'
])
assert result.ret == 0
def test_no_cov_on_fail(testdir):
script = testdir.makepyfile(SCRIPT_FAIL)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--no-cov-on-fail',
script)
assert 'coverage: platform' not in result.stdout.str()
result.stdout.fnmatch_lines(['*1 failed*'])
def test_dist_collocated(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_collocated * %s *' % SCRIPT_RESULT,
'*10 passed*'
])
assert result.ret == 0
@pytest.mark.xfail(sys.platform == 'win32' and sys.version_info[:2] < (3, 4),
reason='path rewrite in cov_core is somehow broken')
def test_dist_not_collocated(testdir):
script = testdir.makepyfile(SCRIPT)
dir1 = testdir.mkdir('dir1')
dir2 = testdir.mkdir('dir2')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % script.basename,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_not_collocated * %s *' % SCRIPT_RESULT,
'*10 passed*'
])
assert result.ret == 0
def test_central_subprocess(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script * %s *' % CHILD_SCRIPT_RESULT,
'parent_script * %s *' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_dist_subprocess_collocated(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script * %s *' % CHILD_SCRIPT_RESULT,
'parent_script * %s *' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
@pytest.mark.xfail(sys.platform == 'win32' and sys.version_info[:2] < (3, 4),
reason='path rewrite in cov_core is somehow broken')
def test_dist_subprocess_not_collocated(testdir, tmpdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
child_script = scripts.dirpath().join('child_script.py')
dir1 = tmpdir.mkdir('dir1')
dir2 = tmpdir.mkdir('dir2')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % child_script,
'--rsyncdir=%s' % parent_script,
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script * %s *' % CHILD_SCRIPT_RESULT,
'parent_script * %s *' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_empty_report(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=non_existent_module',
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'*10 passed*'
])
assert result.ret == 0
matching_lines = [line for line in result.outlines if '%' in line]
assert not matching_lines
@pytest.mark.xfail(reason='This tests expects a Python installation without '
'pytest-cov installed. Maybe we can simulate this '
'with a virtualenv')
def test_dist_missing_data(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//python=%s' % sys.executable,
script)
result.stdout.fnmatch_lines([
'*- coverage: failed slaves -*'
])
assert result.ret == 0
def test_funcarg(testdir):
script = testdir.makepyfile(SCRIPT_FUNCARG)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_funcarg * 3 * 100%*',
'*1 passed*'
])
assert result.ret == 0
def test_funcarg_not_active(testdir):
script = testdir.makepyfile(SCRIPT_FUNCARG_NOT_ACTIVE)
result = testdir.runpytest('-v',
script)
result.stdout.fnmatch_lines([
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.xfail(sys.platform == 'win32' and sys.version_info[0] < 3,
reason='multiprocessing coverage does not work '
'right now on Windows with Python 2')
def test_multiprocessing_subprocess(testdir):
py.test.importorskip('multiprocessing.util')
script = testdir.makepyfile(MULTIPROCESSING_SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_subprocess * 8 * 100%*',
'*1 passed*'
])
assert result.ret == 0
|
|
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
import urllib
#import zlib
#import bz2
import zipfile
#import tarfile
from optparse import OptionParser
ENV = None
OPTS = None
PATH = None
TARGETS = None
optparser = None
if not hasattr(str, 'format'):
# Dirty implementation of str.format()
# Ignores format_spec
import __builtin__
import re
re_format = re.compile('((^{)|([^{]{))(((([a-zA-Z_]\w*)|(\d*))(([.][^.[]+?)|([[][^.[]+?[]]))*?))([!].)?(:[^}]*)?}')
re_format_field = re.compile('([.][^.[]+)|([[][^.[]+[]]).*')
class str(__builtin__.str):
def format(self, *args, **kwargs):
idx = [0]
def sub(m):
start_char = m.group(0)[0]
if start_char == '{':
start_char = ''
field = m.group(8)
conversion = m.group(12)
format_spec = m.group(13)
if field is None:
field = m.group(7)
v = kwargs[field]
else:
if field != '':
i = int(field)
else:
i = idx[0]
idx[0] += 1
v = args[i]
s = m.group(4)
s = s[len(field):]
while s:
m = re_format_field.match(s)
name = m.group(1)
if name is not None:
v = getattr(v, name[1:])
s = s[len(name):]
else:
i = m.group(2)[1:-1]
s = s[len(i) + 2:]
try:
i = int(i)
except:
pass
v = v[i]
if conversion is None or conversion == 's':
v = str(v)
elif conversion == 'r':
v = repr(v)
else:
raise ValueError("Unknown conversion character '%s'" % conversion)
return start_char + v
s = re_format.sub(sub, self)
return s.replace('{{', '{').replace('}}', '}')
__builtin__.str = str
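# Illustrative behaviour of the str.format() shim above (an assumption based on
# the regexes, not an exhaustive spec): positional, auto-numbered and keyword
# fields such as '{0}', '{}' and '{name}' are substituted, attribute/index
# access like '{0.attr}' or '{0[key]}' is followed, '!s'/'!r' conversions are
# honoured, any format_spec after ':' is ignored, and '{{'/'}}' still escape
# braces.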
PACKAGE = {
'title': 'example',
'desc': 'default description',
}
INDEX = {
'example': r'''
<hr/>
<!-- start {name} {{example.{name}._comment_end}}
<h2 class='title'>{{example.{name}.title}}</h2>
<h3 class='desc'>{{example.{name}.desc}}</h3>
<ul class='demos'>
{{example.{name}.demos}}
</ul>
{{example.{name}._comment_start}} -->
<h4 class='source'><a href="{name}/">source directory</a> ({name})<h4>
''',
'demo': r'''
<li class='demo'>(demo) <a href="{name}/output/{target}.html">{target}</a></li>
''',
}
class _e(object):
_ident = 'example'
_special = {
'_comment_start': '<!--',
'_comment_end': '-->',
}
def __init__(self, examples=None, **kwds):
if examples is None:
self._examples = kwds
else:
self._examples = examples
self._examples.update(kwds)
self._path = [self._ident]
def __repr__(self):
return '_e(%s)' % repr(self.__str__())
def __str__(self):
try:
curr = self._examples
for frag in self._path[1:]:
curr = curr[frag]
except KeyError:
if frag in self._special:
curr = self._special[frag]
else:
curr = '{%s}' % '.'.join(self._path)
self._path[1:] = []
return curr
def __getattr__(self, name):
self._path.append(name)
return self
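# How _e is used (a sketch of the intent, inferred from __str__ above): the
# INDEX templates are formatted twice. On the first pass '{{example.<name>...}}'
# survives as '{example.<name>...}'; on the second pass install() formats the
# page with example=_e(packages), and _e resolves each dotted path against the
# collected package dicts, falling back to the _special entries or leaving the
# placeholder untouched when a key is missing.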
def _find_python():
if sys.version_info[0] == 2 and sys.executable and os.path.isfile(sys.executable):
return sys.executable
for python in ('python2', 'python2.7', 'python2.6'):
try:
subprocess.call([python, '-c', '"raise SystemExit"'])
return python
except OSError:
pass
return 'python'
def _list_examples():
examples = [
example
for example in os.listdir(ENV['DIR_EXAMPLES'])
if os.path.isfile(os.path.join(ENV['DIR_EXAMPLES'], example, '__main__.py'))
and not example.startswith('_')
]
examples.sort()
return examples
def _process_pyjamas(root):
lim = 3
while lim > 0:
root = os.path.join(root, '..')
boot = os.path.join(root, 'bootstrap.py')
if os.path.isfile(boot):
root = os.path.abspath(root)
boot = os.path.abspath(boot)
if sys.platform == 'win32':
pyjsbuild = os.path.join(root, 'bin', 'pyjsbuild.py')
else:
pyjsbuild = os.path.join(root, 'bin', 'pyjsbuild')
break
lim = lim - 1
if lim == 0:
raise RuntimeError('Unable to locate pyjamas root.')
# Bootstrap on test failure; this attempts to fix a couple of issues at once
null = open(os.devnull, 'wb')
try:
if subprocess.call(['python', pyjsbuild], stdout=null, stderr=subprocess.STDOUT) > 0:
raise OSError
except OSError:
subprocess.call(['python', boot], stdout=null, stderr=subprocess.STDOUT)
return {
'DIR_PYJAMAS': root,
'BASE_EXAMPLES': os.path.join(root, 'examples'),
'BIN_PYJSBUILD': pyjsbuild,
}
def _process_environ():
return dict([
(k[5:], v[:])
for k, v in os.environ.items()
if k.startswith('PYJS')
])
def _process_args(args):
return {'ARG_PYJSBUILD': args or ['-O']}
def _process_path(targets, target):
path = PATH
if isinstance(targets, dict):
if 'path' in targets[target]:
path = targets[target]['path']
if not path.startswith(os.sep):
path = os.path.join(PATH, path)
return path
def get_optparser(**kwargs):
global optparser
if optparser is None:
optparser = OptionParser(**kwargs)
add_option = optparser.add_option
add_option(
'--download',
dest='download',
action='store_true',
default=False,
help='permit downloads of files or libraries',
)
add_option(
'--misc',
dest='misc',
action='store_true',
default=False,
help='build miscellaneous examples',
)
add_option(
'--deprecated',
dest='deprecated',
action='store_true',
default=False,
help='build deprecated examples',
)
return optparser
def init(path):
global ENV, PATH, OPTS
optparser = get_optparser()
opts, args = optparser.parse_args()
OPTS=opts
PATH = path
ENV = {}
ENV.update(_process_environ())
ENV.update(_process_args(args))
if 'BIN_PYTHON' not in ENV:
ENV['BIN_PYTHON'] = _find_python()
if 'DIR_PYJAMAS' not in ENV:
ENV.update(_process_pyjamas(path))
if 'DIR_EXAMPLES' not in ENV:
ENV['DIR_EXAMPLES'] = os.path.dirname(path)
ENV['NAME_EXAMPLE'] = os.path.basename(path)
ENV['DIR_EXAMPLE'] = path
def download(downloads):
for download in downloads:
url = download['url']
dst = download['dst']
if not os.path.exists(dst):
if not OPTS.download:
raise TypeError('Downloads not permitted. Use --download option to permit')
urllib.urlretrieve(url, dst)
if download.get('unzip'):
path = download.get('path', os.path.dirname(dst))
z = zipfile.ZipFile(dst)
z.extractall(path)
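# Shape of a `downloads` entry consumed by download() above (keys taken from
# the code; the URL and file names are purely illustrative):
#   {'url': 'http://example.com/assets.zip',   # source to fetch
#    'dst': 'assets.zip',                      # local destination path
#    'unzip': True,                            # optional: extract after download
#    'path': 'output'}                         # optional: extraction directory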
def setup(targets):
for target in targets:
downloads = None
path = _process_path(targets, target)
if isinstance(targets, dict):
downloads = targets[target].get('downloads')
if not os.path.isfile(os.path.join(path, target)):
raise TypeError('Target `%s` does not exist.' % target)
if downloads:
download(downloads)
global TARGETS
TARGETS = targets
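# TARGETS may be a plain list of build targets, or a dict keyed by target name.
# A sketch of the dict form, using only the keys read by _process_path(),
# setup() and translate() (target name and values are illustrative):
#   TARGETS = {
#       'Hello.py': {'options': ['--debug'],
#                    'additional_args': [],
#                    'path': 'src',           # relative to PATH unless absolute
#                    'downloads': [...]},     # entries as documented above
#   }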
def translate():
for target in TARGETS:
args = [target]
if isinstance(TARGETS, dict):
opts = TARGETS[target].get('options', [])
args += TARGETS[target].get('additional_args', [])
else:
opts = []
cmd = [ENV['BIN_PYTHON'], ENV['BIN_PYJSBUILD']] + ENV['ARG_PYJSBUILD'] + opts + args
if not [ENV['ARG_PYJSBUILD']] + opts + args:
raise RuntimeError(cmd)
path = _process_path(TARGETS, target)
e = subprocess.Popen(cmd, cwd=path)
ret = e.wait()
def install(package=None, **packages):
if package is not None:
PACKAGE.update(package)
name = ENV['NAME_EXAMPLE']
demos = ''.join([
str(INDEX['demo']).format(name=name, target=target[:-3])
for target in TARGETS
])
example = {
'name': name,
'title': PACKAGE['title'],
'desc': PACKAGE['desc'],
'demos': demos,
}
if 'OPT_PROXYINSTALL' in ENV:
sys.stdout.write(repr(example))
sys.stdout.flush()
return
packages[name] = example
if not packages:
raise TypeError('Nothing to install.')
index = os.path.join(ENV['DIR_EXAMPLES'], 'index.html')
try:
if os.path.isfile(index):
idx_out_fd = open(index, 'r+')
index_orig = tpl = idx_out_fd.read()
idx_out_fd.seek(0)
idx_out_fd.truncate()
else:
idx_out_fd = open(index, 'w')
index_orig = tpl = None
if tpl is None or '<style>' in tpl:
examples = ''.join([
str(INDEX['example']).format(name=example)
for example in _list_examples()
])
index_tpl = os.path.join(ENV['BASE_EXAMPLES'], '_examples', 'template', 'index.html.tpl')
idx_in_fd = open(index_tpl, 'r')
tpl = str(idx_in_fd.read()).format(examples)
index_new = str(tpl).format(example=_e(packages))
except:
if index_orig is None:
idx_out_fd.close()
os.unlink(index)
else:
idx_out_fd.write(index_orig)
raise
else:
idx_out_fd.write(index_new)
finally:
idx_out_fd.close()
|
|
# Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
# All Rights Reserved.
#
# Based on openvswitch agent.
#
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import netaddr
from oslo.config import cfg
from oslo import messaging
from ryu.app.ofctl import api as ryu_api
from ryu.base import app_manager
from ryu.controller import handler
from ryu.controller import ofp_event
from ryu.lib import hub
from ryu.ofproto import ofproto_v1_3 as ryu_ofp13
from neutron.agent import l2population_rpc
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import constants as n_const
from neutron.common import log
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.common import constants
from networking_ofagent.i18n import _LE, _LI, _LW
from networking_ofagent.plugins.ofagent.agent import arp_lib
from networking_ofagent.plugins.ofagent.agent import constants as ofa_const
from networking_ofagent.plugins.ofagent.agent import flows
from networking_ofagent.plugins.ofagent.agent import ports
from networking_ofagent.plugins.ofagent.agent import tables
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT',
'networking_ofagent.plugins.ofagent.common.config')
# A class to represent a local VLAN: it tracks the network binding
# (type, physical network, segmentation id) and the VIF ports attached to it.
class LocalVLANMapping(object):
def __init__(self, vlan, network_type, physical_network, segmentation_id,
vif_ports=None):
assert(isinstance(vlan, (int, long)))
if vif_ports is None:
vif_ports = {}
self.vlan = vlan
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
self.vif_ports = vif_ports
# set of tunnel ports on which packets should be flooded
self.tun_ofports = set()
def __str__(self):
return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
(self.vlan, self.network_type, self.physical_network,
self.segmentation_id))
class Bridge(flows.OFAgentIntegrationBridge, ovs_lib.OVSBridge):
def __init__(self, br_name, root_helper, ryuapp):
super(Bridge, self).__init__(br_name, root_helper)
self.datapath_id = None
self.datapath = None
self.ryuapp = ryuapp
self.set_app(ryuapp)
def find_datapath_id(self):
self.datapath_id = self.get_datapath_id()
def get_datapath(self, retry_max=cfg.CONF.AGENT.get_datapath_retry_times):
retry = 0
while self.datapath is None:
self.datapath = ryu_api.get_datapath(self.ryuapp,
int(self.datapath_id, 16))
retry += 1
if retry >= retry_max:
LOG.error(_LE('Agent terminated!: Failed to get a datapath.'))
raise SystemExit(1)
time.sleep(1)
self.set_dp(self.datapath)
def setup_ofp(self, controller_names=None,
protocols='OpenFlow13',
retry_max=cfg.CONF.AGENT.get_datapath_retry_times):
if not controller_names:
host = cfg.CONF.ofp_listen_host
if not host:
# 127.0.0.1 is the default host for an agent-style controller
host = '127.0.0.1'
controller_names = ["tcp:%s:%d" % (host,
cfg.CONF.ofp_tcp_listen_port)]
try:
self.set_protocols(protocols)
self.set_controller(controller_names)
except RuntimeError:
LOG.exception(_LE("Agent terminated"))
raise SystemExit(1)
self.find_datapath_id()
self.get_datapath(retry_max)
class OFAPluginApi(agent_rpc.PluginApi,
sg_rpc.SecurityGroupServerRpcApiMixin):
pass
class OFASecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin):
def __init__(self, context, plugin_rpc, root_helper):
self.context = context
self.plugin_rpc = plugin_rpc
self.root_helper = root_helper
self.init_firewall(defer_refresh_firewall=True)
class OFANeutronAgentRyuApp(app_manager.RyuApp):
OFP_VERSIONS = [ryu_ofp13.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(OFANeutronAgentRyuApp, self).__init__(*args, **kwargs)
self.arplib = arp_lib.ArpLib(self)
def start(self):
super(OFANeutronAgentRyuApp, self).start()
return hub.spawn(self._agent_main, self)
def _agent_main(self, ryuapp):
cfg.CONF.register_opts(ip_lib.OPTS)
n_utils.log_opt_values(LOG)
try:
agent_config = create_agent_config_map(cfg.CONF)
except ValueError:
LOG.exception(_LE("Agent failed to create agent config map"))
raise SystemExit(1)
agent = OFANeutronAgent(ryuapp, **agent_config)
self.arplib.set_bridge(agent.int_br)
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
agent.daemon_loop()
@handler.set_ev_cls(ofp_event.EventOFPPacketIn, handler.MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
self.arplib.packet_in_handler(ev)
def add_arp_table_entry(self, network, ip, mac):
self.arplib.add_arp_table_entry(network, ip, mac)
def del_arp_table_entry(self, network, ip):
self.arplib.del_arp_table_entry(network, ip)
class OFANeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2population_rpc.L2populationRpcCallBackTunnelMixin):
"""A agent for OpenFlow Agent ML2 mechanism driver.
OFANeutronAgent is a OpenFlow Agent agent for a ML2 plugin.
This is as a ryu application thread.
This has the following features.
- An agent acts as an OpenFlow controller on each compute nodes.
- OpenFlow 1.3 (vendor agnostic unlike OVS extensions).
- l2-population is mandatory.
"""
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
target = messaging.Target(version='1.1')
def __init__(self, ryuapp, integ_br, local_ip,
bridge_mappings, interface_mappings, root_helper,
polling_interval, tunnel_types=None):
"""Constructor.
:param ryuapp: object of the ryu app.
:param integ_br: name of the integration bridge.
:param local_ip: local IP address of this hypervisor.
:param bridge_mappings: mappings from physical network name to bridge.
(deprecated)
:param interface_mappings: mappings from physical network name to
interface.
:param root_helper: utility to use when running shell cmds.
:param polling_interval: interval (secs) to poll DB.
:param tunnel_types: A list of tunnel types to enable support for in
the agent. If set, will automatically set enable_tunneling to
True.
"""
super(OFANeutronAgent, self).__init__()
self.ryuapp = ryuapp
self.root_helper = root_helper
# TODO(yamamoto): Remove this VLAN leftover
self.available_local_vlans = set(xrange(ofa_const.LOCAL_VLAN_MIN,
ofa_const.LOCAL_VLAN_MAX))
self.tunnel_types = tunnel_types or []
l2pop_network_types = list(set(self.tunnel_types +
[p_const.TYPE_VLAN,
p_const.TYPE_FLAT,
p_const.TYPE_LOCAL]))
self.agent_state = {
'binary': 'neutron-ofa-agent',
'host': cfg.CONF.host,
'topic': n_const.L2_AGENT_TOPIC,
'configurations': {
'bridge_mappings': bridge_mappings,
'interface_mappings': interface_mappings,
'tunnel_types': self.tunnel_types,
'tunneling_ip': local_ip,
'l2_population': True,
'l2pop_network_types': l2pop_network_types},
'agent_type': n_const.AGENT_TYPE_OFA,
'start_flag': True}
# Keep track of int_br's device count for use by _report_state()
self.int_br_device_count = 0
self.int_br = Bridge(integ_br, self.root_helper, self.ryuapp)
# Stores port update notifications for processing in main loop
self.updated_ports = set()
self.setup_rpc()
self.setup_integration_br()
self.int_ofports = {}
self.setup_physical_interfaces(interface_mappings)
self.local_vlan_map = {}
self.tun_ofports = {}
for t in tables.TUNNEL_TYPES:
self.tun_ofports[t] = {}
self.polling_interval = polling_interval
self.enable_tunneling = bool(self.tunnel_types)
self.local_ip = local_ip
self.tunnel_count = 0
self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port
self.dont_fragment = cfg.CONF.AGENT.dont_fragment
# Security group agent support
self.sg_agent = OFASecurityGroupAgent(self.context,
self.plugin_rpc,
self.root_helper)
# Initialize iteration counter
self.iter_num = 0
def _report_state(self):
# How many devices are likely used by a VM
self.agent_state.get('configurations')['devices'] = (
self.int_br_device_count)
try:
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def _create_tunnel_port_name(self, tunnel_type, ip_address):
try:
ip_hex = '%08x' % netaddr.IPAddress(ip_address, version=4)
return '%s-%s' % (tunnel_type, ip_hex)
except Exception:
LOG.warn(_LW("Unable to create tunnel port. "
"Invalid remote IP: %s"), ip_address)
def setup_rpc(self):
mac = self.int_br.get_local_port_mac()
self.agent_id = '%s%s' % ('ovs', (mac.replace(":", "")))
self.topic = topics.AGENT
self.plugin_rpc = OFAPluginApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
self.context = context.get_admin_context_without_session()
# Handle updates from service
self.endpoints = [self]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.SECURITY_GROUP, topics.UPDATE],
[topics.L2POPULATION, topics.UPDATE, cfg.CONF.host]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _get_ports(self, br):
"""Generate ports.Port instances for the given bridge."""
datapath = br.datapath
ofpp = datapath.ofproto_parser
msg = ofpp.OFPPortDescStatsRequest(datapath=datapath)
descs = ryu_api.send_msg(app=self.ryuapp, msg=msg,
reply_cls=ofpp.OFPPortDescStatsReply,
reply_multi=True)
for d in descs:
for p in d.body:
yield ports.Port.from_ofp_port(p)
def _get_ofport_names(self, br):
"""Return a set of OpenFlow port names for the given bridge."""
return set(p.normalized_port_name() for p in
self._get_ports(br) if p.is_neutron_port())
def get_net_uuid(self, vif_id):
for network_id, vlan_mapping in self.local_vlan_map.iteritems():
if vif_id in vlan_mapping.vif_ports:
return network_id
@log.log
def port_update(self, context, **kwargs):
port = kwargs.get('port')
# Put the port identifier in the updated_ports set.
# Even if full port details might be provided to this call,
# they are not used since there is no guarantee the notifications
# are processed in the same order as the relevant API requests
self.updated_ports.add(ports.get_normalized_port_name(port['id']))
@log.log
def fdb_add(self, context, fdb_entries):
for lvm, agent_ports in self.get_agent_ports(fdb_entries,
self.local_vlan_map):
if lvm.network_type in self.tunnel_types:
local = agent_ports.pop(self.local_ip, None)
if local:
self._fdb_add_arp(lvm, {self.local_ip: local})
if len(agent_ports):
self.fdb_add_tun(context, self.int_br, lvm, agent_ports,
self.tun_ofports)
else:
self._fdb_add_arp(lvm, agent_ports)
@log.log
def fdb_remove(self, context, fdb_entries):
for lvm, agent_ports in self.get_agent_ports(fdb_entries,
self.local_vlan_map):
if lvm.network_type in self.tunnel_types:
local = agent_ports.pop(self.local_ip, None)
if local:
self._fdb_remove_arp(lvm, {self.local_ip: local})
if len(agent_ports):
self.fdb_remove_tun(context, self.int_br, lvm, agent_ports,
self.tun_ofports)
else:
self._fdb_remove_arp(lvm, agent_ports)
@log.log
def _fdb_add_arp(self, lvm, agent_ports):
for _remote_ip, port_infos in agent_ports.items():
for port_info in port_infos:
if port_info == n_const.FLOODING_ENTRY:
continue
self.ryuapp.add_arp_table_entry(lvm.vlan,
port_info.ip_address,
port_info.mac_address)
@log.log
def _fdb_remove_arp(self, lvm, agent_ports):
for _remote_ip, port_infos in agent_ports.items():
for port_info in port_infos:
if port_info == n_const.FLOODING_ENTRY:
continue
self.ryuapp.del_arp_table_entry(lvm.vlan, port_info.ip_address)
def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
if port_info == n_const.FLOODING_ENTRY:
lvm.tun_ofports.add(ofport)
br.install_tunnel_output(
tables.TUNNEL_FLOOD[lvm.network_type],
lvm.vlan, lvm.segmentation_id,
lvm.tun_ofports, goto_next=True)
else:
self.ryuapp.add_arp_table_entry(
lvm.vlan,
port_info.ip_address,
port_info.mac_address)
br.install_tunnel_output(
tables.TUNNEL_OUT,
lvm.vlan, lvm.segmentation_id,
set([ofport]), goto_next=False, eth_dst=port_info.mac_address)
def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
if port_info == n_const.FLOODING_ENTRY:
lvm.tun_ofports.remove(ofport)
if len(lvm.tun_ofports) > 0:
br.install_tunnel_output(
tables.TUNNEL_FLOOD[lvm.network_type],
lvm.vlan, lvm.segmentation_id,
lvm.tun_ofports, goto_next=True)
else:
br.delete_tunnel_output(
tables.TUNNEL_FLOOD[lvm.network_type],
lvm.vlan)
else:
self.ryuapp.del_arp_table_entry(lvm.vlan, port_info.ip_address)
br.delete_tunnel_output(tables.TUNNEL_OUT,
lvm.vlan, eth_dst=port_info.mac_address)
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
if action == 'add':
self.ryuapp.add_arp_table_entry(local_vid, ip_address, mac_address)
elif action == 'remove':
self.ryuapp.del_arp_table_entry(local_vid, ip_address)
@log.log
def _fdb_chg_ip(self, context, fdb_entries):
self.fdb_chg_ip_tun(context, self.int_br, fdb_entries, self.local_ip,
self.local_vlan_map)
def provision_local_vlan(self, net_uuid, network_type, physical_network,
segmentation_id):
"""Provisions a local VLAN.
:param net_uuid: the uuid of the network associated with this vlan.
:param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
"""
if not self.available_local_vlans:
LOG.error(_LE("No local VLAN available for net-id=%s"), net_uuid)
return
lvid = self.available_local_vlans.pop()
LOG.info(_LI("Assigning %(vlan_id)s as local vlan for "
"net-id=%(net_uuid)s"),
{'vlan_id': lvid, 'net_uuid': net_uuid})
self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, network_type,
physical_network,
segmentation_id)
if network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
self.int_br.provision_tenant_tunnel(network_type, lvid,
segmentation_id)
else:
LOG.error(_LE("Cannot provision %(network_type)s network for "
"net-id=%(net_uuid)s - tunneling disabled"),
{'network_type': network_type,
'net_uuid': net_uuid})
elif network_type in [p_const.TYPE_VLAN, p_const.TYPE_FLAT]:
if physical_network in self.int_ofports:
phys_port = self.int_ofports[physical_network]
self.int_br.provision_tenant_physnet(network_type, lvid,
segmentation_id,
phys_port)
else:
LOG.error(_LE("Cannot provision %(network_type)s network for "
"net-id=%(net_uuid)s - no bridge for "
"physical_network %(physical_network)s"),
{'network_type': network_type,
'net_uuid': net_uuid,
'physical_network': physical_network})
elif network_type == p_const.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error(_LE("Cannot provision unknown network type "
"%(network_type)s for net-id=%(net_uuid)s"),
{'network_type': network_type,
'net_uuid': net_uuid})
def reclaim_local_vlan(self, net_uuid):
"""Reclaim a local VLAN.
:param net_uuid: the network uuid associated with this vlan.
:param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id,
vif_ids) mapping.
"""
lvm = self.local_vlan_map.pop(net_uuid, None)
if lvm is None:
LOG.debug("Network %s not used on agent.", net_uuid)
return
LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from "
"net-id = %(net_uuid)s"),
{'vlan_id': lvm.vlan,
'net_uuid': net_uuid})
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
self.int_br.reclaim_tenant_tunnel(lvm.network_type, lvm.vlan,
lvm.segmentation_id)
# Try to remove tunnel ports if not used by other networks
for ofport in lvm.tun_ofports:
self.cleanup_tunnel_port(self.int_br, ofport,
lvm.network_type)
elif lvm.network_type in [p_const.TYPE_FLAT, p_const.TYPE_VLAN]:
phys_port = self.int_ofports[lvm.physical_network]
self.int_br.reclaim_tenant_physnet(lvm.network_type, lvm.vlan,
lvm.segmentation_id, phys_port)
elif lvm.network_type == p_const.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error(_LE("Cannot reclaim unknown network type "
"%(network_type)s for net-id=%(net_uuid)s"),
{'network_type': lvm.network_type,
'net_uuid': net_uuid})
self.available_local_vlans.add(lvm.vlan)
def port_bound(self, port, net_uuid,
network_type, physical_network, segmentation_id):
"""Bind port to net_uuid/lsw_id and install flow for inbound traffic
to vm.
:param port: a ports.Port object.
:param net_uuid: the net_uuid this port is to be associated with.
:param network_type: the network type ('gre', 'vlan', 'flat', 'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
"""
if net_uuid not in self.local_vlan_map:
self.provision_local_vlan(net_uuid, network_type,
physical_network, segmentation_id)
lvm = self.local_vlan_map[net_uuid]
lvm.vif_ports[port.normalized_port_name()] = port
self.int_br.check_in_port_add_local_port(lvm.vlan, port.ofport)
# if any VIF MAC is unknown, flood unicast traffic as well
flood_unicast = any(map(lambda x: x.vif_mac is None,
lvm.vif_ports.values()))
ofports = (vp.ofport for vp in lvm.vif_ports.values())
self.int_br.local_flood_update(lvm.vlan, ofports, flood_unicast)
if port.vif_mac is None:
return
self.int_br.local_out_add_port(lvm.vlan, port.ofport, port.vif_mac)
def port_unbound(self, vif_id, net_uuid=None):
"""Unbind port.
Removes corresponding local vlan mapping object if this is its last
VIF.
:param vif_id: the id of the vif
:param net_uuid: the net_uuid this port is associated with.
"""
net_uuid = net_uuid or self.get_net_uuid(vif_id)
if not self.local_vlan_map.get(net_uuid):
LOG.info(_LI('port_unbound() net_uuid %s not in local_vlan_map'),
net_uuid)
return
lvm = self.local_vlan_map[net_uuid]
port = lvm.vif_ports.pop(vif_id, None)
self.int_br.check_in_port_delete_port(port.ofport)
if not lvm.vif_ports:
self.reclaim_local_vlan(net_uuid)
if port.vif_mac is None:
return
self.int_br.local_out_delete_port(lvm.vlan, port.vif_mac)
def port_dead(self, port):
"""Once a port has no binding, put it on the "dead vlan".
:param port: a ovs_lib.VifPort object.
"""
pass
def setup_integration_br(self):
"""Setup the integration bridge.
"""
br = self.int_br
br.setup_ofp()
br.setup_default_table()
def setup_physical_interfaces(self, interface_mappings):
"""Setup the physical network interfaces.
Link physical network interfaces to the integration bridge.
:param interface_mappings: map physical network names to
interface names.
"""
for physical_network, interface_name in interface_mappings.iteritems():
ofport = int(self.int_br.add_port(interface_name))
self.int_ofports[physical_network] = ofport
def scan_ports(self, registered_ports, updated_ports=None):
cur_ports = self._get_ofport_names(self.int_br)
self.int_br_device_count = len(cur_ports)
port_info = {'current': cur_ports}
if updated_ports is None:
updated_ports = set()
if updated_ports:
# Some updated ports might have been removed in the
# meanwhile, and therefore should not be processed.
# In this case the updated port won't be found among
# current ports.
updated_ports &= cur_ports
if updated_ports:
port_info['updated'] = updated_ports
if cur_ports == registered_ports:
# No added or removed ports to set, just return here
return port_info
port_info['added'] = cur_ports - registered_ports
# Remove all the known ports not found on the integration bridge
port_info['removed'] = registered_ports - cur_ports
return port_info
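# Sketch of the port_info dict built above (values are sets of normalized port
# names; 'added'/'removed' are omitted when nothing changed, and 'updated'
# appears only when updated ports are still present on the bridge):
#   {'current': {...}, 'updated': {...}, 'added': {...}, 'removed': {...}}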
def treat_vif_port(self, vif_port, port_id, network_id, network_type,
physical_network, segmentation_id, admin_state_up):
if vif_port:
# When this function is called for a port, the port should have
# an OVS ofport configured, as only these ports were considered
# for being treated. If that does not happen, it is a potential
# error condition of which operators should be aware
if not vif_port.ofport:
LOG.warn(_LW("VIF port: %s has no ofport configured, "
"and might not be able to transmit"),
vif_port.port_name)
if admin_state_up:
self.port_bound(vif_port, network_id, network_type,
physical_network, segmentation_id)
else:
self.port_dead(vif_port)
else:
LOG.debug("No VIF port for port %s defined on agent.", port_id)
def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type):
ofport = br.add_tunnel_port(port_name,
remote_ip,
self.local_ip,
tunnel_type,
self.vxlan_udp_port,
self.dont_fragment)
if ofport == ovs_lib.INVALID_OFPORT:
LOG.error(_LE("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': tunnel_type, 'ip': remote_ip})
return 0
ofport = int(ofport)
self.tun_ofports[tunnel_type][remote_ip] = ofport
br.check_in_port_add_tunnel_port(tunnel_type, ofport)
return ofport
def setup_tunnel_port(self, br, remote_ip, network_type):
port_name = self._create_tunnel_port_name(network_type, remote_ip)
if not port_name:
return 0
ofport = self._setup_tunnel_port(br,
port_name,
remote_ip,
network_type)
return ofport
def _remove_tunnel_port(self, br, tun_ofport, tunnel_type):
for remote_ip, ofport in self.tun_ofports[tunnel_type].items():
if ofport == tun_ofport:
br.check_in_port_delete_port(ofport)
port_name = self._create_tunnel_port_name(tunnel_type,
remote_ip)
if port_name:
br.delete_port(port_name)
self.tun_ofports[tunnel_type].pop(remote_ip, None)
def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
# Check if this tunnel port is still used
for lvm in self.local_vlan_map.values():
if tun_ofport in lvm.tun_ofports:
break
# If not, remove it
else:
self._remove_tunnel_port(br, tun_ofport, tunnel_type)
def treat_devices_added_or_updated(self, devices):
resync = False
all_ports = dict((p.normalized_port_name(), p) for p in
self._get_ports(self.int_br) if p.is_neutron_port())
for device in devices:
LOG.debug("Processing port %s", device)
if device not in all_ports:
# The port has disappeared and should not be processed
# There is no need to put the port DOWN in the plugin as
# it never went up in the first place
LOG.info(_LI("Port %s was not found on the integration bridge "
"and will therefore not be processed"), device)
continue
port = all_ports[device]
try:
details = self.plugin_rpc.get_device_details(self.context,
device,
self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for %(device)s: %(e)s",
{'device': device, 'e': e})
resync = True
continue
if 'port_id' in details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': details})
port.vif_mac = details.get('mac_address')
self.treat_vif_port(port, details['port_id'],
details['network_id'],
details['network_type'],
details['physical_network'],
details['segmentation_id'],
details['admin_state_up'])
# update plugin about port status
if details.get('admin_state_up'):
LOG.debug("Setting status for %s to UP", device)
self.plugin_rpc.update_device_up(
self.context, device, self.agent_id, cfg.CONF.host)
else:
LOG.debug("Setting status for %s to DOWN", device)
self.plugin_rpc.update_device_down(
self.context, device, self.agent_id, cfg.CONF.host)
LOG.info(_LI("Configuration for device %s completed."), device)
else:
LOG.warn(_LW("Device %s not defined on plugin"), device)
if (port and port.ofport != -1):
self.port_dead(port)
return resync
def treat_devices_removed(self, devices):
resync = False
self.sg_agent.remove_devices_filter(devices)
for device in devices:
LOG.info(_LI("Attachment %s removed"), device)
try:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug("port_removed failed for %(device)s: %(e)s",
{'device': device, 'e': e})
resync = True
continue
self.port_unbound(device)
return resync
def process_network_ports(self, port_info):
resync_add = False
resync_removed = False
# If there is an exception while processing security groups ports
# will not be wired anyway, and a resync will be triggered
self.sg_agent.setup_port_filters(port_info.get('added', set()),
port_info.get('updated', set()))
# VIF wiring needs to be performed always for 'new' devices.
# For updated ports, re-wiring is not needed in most cases, but needs
# to be performed anyway when the admin state of a device is changed.
# A device might be both in the 'added' and 'updated'
# list at the same time; avoid processing it twice.
devices_added_updated = (port_info.get('added', set()) |
port_info.get('updated', set()))
if devices_added_updated:
start = time.time()
resync_add = self.treat_devices_added_or_updated(
devices_added_updated)
LOG.debug("process_network_ports - iteration:%(iter_num)d - "
"treat_devices_added_or_updated completed "
"in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
if 'removed' in port_info:
start = time.time()
resync_removed = self.treat_devices_removed(port_info['removed'])
LOG.debug("process_network_ports - iteration:%(iter_num)d - "
"treat_devices_removed completed in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# If one of the above operations fails => resync with plugin
return (resync_add | resync_removed)
def tunnel_sync(self):
resync = False
try:
for tunnel_type in self.tunnel_types:
self.plugin_rpc.tunnel_sync(self.context,
self.local_ip,
tunnel_type)
except Exception as e:
LOG.debug("Unable to sync tunnel IP %(local_ip)s: %(e)s",
{'local_ip': self.local_ip, 'e': e})
resync = True
return resync
def _port_info_has_changes(self, port_info):
return (port_info.get('added') or
port_info.get('removed') or
port_info.get('updated'))
def daemon_loop(self):
# TODO(yamamoto):
# It might be better to monitor port status async messages
sync = True
ports = set()
tunnel_sync = True
while True:
start = time.time()
port_stats = {'regular': {'added': 0, 'updated': 0, 'removed': 0}}
LOG.debug("Agent daemon_loop - iteration:%d started",
self.iter_num)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
ports.clear()
sync = False
# Notify the plugin of tunnel IP
if self.enable_tunneling and tunnel_sync:
LOG.info(_LI("Agent tunnel out of sync with plugin!"))
try:
tunnel_sync = self.tunnel_sync()
except Exception:
LOG.exception(_LE("Error while synchronizing tunnels"))
tunnel_sync = True
LOG.debug("Agent daemon_loop - iteration:%(iter_num)d - "
"starting polling. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
try:
# Save updated ports dict to perform rollback in
# case resync would be needed, and then clear
# self.updated_ports. As the greenthread should not yield
# between these two statements, this will be thread-safe
updated_ports_copy = self.updated_ports
self.updated_ports = set()
port_info = self.scan_ports(ports, updated_ports_copy)
ports = port_info['current']
LOG.debug("Agent daemon_loop - iteration:%(iter_num)d - "
"port information retrieved. "
"Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# Secure and wire/unwire VIFs and update their status
# on Neutron server
if (self._port_info_has_changes(port_info) or
self.sg_agent.firewall_refresh_needed()):
LOG.debug("Starting to process devices in:%s",
port_info)
# If treat devices fails - must resync with plugin
sync = self.process_network_ports(port_info)
LOG.debug("Agent daemon_loop - "
"iteration:%(iter_num)d - "
"ports processed. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
port_stats['regular']['added'] = (
len(port_info.get('added', [])))
port_stats['regular']['updated'] = (
len(port_info.get('updated', [])))
port_stats['regular']['removed'] = (
len(port_info.get('removed', [])))
except Exception:
LOG.exception(_LE("Error while processing VIF ports"))
# Put the ports back in self.updated_ports
self.updated_ports |= updated_ports_copy
sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
LOG.debug("Agent daemon_loop - iteration:%(iter_num)d "
"completed. Processed ports statistics:"
"%(port_stats)s. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'port_stats': port_stats,
'elapsed': elapsed})
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
def create_agent_config_map(config):
"""Create a map of agent config parameters.
:param config: an instance of cfg.CONF
:returns: a map of agent configuration parameters
"""
try:
bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings)
except ValueError as e:
raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
try:
interface_mappings = n_utils.parse_mappings(
config.AGENT.physical_interface_mappings)
except ValueError as e:
raise ValueError(_("Parsing physical_interface_mappings failed: %s.")
% e)
kwargs = dict(
integ_br=config.OVS.integration_bridge,
local_ip=config.OVS.local_ip,
interface_mappings=interface_mappings,
bridge_mappings=bridge_mappings,
root_helper=config.AGENT.root_helper,
polling_interval=config.AGENT.polling_interval,
tunnel_types=config.AGENT.tunnel_types,
)
# Verify the tunnel_types specified are valid
for tun in kwargs['tunnel_types']:
if tun not in constants.TUNNEL_NETWORK_TYPES:
msg = _('Invalid tunnel type specified: %s') % tun
raise ValueError(msg)
if not kwargs['local_ip']:
msg = _('Tunneling cannot be enabled without a valid local_ip.')
raise ValueError(msg)
return kwargs
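# Shape of the map returned above (keys mirror the OFANeutronAgent constructor;
# the values shown are placeholders, not real configuration):
#   {'integ_br': 'br-int', 'local_ip': '192.0.2.1', 'interface_mappings': {},
#    'bridge_mappings': {}, 'root_helper': 'sudo', 'polling_interval': 2,
#    'tunnel_types': ['vxlan']}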
|
|
"""
A clustergram function similar to MATLAB clustergram()
Author: Zichen Wang
Created on 4/7/2014
Major enhancement: enables group labels for rows and columns, which can be useful to
directly visualize whether the hierarchical clustering outcome agrees with the inherent
categories of the samples.
References:
https://code.activestate.com/recipes/578834-hierarchical-clustering-heatmap-python/
http://www.mathworks.com/help/bioinfo/ref/clustergram.html
"""
from collections import OrderedDict
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import zscore
from collections import Counter
from matplotlib import rcParams
rcParams['pdf.fonttype'] = 42 ## Output Type 3 (Type3) or Type 42 (TrueType)
rcParams['font.sans-serif'] = 'Arial'
global COLORS10, COLORS20
COLORS10 = [
'#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf',
]
COLORS20 = [
'#1f77b4',
'#aec7e8',
'#ff7f0e',
'#ffbb78',
'#2ca02c',
'#98df8a',
'#d62728',
'#ff9896',
'#9467bd',
'#c5b0d5',
'#8c564b',
'#c49c94',
'#e377c2',
'#f7b6d2',
'#7f7f7f',
'#c7c7c7',
'#bcbd22',
'#dbdb8d',
'#17becf',
'#9edae5',
]
def clustergram(data=None, row_labels=None, col_labels=None,
row_groups=None, col_groups=None, cluster=True,
row_linkage='average', col_linkage='average',
row_pdist='euclidean', col_pdist='euclidean',
standardize=3, log=False, colormap='redbluecmap',
display_range=3, figsize=12, figname=None, colorkey='colorkey'):
"""
Parameters:
----------
data: a numpy array or numpy matrix
row_labels: a list of strings corresponding to the rows in data
col_labels: a list of strings corresponding to the columns in data
row_groups: a list of strings used as the group labels for the rows in data
col_groups: a list of strings used as the group labels for the columns in data
cluster: boolean variable specifying whether to perform hierarchical clustering (True) or not (False)
row_linkage: linkage method used for rows
col_linkage: linkage method used for columns
options = ['average','single','complete','weighted','centroid','median','ward']
row_pdist: pdist metric used for rows
col_pdist: pdist metric used for columns
options = ['euclidean','minkowski','cityblock','seuclidean','sqeuclidean',
'cosine','correlation','hamming','jaccard','chebyshev','canberra','braycurtis',
'mahalanobis','wminkowski']
standardize: specifying the dimension for standardizing the values in data
options = {1: 'standardize along the columns of data',
2: 'standardize along the rows of data',
3: 'do not standardize the data'}
log: boolean variable specifying whether to perform log transform for the data
colormap: options = ['redbluecmap', 'redgreencmap', 'grey']
display_range: specifies the display range of the values in data;
if a single number is specified:
the display range is zero centered at +/- that number
elif a list or tuple of two numbers is specified:
the display range spans exactly that interval
figsize: specifies the size of the figure
figname: figure name (format of figure should be specified, e.g. .png, .pdf),
if specified, figure will be saved instead of being shown
colorkey: specifies the name of the colorkey to display
Example:
----------
from clustergram import clustergram
clustergram(data=np.random.randn(3,3),row_labels=['a','b','c'],
col_labels=['1','2','3'], row_groups=['A','A','B'],
col_groups=['group1','group1','group2'])
"""
## preprocess data
if log:
data = np.log2(data + 1.0)
if standardize == 1: # Standardize along the columns of data
data = zscore(data, axis=0)
elif standardize == 2: # Standardize along the rows of data
data = zscore(data, axis=1)
if colormap == 'redbluecmap':
cmap = plt.cm.coolwarm
elif colormap == 'redgreencmap':
cmap = plt.cm.RdYlGn
elif colormap == 'grey':
cmap = plt.cm.Greys
### Configure the Matplotlib figure size
default_window_height = figsize
default_window_width = figsize * 1.8
fig = plt.figure(figsize=(default_window_width, default_window_height)) ### could use m,n to scale here
color_bar_w = 0.01
group_bar_w = 0.01
hw_ratio = data.shape[0]/float(data.shape[1])
heatmap_w = 0.5
heatmap_h = 0.7
# heatmap_h = min(heatmap_w * hw_ratio, 0.7)
dendrogram_l = 0.15
color_legend_w = 0.18
color_legend_h = 0.09
margin = 0.01
fig_margin_l = 0.05
fig_margin_b = 0.10
## calculate positions for all elements
# ax1, placement of dendrogram 1, on the left of the heatmap
rect1 = [fig_margin_l, fig_margin_b, dendrogram_l, heatmap_h]
# axr, placement of row side colorbar
rectr = [fig_margin_l + dendrogram_l, fig_margin_b, color_bar_w, heatmap_h]
# axc, placement of column side colorbar
rectc = [fig_margin_l + dendrogram_l + group_bar_w + margin, heatmap_h + fig_margin_b + margin, heatmap_w, color_bar_w]
# axm, placement of heatmap
rectm = [fig_margin_l + dendrogram_l + group_bar_w + margin, fig_margin_b, heatmap_w, heatmap_h]
# ax2, placement of dendrogram 2, on the top of the heatmap
rect2 = [fig_margin_l + dendrogram_l + group_bar_w + margin, fig_margin_b + heatmap_h + group_bar_w + margin, heatmap_w, dendrogram_l] ### last one controls height of the dendrogram
# axcb - placement of the color legend
rectcb = [0.05,0.85,0.10,0.06]
## plot color legend
if type(display_range) == int or type(display_range) == float:
display_range = float(display_range)
norm = mpl.colors.Normalize(-display_range, display_range)
step = display_range/2
bounds = np.arange(-display_range, display_range+step, step)
else:
if len(display_range) == 2:
norm = mpl.colors.Normalize(display_range[0], display_range[1])
step = (display_range[1]-display_range[0])/4.
bounds = np.arange(display_range[0], display_range[1]+step,step)
axcb = fig.add_axes(rectcb, frame_on=False)
cb = mpl.colorbar.ColorbarBase(axcb, cmap=cmap, norm=norm,
orientation='horizontal', ticks=bounds, spacing='proportional', extend='both')
axcb.set_title(colorkey)
if cluster: ## perform hierarchical clustering for rows and cols
## compute pdist for rows:
d1 = dist.pdist(data, metric=row_pdist)
D1 = dist.squareform(d1)
ax1 = fig.add_axes(rect1, frame_on=False)
Y1 = sch.linkage(D1, method=row_linkage, metric=row_pdist)
Z1 = sch.dendrogram(Y1, orientation='right')
idx1 = Z1['leaves']
ax1.set_xticks([])
ax1.set_yticks([])
## compute pdist for cols
d2 = dist.pdist(data.T, metric=col_pdist)
D2 = dist.squareform(d2)
ax2 = fig.add_axes(rect2, frame_on=False)
Y2 = sch.linkage(D2, method=col_linkage, metric=col_pdist)
Z2 = sch.dendrogram(Y2)
idx2 = Z2['leaves']
ax2.set_xticks([])
ax2.set_yticks([])
## plot heatmap
axm = fig.add_axes(rectm)
data_clustered = data
data_clustered = data_clustered[:,idx2]
data_clustered = data_clustered[idx1,:]
im = axm.matshow(data_clustered, aspect='auto', origin='lower',cmap=cmap, norm=norm)
axm.set_xticks([])
axm.set_yticks([])
## add labels
new_row_header=[]
new_column_header=[]
if row_labels is not None:
if len(row_labels) < 150:
for i in range(data.shape[0]):
axm.text(data.shape[1]-0.5, i, ' '+row_labels[idx1[i]])
new_row_header.append(row_labels[idx1[i]])
if col_labels is not None:
if len(col_labels) < 150:
for i in range(data.shape[1]):
axm.text(i, -0.8, ' '+ col_labels[idx2[i]], rotation=270, verticalalignment="top") # rotation could also be degrees
new_column_header.append(col_labels[idx2[i]])
else: ## not performing hierarchical clustering
## plot heatmap
axm = fig.add_axes(rectm)
im = axm.matshow(data, aspect='auto', origin='lower',cmap=cmap, norm=norm)
axm.set_xticks([])
axm.set_yticks([])
## the index vectors for the original order
idx2 = range(data.shape[1])
idx1 = range(data.shape[0])
## add labels
if row_labels is not None:
if len(row_labels) < 150:
for i in range(data.shape[0]):
axm.text(data.shape[1]-0.5, i, ' '+row_labels[i])
if col_labels is not None:
if len(col_labels) < 150:
for i in range(data.shape[1]):
axm.text(i, -0.8, ' '+ col_labels[i], rotation=270, verticalalignment="top") # rotation could also be degrees
## plot group colors
# numerize group names
if row_groups != None:
uniq_row_groups = list(set(row_groups))
d_row_group = OrderedDict()
for i, group_name in enumerate(uniq_row_groups):
d_row_group[group_name] = i
colors_row_groups = []
if len(d_row_group) < 11:
colors_row_groups = COLORS10[0:len(d_row_group)]
elif len(d_row_group) < 21:
colors_row_groups = COLORS20[0:len(d_row_group)]
else:
for i in range(len(d_row_group)):
colors_row_groups.append(np.random.rand(3,1))
cmap_row_groups = mpl.colors.ListedColormap(colors_row_groups) ## make color lists into cmap for matshow
## row group color label:
axr = fig.add_axes(rectr)
new_row_group = np.array([d_row_group[row_groups[idx1[i]]] for i in range(data.shape[0])])
new_row_group.shape = (len(idx1), 1)
rmat = axr.matshow(new_row_group, aspect='auto', origin='lower', cmap=cmap_row_groups)
axr.set_xticks([])
axr.set_yticks([])
## axglr: placement for row group label legends
axglr = fig.add_axes([1- fig_margin_b, fig_margin_b, 0.02, 0.3], frame_on=False)
rcbar = fig.colorbar(rmat, cax=axglr, ticks=range(len(d_row_group)))
rcbar.set_label('row groups')
rcbar.set_ticklabels(d_row_group.keys())
rcbar.update_ticks()
if col_groups != None:
uniq_col_groups = list(set(col_groups))
d_col_group = OrderedDict()
for i, group_name in enumerate(uniq_col_groups):
d_col_group[group_name] = i
## config group colors and cmaps
colors_col_groups = []
if len(d_col_group) < 11:
colors_col_groups = COLORS10[0:len(d_col_group)]
elif len(d_col_group) < 21:
colors_col_groups = COLORS20[0:len(d_col_group)]
else:
for i in range(len(d_col_group)):
colors_col_groups.append(np.random.rand(3,1))
cmap_col_groups = mpl.colors.ListedColormap(colors_col_groups)
axc = fig.add_axes(rectc)
new_col_group = np.array([d_col_group[col_groups[idx2[i]]] for i in range(data.shape[1])])
new_col_group.shape = (1, len(idx2))
cmat = axc.matshow(new_col_group, aspect='auto', origin='lower', cmap=cmap_col_groups)
axc.set_xticks([])
axc.set_yticks([])
## axglc: placement for col group label legends
axglc = fig.add_axes([1- fig_margin_b, 0.5, 0.02, 0.3], frame_on=False)
ccbar = fig.colorbar(cmat, cax=axglc, ticks=range(len(d_col_group)))
ccbar.set_label('column groups')
ccbar.set_ticklabels(d_col_group.keys())
ccbar.update_ticks()
if figname != None:
plt.savefig(figname)
else:
plt.show()
def plot_fclusters(data=None, row_labels=None, col_labels=None,
linkage='average', pdist='euclidean', standardize=3, log=False):
"""a function to plot the relationship between thresholds and number of
flat clusters achieved from hierarchical clustering, aims to find the optimal
threshold for forming clusters"""
## preprocess data
if log:
data = np.log2(data + 1.0)
if standardize == 1: # Standardize along the columns of data
data = zscore(data, axis=0)
elif standardize == 2: # Standardize along the rows of data
data = zscore(data, axis=1)
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
if row_labels is not None and col_labels is None: ## only get fclusters for rows
d = dist.pdist(data, metric=pdist)
elif row_labels is None and col_labels is not None: ## only get fclusters for cols
d = dist.pdist(data.T, metric=pdist)
D = dist.squareform(d)
Y = sch.linkage(D, method=linkage, metric=pdist)
space1 = np.linspace(d.min(), d.max(), num=5, endpoint=False)
space2 = np.linspace(d.max(),1.,num=30,endpoint=True)
thresholds = np.concatenate((space1,space2))
num_clusters = []
num_singles = []
for t in thresholds:
fclusters = sch.fcluster(Y, t,'distance')
c = Counter(fclusters)
num_cluster = len(c.keys())
num_single = c.values().count(1)
num_clusters.append(num_cluster)
num_singles.append(num_single)
print 'threshold=', t, 'clusters:', num_cluster, 'singles:',num_single
if num_cluster < 290:
print c
ax1.plot(thresholds, num_clusters,label='# of flat clusters')
ax1.plot(thresholds, num_singles,label='# of singles',c='r')
ax1.plot(thresholds, np.array(num_clusters)-np.array(num_singles),label='# of non-singles',c='g')
ax1.legend(loc='upper right')
ax1.set_xlabel('threshold for forming flat clusters')
ax2.plot(thresholds, num_clusters,label='# of flat clusters')
ax2.plot(thresholds, num_singles,label='# of singles',c='r')
ax2.plot(thresholds, np.array(num_clusters)-np.array(num_singles),label='# of non-singles',c='g')
ax2.legend(loc='upper right')
ax2.set_xlabel('threshold for forming flat clusters')
ax2.set_yscale('log')
plt.show()
return
def collaspe_fclusters(data=None, t=None, row_labels=None, col_labels=None,
linkage='average', pdist='euclidean', standardize=3, log=False):
"""a function to collaspe flat clusters by averaging the vectors within
each flat clusters achieved from hierarchical clustering"""
## preprocess data
if log:
data = np.log2(data + 1.0)
if standardize == 1: # Standardize along the columns of data
data = zscore(data, axis=0)
elif standardize == 2: # Standardize along the rows of data
data = zscore(data, axis=1)
if row_labels is not None and col_labels is None: ## only get fclusters for rows
d = dist.pdist(data, metric=pdist)
axis = 1 ##!!! haven't checked whether this is correct yet
elif row_labels is None and col_labels is not None: ## only get fclusters for cols
d = dist.pdist(data.T, metric=pdist)
axis = 0
D = dist.squareform(d)
Y = sch.linkage(D, method=linkage, metric=pdist)
fclusters = sch.fcluster(Y, t, 'distance')
fcluster_set = set(fclusters)
data_cf = []
for fc in fcluster_set:
mask = np.where(fclusters==fc)
data_t = data.T
vector_avg = np.average(data_t[mask],axis=axis)
data_cf.append(vector_avg)
data_cf = np.array(data_cf).T
return data_cf
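# A minimal usage sketch (the threshold and labels are illustrative): collapse
# the columns of `data` whose hierarchical clusters merge below distance 0.5 by
# averaging them:
#   data_cf = collaspe_fclusters(data, t=0.5, col_labels=col_labels)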
def read_matrix(fn, sep='\t'):
"""
a helper that quickly imports a data frame from a text file
"""
with open (fn) as f:
header = f.next()
col_labels = header.strip().split(sep)[1:]
row_labels = []
data = []
for line in f:
sl = line.strip().split('\t')
row_labels.append(sl[0])
data.append(sl[1:])
data = np.array(data, dtype=float)
return data, col_labels, row_labels
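# Expected input layout for read_matrix()/read_matrix_subset() (inferred from
# the parsing above; tab-separated, the first header cell is ignored):
#   <anything>\tcol1\tcol2\t...
#   row1\t1.0\t2.0\t...
#   row2\t0.5\t3.1\t...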
def read_matrix_subset(fn, row_names=None, col_names=None, sep='\t'):
"""
Only read the selected rows/cols of the data frame, specified in row_names/col_names
"""
with open (fn) as f:
header = f.next()
col_labels = header.strip().split(sep)[1:]
col_labels = np.array(col_labels)
if col_names is not None:
col_mask = []
for col_name in col_labels:
if col_name in col_names:
col_mask.append(True)
else:
col_mask.append(False)
else:
col_mask = [True] * len(col_labels)
col_mask = np.array(col_mask) ## mask for col labels
data = []
for line in f:
sl = line.strip().split('\t')
row_name = sl[0]
row_values = np.array(sl[1:])
if row_names is None or row_name in row_names:
data.append(row_values[col_mask])
data = np.array(data, dtype=float)
return data, col_labels
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A sandbox implementation that emulates production App Engine."""
import __builtin__
import imp
import os
import re
import sys
import traceback
import types
import google
from google.appengine import dist
from google.appengine.api import app_logging
from google.appengine.api.logservice import logservice
from google.appengine import dist27 as dist27
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.runtime import request_environment
from google.appengine.tools.devappserver2.python import pdb_sandbox
from google.appengine.tools.devappserver2.python import request_state
from google.appengine.tools.devappserver2.python import stubs
# Needed to handle source file encoding
CODING_MAGIC_COMMENT_RE = re.compile('coding[:=]\s*([-\w.]+)')
DEFAULT_ENCODING = 'ascii'
_C_MODULES = frozenset(['numpy', 'Crypto', 'lxml', 'PIL'])
NAME_TO_CMODULE_WHITELIST_REGEX = {
'numpy': re.compile(r'numpy(\..*)?$'),
'pycrypto': re.compile(r'Crypto(\..*)?$'),
'lxml': re.compile(r'lxml(\..*)?$'),
'PIL': re.compile(r'(PIL(\..*)?|_imaging|_imagingft|_imagingmath)$'),
'ssl': re.compile(r'_ssl$'),
}
# Maps App Engine third-party library names to the Python package name for
# libraries whose names differ from the package names.
_THIRD_PARTY_LIBRARY_NAME_OVERRIDES = {
'pycrypto': 'Crypto'
}
# The location of third-party libraries will be different for the packaged SDK.
_THIRD_PARTY_LIBRARY_FORMAT_STRING = (
'lib/%(name)s-%(version)s')
# Store all the modules removed from sys.modules so they don't get cleaned up.
_removed_modules = []
def _make_request_id_aware_start_new_thread(base_start_new_thread):
"""Returns a replacement for start_new_thread that inherits request id.
Returns a function with an interface that matches thread.start_new_thread
where the new thread inherits the request id of the current thread. The
request id is used by the Remote API to associate API calls with the HTTP
request that provoked them.
Args:
base_start_new_thread: The thread.start_new_thread function to call to
create a new thread.
Returns:
A replacement for start_new_thread.
"""
def _start_new_thread(target, args, kw=None):
if kw is None:
kw = {}
request_id = remote_api_stub.RemoteStub._GetRequestId()
request = request_state.get_request_state(request_id)
def _run():
try:
remote_api_stub.RemoteStub._SetRequestId(request_id)
request.start_thread()
target(*args, **kw)
finally:
request_environment.current_request.Clear()
request.end_thread()
return base_start_new_thread(_run, ())
return _start_new_thread
def enable_sandbox(config):
"""Enable the sandbox based on the configuration.
This includes installing import hooks to restrict access to C modules and
stub out functions that are not implemented in production, replacing the file
builtins with read-only versions, and adding enabled libraries to the path.
Args:
config: The runtime_config_pb2.Config to use to configure the sandbox.
"""
devnull = open(os.path.devnull)
modules = [os, traceback, google]
c_module = _find_shared_object_c_module()
if c_module:
modules.append(c_module)
module_paths = [module.__file__ for module in modules]
module_paths.extend([os.path.realpath(module.__file__) for module in modules])
python_lib_paths = [config.application_root]
for path in sys.path:
if any(module_path.startswith(path) for module_path in module_paths):
python_lib_paths.append(path)
python_lib_paths.extend(_enable_libraries(config.libraries))
for name in list(sys.modules):
if not _should_keep_module(name):
_removed_modules.append(sys.modules[name])
del sys.modules[name]
path_override_hook = PathOverrideImportHook(
set(_THIRD_PARTY_LIBRARY_NAME_OVERRIDES.get(lib.name, lib.name)
for lib in config.libraries).intersection(_C_MODULES))
python_lib_paths.extend(path_override_hook.extra_sys_paths)
stubs.FakeFile.set_allowed_paths(config.application_root,
python_lib_paths[1:] +
path_override_hook.extra_accessible_paths)
stubs.FakeFile.set_skip_files(config.skip_files)
stubs.FakeFile.set_static_files(config.static_files)
__builtin__.file = stubs.FakeFile
__builtin__.open = stubs.FakeFile
types.FileType = stubs.FakeFile
sys.platform = 'linux3'
enabled_library_regexes = [
NAME_TO_CMODULE_WHITELIST_REGEX[lib.name] for lib in config.libraries
if lib.name in NAME_TO_CMODULE_WHITELIST_REGEX]
sys.meta_path = [
StubModuleImportHook(),
ModuleOverrideImportHook(_MODULE_OVERRIDE_POLICIES),
CModuleImportHook(enabled_library_regexes),
path_override_hook,
PyCryptoRandomImportHook,
PathRestrictingImportHook(enabled_library_regexes)
]
sys.path_importer_cache = {}
sys.path = python_lib_paths[:]
thread = __import__('thread')
__import__('%s.threading' % dist27.__name__)
threading = sys.modules['%s.threading' % dist27.__name__]
thread.start_new_thread = _make_request_id_aware_start_new_thread(
thread.start_new_thread)
# This import needs to be after enabling the sandbox so it imports the
# sandboxed version of the logging module.
from google.appengine.runtime import runtime
runtime.PatchStartNewThread(thread)
threading._start_new_thread = thread.start_new_thread
os.chdir(config.application_root)
sandboxed_os = __import__('os')
request_environment.PatchOsEnviron(sandboxed_os)
os.__dict__.update(sandboxed_os.__dict__)
_init_logging(config.stderr_log_level)
pdb_sandbox.install(config)
sys.stdin = devnull
sys.stdout = sys.stderr
def _find_shared_object_c_module():
for module_name in ['_sqlite3', '_multiprocessing', '_ctypes', 'bz2']:
try:
module = __import__(module_name)
except ImportError:
continue
else:
if hasattr(module, '__file__'):
return module
return None
def _should_keep_module(name):
"""Returns True if the module should be retained after sandboxing."""
return (name in ('__builtin__', 'sys', 'codecs', 'encodings', 'site',
'google') or
name.startswith('google.') or name.startswith('encodings.') or
# Making mysql available is a hack to make the CloudSQL functionality
# work.
'mysql' in name.lower())
def _init_logging(stderr_log_level):
logging = __import__('logging')
logger = logging.getLogger()
console_handler = logging.StreamHandler(sys.stderr)
if stderr_log_level == 0:
console_handler.setLevel(logging.DEBUG)
elif stderr_log_level == 1:
console_handler.setLevel(logging.INFO)
elif stderr_log_level == 2:
console_handler.setLevel(logging.WARNING)
elif stderr_log_level == 3:
console_handler.setLevel(logging.ERROR)
elif stderr_log_level == 4:
console_handler.setLevel(logging.CRITICAL)
console_handler.setFormatter(logging.Formatter(
'%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s] %(message)s'))
logger.addHandler(console_handler)
logging_stream = request_environment.RequestLocalStream(
request_environment.current_request)
logger.addHandler(app_logging.AppLogsHandler())
logger.setLevel(logging.DEBUG)
logservice.logs_buffer = lambda: request_environment.current_request.errors
sys.stderr = Tee(sys.stderr, logging_stream)
class Tee(object):
"""A writeable stream that forwards to zero or more streams."""
def __init__(self, *streams):
self._streams = streams
def close(self):
for stream in self._streams:
stream.close()
def flush(self):
for stream in self._streams:
stream.flush()
def write(self, data):
for stream in self._streams:
stream.write(data)
def writelines(self, data):
for stream in self._streams:
stream.writelines(data)
def _enable_libraries(libraries):
"""Add enabled libraries to the path.
Args:
libraries: A repeated Config.Library containing the libraries to enable.
Returns:
A list of paths containing the enabled libraries.
"""
library_dirs = []
library_pattern = os.path.join(os.path.dirname(
os.path.dirname(google.__file__)), _THIRD_PARTY_LIBRARY_FORMAT_STRING)
for library in libraries:
# Encode the library name/version to convert the Python type
# from unicode to str so that Python doesn't try to decode
# library pattern from str to unicode (which can cause problems
    # when the SDK has non-ASCII data in the directory). Encoding as
    # ASCII should be safe as we control the library info and are not
# likely to have non-ASCII names/versions.
library_dir = os.path.abspath(
library_pattern % {'name': library.name.encode('ascii'),
'version': library.version.encode('ascii')})
library_dirs.append(library_dir)
return library_dirs
class BaseImportHook(object):
"""A base class implementing common import hook functionality.
This provides utilities for implementing both the finder and loader parts of
the PEP 302 importer protocol and implements the optional extensions to the
importer protocol.
"""
def _find_module_or_loader(self, submodule_name, fullname, path):
"""Acts like imp.find_module with support for path hooks.
Args:
submodule_name: The name of the submodule within its parent package.
fullname: The full name of the module to load.
path: A list containing the paths to search for the module.
Returns:
A tuple (source_file, path_name, description, loader) where:
source_file: An open file or None.
path_name: A str containing the path to the module.
description: A description tuple like the one imp.find_module returns.
loader: A PEP 302 compatible path hook. If this is not None, then the
other elements will be None.
Raises:
ImportError: The module could not be imported.
"""
for path_entry in path + [None]:
result = self._find_path_hook(submodule_name, fullname, path_entry)
if result is not None:
break
else:
raise ImportError('No module named %s' % fullname)
if isinstance(result, tuple):
return result + (None,)
else:
return None, None, None, result.find_module(fullname)
def _find_and_load_module(self, submodule_name, fullname, path):
"""Finds and loads a module, using a provided search path.
Args:
submodule_name: The name of the submodule within its parent package.
fullname: The full name of the module to load.
path: A list containing the paths to search for the module.
Returns:
The requested module.
Raises:
ImportError: The module could not be imported.
"""
source_file, path_name, description, loader = self._find_module_or_loader(
submodule_name, fullname, path)
if loader:
return loader.load_module(fullname)
try:
return imp.load_module(fullname, source_file, path_name, description)
finally:
if source_file:
source_file.close()
def _find_path_hook(self, submodule, submodule_fullname, path_entry):
"""Helper for _find_and_load_module to find a module in a path entry.
Args:
submodule: The last portion of the module name from submodule_fullname.
submodule_fullname: The full name of the module to be imported.
path_entry: A single sys.path entry, or None representing the builtins.
Returns:
None if nothing was found, a PEP 302 loader if one was found or a
tuple (source_file, path_name, description) where:
source_file: An open file of the source file.
path_name: A str containing the path to the source file.
description: A description tuple to be passed to imp.load_module.
"""
if path_entry is None:
# This is the magic entry that tells us to look for a built-in module.
if submodule_fullname in sys.builtin_module_names:
try:
result = imp.find_module(submodule)
except ImportError:
pass
else:
# Did find_module() find a built-in module? Unpack the result.
_, _, description = result
_, _, file_type = description
if file_type == imp.C_BUILTIN:
return result
# Skip over this entry if we get this far.
return None
# It's a regular sys.path entry.
try:
importer = sys.path_importer_cache[path_entry]
except KeyError:
# Cache miss; try each path hook in turn.
importer = None
for hook in sys.path_hooks:
try:
importer = hook(path_entry)
# Success.
break
except ImportError:
# This importer doesn't handle this path entry.
pass
# Cache the result, whether an importer matched or not.
sys.path_importer_cache[path_entry] = importer
if importer is None:
# No importer. Use the default approach.
try:
return imp.find_module(submodule, [path_entry])
except ImportError:
pass
else:
# Have an importer. Try it.
loader = importer.find_module(submodule_fullname)
if loader is not None:
# This importer knows about this module.
return loader
# None of the above.
return None
def _get_parent_package(self, fullname):
"""Retrieves the parent package of a fully qualified module name.
Args:
fullname: Full name of the module whose parent should be retrieved (e.g.,
foo.bar).
Returns:
Module instance for the parent or None if there is no parent module.
Raises:
ImportError: The module's parent could not be found.
"""
all_modules = fullname.split('.')
parent_module_fullname = '.'.join(all_modules[:-1])
if parent_module_fullname:
__import__(parent_module_fullname)
return sys.modules[parent_module_fullname]
return None
def _get_parent_search_path(self, fullname):
"""Determines the search path of a module's parent package.
Args:
fullname: Full name of the module to look up (e.g., foo.bar).
Returns:
Tuple (submodule, search_path) where:
submodule: The last portion of the module name from fullname (e.g.,
if fullname is foo.bar, then this is bar).
search_path: List of paths that belong to the parent package's search
path or None if there is no parent package.
Raises:
ImportError exception if the module or its parent could not be found.
"""
_, _, submodule = fullname.rpartition('.')
parent_package = self._get_parent_package(fullname)
search_path = sys.path
if parent_package is not None and hasattr(parent_package, '__path__'):
search_path = parent_package.__path__
return submodule, search_path
def _get_module_info(self, fullname):
"""Determines the path on disk and the search path of a module or package.
Args:
fullname: Full name of the module to look up (e.g., foo.bar).
Returns:
Tuple (pathname, search_path, submodule, loader) where:
pathname: String containing the full path of the module on disk,
or None if the module wasn't loaded from disk (e.g. from a zipfile).
search_path: List of paths that belong to the found package's search
path or None if found module is not a package.
submodule: The relative name of the submodule that's being imported.
loader: A PEP 302 compatible path hook. If this is not None, then the
other elements will be None.
"""
submodule, search_path = self._get_parent_search_path(fullname)
_, pathname, description, loader = self._find_module_or_loader(
submodule, fullname, search_path)
if loader:
return None, None, None, loader
else:
_, _, file_type = description
module_search_path = None
if file_type == imp.PKG_DIRECTORY:
module_search_path = [pathname]
pathname = os.path.join(pathname, '__init__%spy' % os.extsep)
return pathname, module_search_path, submodule, None
def is_package(self, fullname):
"""Returns whether the module specified by fullname refers to a package.
This implements part of the extensions to the PEP 302 importer protocol.
Args:
fullname: The fullname of the module.
Returns:
True if fullname refers to a package.
"""
submodule, search_path = self._get_parent_search_path(fullname)
_, _, description, loader = self._find_module_or_loader(
submodule, fullname, search_path)
if loader:
return loader.is_package(fullname)
_, _, file_type = description
if file_type == imp.PKG_DIRECTORY:
return True
return False
def get_source(self, fullname):
"""Returns the source for the module specified by fullname.
This implements part of the extensions to the PEP 302 importer protocol.
Args:
fullname: The fullname of the module.
Returns:
The source for the module.
"""
full_path, _, _, loader = self._get_module_info(fullname)
if loader:
return loader.get_source(fullname)
if full_path is None:
return None
source_file = open(full_path)
try:
return source_file.read()
finally:
source_file.close()
def get_code(self, fullname):
"""Returns the code object for the module specified by fullname.
This implements part of the extensions to the PEP 302 importer protocol.
Args:
fullname: The fullname of the module.
Returns:
The code object associated the module.
"""
full_path, _, _, loader = self._get_module_info(fullname)
if loader:
return loader.get_code(fullname)
if full_path is None:
return None
source_file = open(full_path)
try:
source_code = source_file.read()
finally:
source_file.close()
# Check that coding cookie is correct if present, error if not present and
# we can't decode with the default of 'ascii'. According to PEP 263 this
# coding cookie line must be in the first or second line of the file.
encoding = DEFAULT_ENCODING
for line in source_code.split('\n', 2)[:2]:
matches = CODING_MAGIC_COMMENT_RE.findall(line)
if matches:
encoding = matches[0].lower()
# This may raise up to the user, which is what we want, however we ignore
# the output because we don't want to return a unicode version of the code.
source_code.decode(encoding)
return compile(source_code, full_path, 'exec')
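# The coding-cookie handling in get_code() above follows PEP 263: an encoding
# declaration must appear on the first or second line of the source. The
# standalone sketch below is illustrative only and is not part of the SDK; it
# uses the regular expression suggested by PEP 263 rather than this module's
# CODING_MAGIC_COMMENT_RE.
import re as _re_for_sketch
_PEP263_COOKIE_RE_SKETCH = _re_for_sketch.compile(r'coding[:=]\s*([-\w.]+)')
def _detect_source_encoding_sketch(source_code, default='ascii'):
  """Returns the encoding declared in source_code, or the default."""
  for line in source_code.split('\n', 2)[:2]:
    matches = _PEP263_COOKIE_RE_SKETCH.findall(line)
    if matches:
      return matches[0].lower()
  return default
# Example: _detect_source_encoding_sketch('# -*- coding: utf-8 -*-\nx = 1\n')
# returns 'utf-8'.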
class PathOverrideImportHook(BaseImportHook):
"""An import hook that imports enabled modules from predetermined paths.
Imports handled by this hook ignore the paths in sys.path, instead using paths
discovered at initialization time.
Attributes:
extra_sys_paths: A list of paths that should be added to sys.path.
extra_accessible_paths: A list of paths that should be accessible by
sandboxed code.
"""
def __init__(self, modules):
self._modules = {}
self.extra_accessible_paths = []
self.extra_sys_paths = []
for module in modules:
module_path = self._get_module_path(module)
if module_path:
self._modules[module] = module_path
if isinstance(module_path, str):
package_dir = os.path.join(module_path, module)
if os.path.isdir(package_dir):
if module == 'PIL':
self.extra_sys_paths.append(package_dir)
else:
self.extra_accessible_paths.append(package_dir)
def find_module(self, fullname, unused_path=None):
return fullname in self._modules and self or None
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
module_path = self._modules[fullname]
if hasattr(module_path, 'load_module'):
module = module_path.load_module(fullname)
else:
module = self._find_and_load_module(fullname, fullname, [module_path])
module.__loader__ = self
return module
def _get_module_path(self, fullname):
"""Returns the directory containing the module or None if not found."""
try:
_, _, submodule = fullname.rpartition('.')
f, filepath, _, loader = self._find_module_or_loader(
submodule, fullname, sys.path)
except ImportError:
return None
if f:
f.close()
if loader:
return loader.find_module(fullname)
return os.path.dirname(filepath)
class ModuleOverridePolicy(object):
"""A policy for implementing a partial whitelist for a module."""
def __init__(self, default_stub=None,
whitelist=None,
overrides=None,
deletes=None,
constant_types=(str, int, long, BaseException),
default_pass_through=False):
self.default_stub = default_stub
self.whitelist = whitelist or []
self.overrides = overrides or {}
self.deletes = deletes or []
self.constant_types = constant_types
self.default_pass_through = default_pass_through
def apply_policy(self, module_dict):
"""Apply this policy to the provided module dict.
In order, one of the following will apply:
- Symbols in overrides are set to the override value.
- Symbols in deletes are removed.
- Whitelisted symbols and symbols with a constant type are unchanged.
- If a default stub is set, all other symbols are replaced by it.
- If default_pass_through is True, all other symbols are unchanged.
- If default_pass_through is False, all other symbols are removed.
Args:
module_dict: The module dict to be filtered.
"""
for symbol in module_dict.keys():
if symbol in self.overrides:
module_dict[symbol] = self.overrides[symbol]
elif symbol in self.deletes:
del module_dict[symbol]
elif not (symbol in self.whitelist or
isinstance(module_dict[symbol], self.constant_types) or
(symbol.startswith('__') and symbol.endswith('__'))):
if self.default_stub:
module_dict[symbol] = self.default_stub
elif not self.default_pass_through:
del module_dict[symbol]
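# Illustrative sketch (not part of the SDK) of how apply_policy() filters a
# module dict. The module contents below are made up; the real policies used by
# the sandbox follow in _MODULE_OVERRIDE_POLICIES.
def _module_override_policy_example():
  def _stub(*args, **kwargs):
    raise NotImplementedError('not available in the sandbox')
  policy = ModuleOverridePolicy(default_stub=_stub,
                                whitelist=['safe_function'],
                                deletes=['dangerous_function'])
  fake_module_dict = {
      '__name__': 'example',                 # dunder names are always kept
      'VERSION': 3,                          # constant types are kept
      'safe_function': lambda: 'ok',         # whitelisted, kept unchanged
      'dangerous_function': lambda: 'boom',  # listed in deletes, removed
      'other_function': lambda: 'hmm',       # replaced by the default stub
  }
  policy.apply_policy(fake_module_dict)
  return fake_module_dict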
_MODULE_OVERRIDE_POLICIES = {
'os': ModuleOverridePolicy(
default_stub=stubs.os_error_not_implemented,
whitelist=['altsep', 'curdir', 'defpath', 'devnull', 'environ', 'error',
'fstat', 'getcwd', 'getcwdu', 'getenv', '_get_exports_list',
'name', 'open', 'pardir', 'path', 'pathsep', 'sep',
'stat_float_times', 'stat_result', 'strerror', 'sys',
'walk'],
overrides={
'access': stubs.fake_access,
'listdir': stubs.RestrictedPathFunction(os.listdir),
# Alias lstat() to stat() to match the behavior in production.
'lstat': stubs.RestrictedPathFunction(os.stat),
'open': stubs.fake_open,
'stat': stubs.RestrictedPathFunction(os.stat),
'uname': stubs.fake_uname,
'getpid': stubs.return_minus_one,
'getppid': stubs.return_minus_one,
'getpgrp': stubs.return_minus_one,
'getgid': stubs.return_minus_one,
'getegid': stubs.return_minus_one,
'geteuid': stubs.return_minus_one,
'getuid': stubs.return_minus_one,
'urandom': stubs.fake_urandom,
'system': stubs.return_minus_one,
},
deletes=['execv', 'execve']),
'signal': ModuleOverridePolicy(overrides={'__doc__': None}),
'locale': ModuleOverridePolicy(
overrides={'setlocale': stubs.fake_set_locale},
default_pass_through=True),
'distutils.util': ModuleOverridePolicy(
overrides={'get_platform': stubs.fake_get_platform},
default_pass_through=True),
# TODO: Stub out imp.find_module and friends.
}
class ModuleOverrideImportHook(BaseImportHook):
"""An import hook that applies a ModuleOverridePolicy to modules."""
def __init__(self, policies):
super(ModuleOverrideImportHook, self).__init__()
self.policies = policies
def find_module(self, fullname, unused_path=None):
return fullname in self.policies and self or None
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
parent_name, _, submodule_name = fullname.rpartition('.')
if parent_name:
parent = sys.modules[parent_name]
path = getattr(parent, '__path__', sys.path)
else:
path = sys.path
parent = None
module = self._find_and_load_module(submodule_name, fullname, path)
self.policies[fullname].apply_policy(module.__dict__)
module.__loader__ = self
sys.modules[fullname] = module
return module
class StubModuleImportHook(BaseImportHook):
"""An import hook that replaces entire modules with stubs."""
def find_module(self, fullname, unused_path=None):
return self if fullname in dist27.MODULE_OVERRIDES else None
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
return self.import_stub_module(fullname)
def import_stub_module(self, name):
"""Import the stub module replacement for the specified module."""
# Do the equivalent of
# ``from google.appengine.dist import <name>``.
providing_dist = dist
# When using the Py27 runtime, modules in dist27 have priority.
# (They have already been vetted.)
if name in dist27.__all__:
providing_dist = dist27
fullname = '%s.%s' % (providing_dist.__name__, name)
__import__(fullname, {}, {})
module = imp.new_module(fullname)
module.__dict__.update(sys.modules[fullname].__dict__)
module.__loader__ = self
module.__name__ = name
module.__package__ = None
sys.modules[name] = module
return module
_WHITE_LIST_C_MODULES = [
'array',
'_ast',
'binascii',
'_bisect',
'_bytesio',
'bz2',
'cmath',
'_codecs',
'_codecs_cn',
'_codecs_hk',
'_codecs_iso2022',
'_codecs_jp',
'_codecs_kr',
'_codecs_tw',
'_collections', # Python 2.6 compatibility
'crypt',
'cPickle',
'cStringIO',
'_csv',
'datetime',
'_elementtree',
'errno',
'exceptions',
'_fileio',
'_functools',
'future_builtins',
'gc',
'_hashlib',
'_heapq',
'imp',
'_io',
'itertools',
'_json',
'_locale',
'_lsprof',
'__main__',
'marshal',
'math',
'_md5', # Python2.5 compatibility
'_multibytecodec',
'nt', # Only indirectly through the os module.
'operator',
'parser',
'posix', # Only indirectly through the os module.
'pyexpat',
'_random',
'_sha256', # Python2.5 compatibility
'_sha512', # Python2.5 compatibility
'_sha', # Python2.5 compatibility
'_sre',
'strop',
'_struct',
'_symtable',
'sys',
'thread',
'time',
'timing',
'unicodedata',
'_warnings',
'_weakref',
'zipimport',
'zlib',
]
class CModuleImportHook(object):
"""An import hook implementing a C module (builtin or extensions) whitelist.
CModuleImportHook implements the PEP 302 finder protocol where it returns
itself as a loader for any builtin module that isn't whitelisted or part of an
enabled third-party library. The loader implementation always raises
ImportError.
"""
def __init__(self, enabled_regexes):
self._enabled_regexes = enabled_regexes
@staticmethod
def _module_type(fullname, path):
_, _, submodule_name = fullname.rpartition('.')
try:
f, _, description = imp.find_module(submodule_name, path)
_, _, file_type = description
except ImportError:
return None
if f:
f.close()
return file_type
def find_module(self, fullname, path=None):
if (fullname in _WHITE_LIST_C_MODULES or
any(regex.match(fullname) for regex in self._enabled_regexes)):
return None
if self._module_type(fullname, path) in [imp.C_EXTENSION, imp.C_BUILTIN]:
return self
return None
def load_module(self, fullname):
raise ImportError('No module named %s' % fullname)
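# Minimal standalone sketch of the PEP 302 pattern CModuleImportHook relies on:
# the finder claims a module by returning itself, and its loader then refuses
# to load it, so the import fails. This class is illustrative only and is not
# installed by the sandbox.
class _BlockedModuleImportHookSketch(object):
  def __init__(self, blocked_name):
    self._blocked_name = blocked_name
  def find_module(self, fullname, path=None):
    return self if fullname == self._blocked_name else None
  def load_module(self, fullname):
    raise ImportError('No module named %s' % fullname)
# Example usage:
#   sys.meta_path.insert(0, _BlockedModuleImportHookSketch('bz2'))
#   import bz2  # now raises ImportError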
class PathRestrictingImportHook(object):
"""An import hook that restricts imports to accessible paths.
This import hook uses FakeFile.is_file_accessible to determine which paths are
accessible.
"""
_EXCLUDED_TYPES = frozenset([
imp.C_BUILTIN,
imp.PY_FROZEN,
])
def __init__(self, enabled_regexes):
self._enabled_regexes = enabled_regexes
def find_module(self, fullname, path=None):
if any(regex.match(fullname) for regex in self._enabled_regexes):
return None
_, _, submodule_name = fullname.rpartition('.')
try:
f, filename, description = imp.find_module(submodule_name, path)
except ImportError:
return None
if f:
f.close()
_, _, file_type = description
if (file_type in self._EXCLUDED_TYPES or
stubs.FakeFile.is_file_accessible(filename) or
(filename.endswith('.pyc') and
os.path.exists(filename.replace('.pyc', '.py')))):
return None
return self
def load_module(self, fullname):
raise ImportError('No module named %s' % fullname)
class PyCryptoRandomImportHook(BaseImportHook):
"""An import hook that allows Crypto.Random.OSRNG.new() to work on posix.
This changes PyCrypto to always use os.urandom() instead of reading from
/dev/urandom.
"""
def __init__(self, path):
self._path = path
@classmethod
def find_module(cls, fullname, path=None):
if fullname == 'Crypto.Random.OSRNG.posix':
return cls(path)
return None
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
__import__('Crypto.Random.OSRNG.fallback')
module = self._find_and_load_module('posix', fullname, self._path)
fallback = sys.modules['Crypto.Random.OSRNG.fallback']
module.new = fallback.new
module.__loader__ = self
sys.modules[fullname] = module
return module
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import mock
from neutron.common import exceptions
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers.haproxy import (
plugin_driver
)
from neutron.tests import base
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
super(TestLoadBalancerPluginBase, self).setUp()
# create another API instance to make testing easier
# pass a mock to our API instance
# we need access to loaded plugins to modify models
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase):
def setUp(self):
super(TestLoadBalancerCallbacks, self).setUp()
self.callbacks = plugin_driver.LoadBalancerCallbacks(
self.plugin_instance
)
get_lbaas_agents_patcher = mock.patch(
'neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.get_lbaas_agents')
get_lbaas_agents_patcher.start()
# mocking plugin_driver create_pool() as it does nothing more than
# pool scheduling which is beyond the scope of this test case
mock.patch('neutron.services.loadbalancer.drivers.haproxy'
'.plugin_driver.HaproxyOnHostPluginDriver'
'.create_pool').start()
self.addCleanup(mock.patch.stopall)
def test_get_ready_devices(self):
with self.vip() as vip:
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual(ready, [vip['vip']['pool_id']])
def test_get_ready_devices_multiple_vips_and_pools(self):
ctx = context.get_admin_context()
# add 3 pools and 2 vips directly to DB
# to create 2 "ready" devices and one pool without vip
pools = []
for i in xrange(0, 3):
pools.append(ldb.Pool(id=uuidutils.generate_uuid(),
subnet_id=self._subnet_id,
protocol="HTTP",
lb_method="ROUND_ROBIN",
status=constants.ACTIVE,
admin_state_up=True))
ctx.session.add(pools[i])
vip0 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[0].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip0)
pools[0].vip_id = vip0.id
vip1 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[1].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip1)
pools[1].vip_id = vip1.id
ctx.session.flush()
self.assertEqual(ctx.session.query(ldb.Pool).count(), 3)
self.assertEqual(ctx.session.query(ldb.Vip).count(), 2)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin'
'.list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {'pools': [{'id': pools[0].id},
{'id': pools[1].id},
{'id': pools[2].id}]}
ready = self.callbacks.get_ready_devices(ctx)
self.assertEqual(len(ready), 2)
self.assertIn(pools[0].id, ready)
self.assertIn(pools[1].id, ready)
self.assertNotIn(pools[2].id, ready)
# cleanup
ctx.session.query(ldb.Pool).delete()
ctx.session.query(ldb.Vip).delete()
def test_get_ready_devices_inactive_vip(self):
with self.vip() as vip:
# set the vip inactive need to use plugin directly since
# status is not tenant mutable
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['vip']['id'],
{'vip': {'status': constants.INACTIVE}}
)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertFalse(ready)
def test_get_ready_devices_inactive_pool(self):
with self.vip() as vip:
# set the pool inactive need to use plugin directly since
# status is not tenant mutable
self.plugin_instance.update_pool(
context.get_admin_context(),
vip['vip']['pool_id'],
{'pool': {'status': constants.INACTIVE}}
)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertFalse(ready)
def test_get_logical_device_inactive(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']):
self.assertRaises(
exceptions.Invalid,
self.callbacks.get_logical_device,
context.get_admin_context(),
pool['pool']['id'],
activate=False
)
def test_get_logical_device_activate(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
# build the expected
port = self.plugin_instance._core_plugin.get_port(
ctx, vip['vip']['port_id']
)
subnet = self.plugin_instance._core_plugin.get_subnet(
ctx, vip['vip']['subnet_id']
)
port['fixed_ips'][0]['subnet'] = subnet
# reload pool to add members and vip
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id']
)
pool['status'] = constants.ACTIVE
vip['vip']['status'] = constants.ACTIVE
vip['vip']['port'] = port
member['member']['status'] = constants.ACTIVE
expected = {
'pool': pool,
'vip': vip['vip'],
'members': [member['member']],
'healthmonitors': []
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['id'], activate=True
)
self.assertEqual(logical_config, expected)
def _update_port_test_helper(self, expected, func, **kwargs):
core = self.plugin_instance._core_plugin
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']):
ctx = context.get_admin_context()
func(ctx, port_id=vip['vip']['port_id'], **kwargs)
db_port = core.get_port(ctx, vip['vip']['port_id'])
for k, v in expected.iteritems():
self.assertEqual(db_port[k], v)
def test_plug_vip_port(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f',
'admin_state_up': True
}
self._update_port_test_helper(
exp,
self.callbacks.plug_vip_port,
host='host'
)
def test_unplug_vip_port(self):
exp = {
'device_owner': '',
'device_id': '',
'admin_state_up': False
}
self._update_port_test_helper(
exp,
self.callbacks.unplug_vip_port,
host='host'
)
class TestLoadBalancerAgentApi(base.BaseTestCase):
def setUp(self):
super(TestLoadBalancerAgentApi, self).setUp()
self.addCleanup(mock.patch.stopall)
self.api = plugin_driver.LoadBalancerAgentApi('topic')
self.mock_cast = mock.patch.object(self.api, 'cast').start()
self.mock_msg = mock.patch.object(self.api, 'make_msg').start()
def test_init(self):
self.assertEqual(self.api.topic, 'topic')
def _call_test_helper(self, method_name):
rv = getattr(self.api, method_name)(mock.sentinel.context, 'test',
'host')
self.assertEqual(rv, self.mock_cast.return_value)
self.mock_cast.assert_called_once_with(
mock.sentinel.context,
self.mock_msg.return_value,
topic='topic.host'
)
self.mock_msg.assert_called_once_with(
method_name,
pool_id='test',
host='host'
)
def test_reload_pool(self):
self._call_test_helper('reload_pool')
def test_destroy_pool(self):
self._call_test_helper('destroy_pool')
def test_modify_pool(self):
self._call_test_helper('modify_pool')
def test_agent_updated(self):
rv = self.api.agent_updated(mock.sentinel.context, True, 'host')
self.assertEqual(rv, self.mock_cast.return_value)
self.mock_cast.assert_called_once_with(
mock.sentinel.context,
self.mock_msg.return_value,
topic='topic.host',
version='1.1'
)
self.mock_msg.assert_called_once_with(
'agent_updated',
payload={'admin_state_up': True}
)
class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase):
def setUp(self):
self.log = mock.patch.object(plugin_driver, 'LOG')
api_cls = mock.patch.object(plugin_driver,
'LoadBalancerAgentApi').start()
super(TestLoadBalancerPluginNotificationWrapper, self).setUp()
self.mock_api = api_cls.return_value
# mocking plugin_driver create_pool() as it does nothing more than
# pool scheduling which is beyond the scope of this test case
mock.patch('neutron.services.loadbalancer.drivers.haproxy'
'.plugin_driver.HaproxyOnHostPluginDriver'
'.create_pool').start()
self.addCleanup(mock.patch.stopall)
def test_create_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
self.mock_api.reload_pool.assert_called_once_with(
mock.ANY,
vip['vip']['pool_id'],
'host'
)
def test_update_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
self.mock_api.reset_mock()
ctx = context.get_admin_context()
vip['vip'].pop('status')
new_vip = self.plugin_instance.update_vip(
ctx,
vip['vip']['id'],
vip
)
self.mock_api.reload_pool.assert_called_once_with(
mock.ANY,
vip['vip']['pool_id'],
'host'
)
self.assertEqual(
new_vip['status'],
constants.PENDING_UPDATE
)
def test_delete_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet, no_delete=True) as vip:
self.mock_api.reset_mock()
ctx = context.get_admin_context()
self.plugin_instance.delete_vip(ctx, vip['vip']['id'])
self.mock_api.destroy_pool.assert_called_once_with(
mock.ANY,
vip['vip']['pool_id'],
'host'
)
def test_create_pool(self):
with self.pool():
self.assertFalse(self.mock_api.reload_pool.called)
self.assertFalse(self.mock_api.modify_pool.called)
self.assertFalse(self.mock_api.destroy_pool.called)
def test_update_pool_non_active(self):
with self.pool() as pool:
pool['pool']['status'] = 'INACTIVE'
ctx = context.get_admin_context()
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.mock_api.destroy_pool.assert_called_once_with(
mock.ANY, pool['pool']['id'], 'host')
self.assertFalse(self.mock_api.reload_pool.called)
self.assertFalse(self.mock_api.modify_pool.called)
def test_update_pool_no_vip_id(self):
with self.pool() as pool:
ctx = context.get_admin_context()
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.assertFalse(self.mock_api.destroy_pool.called)
self.assertFalse(self.mock_api.reload_pool.called)
self.assertFalse(self.mock_api.modify_pool.called)
def test_update_pool_with_vip_id(self):
with self.pool() as pool:
with self.vip(pool=pool):
ctx = context.get_admin_context()
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.mock_api.reload_pool.assert_called_once_with(
mock.ANY, pool['pool']['id'], 'host')
self.assertFalse(self.mock_api.destroy_pool.called)
self.assertFalse(self.mock_api.modify_pool.called)
def test_delete_pool(self):
with self.pool(no_delete=True) as pool:
req = self.new_delete_request('pools',
pool['pool']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
self.mock_api.destroy_pool.assert_called_once_with(
mock.ANY, pool['pool']['id'], 'host')
def test_create_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id):
self.mock_api.modify_pool.assert_called_once_with(
mock.ANY, pool_id, 'host')
def test_update_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
ctx = context.get_admin_context()
self.mock_api.modify_pool.reset_mock()
self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.modify_pool.assert_called_once_with(
mock.ANY, pool_id, 'host')
def test_update_member_new_pool(self):
with self.pool() as pool1:
pool1_id = pool1['pool']['id']
with self.pool() as pool2:
pool2_id = pool2['pool']['id']
with self.member(pool_id=pool1_id) as member:
ctx = context.get_admin_context()
self.mock_api.modify_pool.reset_mock()
member['member']['pool_id'] = pool2_id
self.plugin_instance.update_member(ctx,
member['member']['id'],
member)
self.assertEqual(2, self.mock_api.modify_pool.call_count)
self.mock_api.modify_pool.assert_has_calls(
[mock.call(mock.ANY, pool1_id, 'host'),
mock.call(mock.ANY, pool2_id, 'host')])
def test_delete_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id,
no_delete=True) as member:
self.mock_api.modify_pool.reset_mock()
req = self.new_delete_request('members',
member['member']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
self.mock_api.modify_pool.assert_called_once_with(
mock.ANY, pool_id, 'host')
def test_create_pool_health_monitor(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.health_monitor() as hm:
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx,
hm,
pool_id)
self.mock_api.modify_pool.assert_called_once_with(
mock.ANY, pool_id, 'host')
def test_delete_pool_health_monitor(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.health_monitor() as hm:
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx,
hm,
pool_id)
self.mock_api.modify_pool.reset_mock()
self.plugin_instance.delete_pool_health_monitor(
ctx, hm['health_monitor']['id'], pool_id)
self.mock_api.modify_pool.assert_called_once_with(
mock.ANY, pool_id, 'host')
def test_update_health_monitor_associated_with_pool(self):
with self.health_monitor(type='HTTP') as monitor:
with self.pool() as pool:
data = {
'health_monitor': {
'id': monitor['health_monitor']['id'],
'tenant_id': self._tenant_id
}
}
req = self.new_create_request(
'pools',
data,
fmt=self.fmt,
id=pool['pool']['id'],
subresource='health_monitors')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
self.mock_api.modify_pool.assert_called_once_with(
mock.ANY,
pool['pool']['id'],
'host'
)
self.mock_api.reset_mock()
data = {'health_monitor': {'delay': 20,
'timeout': 20,
'max_retries': 2,
'admin_state_up': False}}
req = self.new_update_request("health_monitors",
data,
monitor['health_monitor']['id'])
req.get_response(self.ext_api)
self.mock_api.modify_pool.assert_called_once_with(
mock.ANY,
pool['pool']['id'],
'host'
)
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Load fonts and render text.
This is a fairly low-level interface to text rendering. Obtain a font using :meth:`load`::
from pyglet import font
arial = font.load('Arial', 14, bold=True, italic=False)
Manually loading fonts is only required in the following situations:
* When manually rendering fonts;
* When using the deprecated font rendering in :mod:`pyglet.font.text`.
You are encouraged to use :mod:`pyglet.text` for actual text rendering; its classes will
handle font loading for you, so manual loading is not required.
pyglet will automatically load any system-installed fonts. You can add additional fonts
(for example, from your program resources) using :meth:`add_file` or
:meth:`add_directory`. These fonts are then available in the same way as system-installed fonts::
from pyglet import font
font.add_file('action_man.ttf')
action_man = font.load('Action Man', 16)
See the :mod:`pyglet.font.base` module for documentation on the base classes used
by this package.
"""
from __future__ import absolute_import, division
from past.builtins import basestring
import os
import sys
import weakref
import pyglet
from pyglet import gl
if not getattr(sys, 'is_epydoc', False):
if pyglet.compat_platform == 'darwin':
if pyglet.options['darwin_cocoa']:
from pyglet.font.quartz import QuartzFont
_font_class = QuartzFont
else:
from pyglet.font.carbon import CarbonFont
_font_class = CarbonFont
elif pyglet.compat_platform in ('win32', 'cygwin'):
if pyglet.options['font'][0] == 'win32':
from pyglet.font.win32 import Win32Font
_font_class = Win32Font
elif pyglet.options['font'][0] == 'gdiplus':
from pyglet.font.win32 import GDIPlusFont
_font_class = GDIPlusFont
else:
assert False, 'Unknown font driver'
else:
from pyglet.font.freetype import FreeTypeFont
_font_class = FreeTypeFont
def have_font(name):
"""Check if specified system font name is available."""
return _font_class.have_font(name)
def load(name=None, size=None, bold=False, italic=False, dpi=None):
"""Load a font for rendering.
:Parameters:
`name` : str, or list of str
Font family, for example, "Times New Roman". If a list of names
is provided, the first one matching a known font is used. If no
font can be matched to the name(s), a default font is used. In
pyglet 1.1, the name may be omitted.
`size` : float
Size of the font, in points. The returned font may be an exact
match or the closest available. In pyglet 1.1, the size may be
omitted, and defaults to 12pt.
`bold` : bool
If True, a bold variant is returned, if one exists for the given
family and size.
`italic` : bool
If True, an italic variant is returned, if one exists for the given
family and size.
`dpi` : float
The assumed resolution of the display device, for the purposes of
determining the pixel size of the font. Defaults to 96.
:rtype: `Font`
"""
# Arbitrary default size
if size is None:
size = 12
if dpi is None:
dpi = 96
# Find first matching name
if type(name) in (tuple, list):
for n in name:
if _font_class.have_font(n):
name = n
break
else:
name = None
# Locate or create font cache
shared_object_space = gl.current_context.object_space
if not hasattr(shared_object_space, 'pyglet_font_font_cache'):
shared_object_space.pyglet_font_font_cache = \
weakref.WeakValueDictionary()
shared_object_space.pyglet_font_font_hold = []
font_cache = shared_object_space.pyglet_font_font_cache
font_hold = shared_object_space.pyglet_font_font_hold
# Look for font name in font cache
descriptor = (name, size, bold, italic, dpi)
if descriptor in font_cache:
return font_cache[descriptor]
# Not in cache, create from scratch
font = _font_class(name, size, bold=bold, italic=italic, dpi=dpi)
# Save parameters for new-style layout classes to recover
font.name = name
font.size = size
font.bold = bold
font.italic = italic
font.dpi = dpi
# Cache font in weak-ref dictionary to avoid reloading while still in use
font_cache[descriptor] = font
# Hold onto refs of last three loaded fonts to prevent them being
# collected if momentarily dropped.
del font_hold[3:]
font_hold.insert(0, font)
return font
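# Hedged usage sketch (not part of pyglet): fonts are cached per GL context by
# the (name, size, bold, italic, dpi) descriptor, so repeated load() calls with
# the same arguments return the same Font object. The hidden window below is
# created only to establish a current GL context; the font name is arbitrary.
def _font_cache_example():
    window = pyglet.window.Window(visible=False)
    first = load('Arial', 14, bold=True)
    second = load('Arial', 14, bold=True)
    assert first is second  # served from the weak-value cache
    window.close()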
def add_file(font):
"""Add a font to pyglet's search path.
In order to load a font that is not installed on the system, you must
call this method to tell pyglet that it exists. You can supply
either a filename or any file-like object.
The font format is platform-dependent, but is typically a TrueType font
file containing a single font face. Note that to use a font added with this method,
you should pass the face name (not the file name) to :py:func:`pyglet.font.load` or any
other place where you normally specify a font.
:Parameters:
`font` : str or file-like object
Filename or file-like object to load fonts from.
"""
if isinstance(font, basestring):
font = open(font, 'rb')
if hasattr(font, 'read'):
font = font.read()
_font_class.add_font_data(font)
def add_directory(dir):
"""Add a directory of fonts to pyglet's search path.
This function simply calls :meth:`pyglet.font.add_file` for each file with a ``.ttf``
extension in the given directory. Subdirectories are not searched.
:Parameters:
`dir` : str
Directory that contains font files.
"""
for file in os.listdir(dir):
if file[-4:].lower() == '.ttf':
add_file(os.path.join(dir, file))
from .text import Text
__all__ = ('Text', 'add_file', 'add_directory', 'load', 'have_font')
|
|
# -*- coding: utf-8 -*-
""" Classes for loading digital elevation models as numeric grids """
import numexpr
import numpy as np
import os
import subprocess
import sys
import matplotlib
import matplotlib.pyplot as plt
from copy import copy
from osgeo import gdal, gdalconst
from rasterio.fill import fillnodata
from scarplet.utils import BoundingBox
sys.setrecursionlimit(10000)
FLOAT32_MIN = np.finfo(np.float32).min
GDAL_DRIVER_NAME = 'GTiff'
class CalculationMixin(object):
"""Mix-in class for grid calculations"""
def __init__(self):
pass
def _calculate_slope(self):
"""Calculate gradient of grid in x and y directions.
Pads boundary so as to return slope grids of same size as object's
grid data
Returns
-------
slope_x : numpy array
slope in x direction
slope_y : numpy array
slope in y direction
"""
dx = self._georef_info.dx
dy = self._georef_info.dy
PAD_DX = 2
PAD_DY = 2
self._pad_boundary(PAD_DX, PAD_DY)
z_pad = self._griddata
slope_x = (z_pad[1:-1, 2:] - z_pad[1:-1, :-2]) / (2 * dx)
slope_y = (z_pad[2:, 1:-1] - z_pad[:-2, 1:-1]) / (2 * dy)
return slope_x, slope_y
def _calculate_laplacian(self):
"""Calculate curvature of grid in y direction.
"""
return self._calculate_directional_laplacian(0)
def _calculate_directional_laplacian(self, alpha):
"""Calculate curvature of grid in arbitrary direction.
Parameters
----------
alpha : float
direction angle (azimuth) in radians. 0 is north or y-axis.
Returns
-------
        del2z : numpy array
grid of curvature values
"""
dx = self._georef_info.dx
dy = self._georef_info.dy
z = self._griddata
nan_idx = np.isnan(z)
z[nan_idx] = 0
dz_dx = np.diff(z, 1, 1)/dx
d2z_dxdy = np.diff(dz_dx, 1, 0)/dx
pad_x = np.zeros((d2z_dxdy.shape[0], 1))
d2z_dxdy = np.hstack([pad_x, d2z_dxdy])
pad_y = np.zeros((1, d2z_dxdy.shape[1]))
d2z_dxdy = np.vstack([pad_y, d2z_dxdy])
d2z_dx2 = np.diff(z, 2, 1)/dx**2
pad_x = np.zeros((d2z_dx2.shape[0], 1))
d2z_dx2 = np.hstack([pad_x, d2z_dx2, pad_x])
d2z_dy2 = np.diff(z, 2, 0)/dy**2
pad_y = np.zeros((1, d2z_dy2.shape[1]))
d2z_dy2 = np.vstack([pad_y, d2z_dy2, pad_y])
del2z = d2z_dx2 * np.cos(alpha) ** 2 - 2 * d2z_dxdy * np.sin(alpha) \
* np.cos(alpha) + d2z_dy2 * np.sin(alpha) ** 2
del2z[nan_idx] = np.nan
return del2z
def _calculate_directional_laplacian_numexpr(self, alpha):
"""Calculate curvature of grid in arbitrary direction.
Optimized with numexpr expressions.
Parameters
----------
alpha : float
direction angle (azimuth) in radians. 0 is north or y-axis.
Returns
-------
        del2z : numpy array
grid of curvature values
"""
dx = self._georef_info.dx
dy = self._georef_info.dy
z = self._griddata
nan_idx = np.isnan(z)
z[nan_idx] = 0
dz_dx = np.diff(z, 1, 1)/dx
d2z_dxdy = np.diff(dz_dx, 1, 0)/dx
pad_x = np.zeros((d2z_dxdy.shape[0], 1))
d2z_dxdy = np.hstack([pad_x, d2z_dxdy])
pad_y = np.zeros((1, d2z_dxdy.shape[1]))
d2z_dxdy = np.vstack([pad_y, d2z_dxdy])
d2z_dx2 = np.diff(z, 2, 1)/dx**2
pad_x = np.zeros((d2z_dx2.shape[0], 1))
d2z_dx2 = np.hstack([pad_x, d2z_dx2, pad_x])
d2z_dy2 = np.diff(z, 2, 0)/dy**2
pad_y = np.zeros((1, d2z_dy2.shape[1]))
d2z_dy2 = np.vstack([pad_y, d2z_dy2, pad_y])
del2z = numexpr.evaluate("d2z_dx2*cos(alpha)**2 - \
2*d2z_dxdy*sin(alpha)*cos(alpha) + d2z_dy2*sin(alpha)**2")
del2z[nan_idx] = np.nan
return del2z
def _estimate_curvature_noiselevel(self):
"""Estimate noise level in curvature of grid as a function of direction.
Returns
-------
angles : numpy array
array of orientations (azimuths) in radians
        mean : list
            list of mean curvature values in the corresponding directions
        sd : list
            list of curvature standard deviations in the corresponding directions
"""
from scipy import ndimage
angles = np.linspace(0, np.pi, num=180)
mean = []
sd = []
for alpha in angles:
del2z = self._calculate_directional_laplacian(alpha)
lowpass = ndimage.gaussian_filter(del2z, 100)
highpass = del2z - lowpass
mean.append(np.nanmean(highpass))
sd.append(np.nanstd(highpass))
return angles, mean, sd
def _pad_boundary(self, dx, dy):
"""Pad grid boundary with reflected boundary conditions.
"""
self._griddata = np.pad(self._griddata, pad_width=(dy, dx),
mode='reflect')
self.padded = True
self.pad_dx = dx
self.pad_dy = dy
ny, nx = self._griddata.shape
self._georef_info.nx = nx
self._georef_info.ny = ny
self._georef_info.xllcenter -= dx
self._georef_info.yllcenter -= dy
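# Standalone numerical sketch (not part of scarplet's API): the same
# central-difference stencil used in _calculate_slope, applied to the plane
# z = 2x + 3y on a unit-spacing grid, recovers the analytic gradients in the
# grid interior. The directional Laplacian above combines second derivatives as
#     del2z = z_xx * cos(alpha)**2 - 2 * z_xy * sin(alpha) * cos(alpha)
#             + z_yy * sin(alpha)**2
def _central_difference_slope_sketch():
    dx = dy = 1.0
    y, x = np.mgrid[0:5, 0:5]
    z = 2.0 * x + 3.0 * y
    z_pad = np.pad(z, pad_width=(2, 2), mode='reflect')
    slope_x = (z_pad[1:-1, 2:] - z_pad[1:-1, :-2]) / (2 * dx)
    slope_y = (z_pad[2:, 1:-1] - z_pad[:-2, 1:-1]) / (2 * dy)
    # interior values equal 2.0 and 3.0; reflection distorts the padded edges
    return slope_x, slope_y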
class GDALMixin(object):
pass
class GeorefInfo(object):
def __init__(self):
self.geo_transform = None
self.projection = None
self.xllcenter = None
self.yllcenter = None
self.dx = None
self.dy = None
self.nx = None
self.ny = None
self.ulx = None
self.uly = None
self.lrx = None
self.lry = None
class BaseSpatialGrid(GDALMixin):
"""Base class for spatial grid"""
dtype = gdalconst.GDT_Float32
def __init__(self, filename=None):
_georef_info = GeorefInfo()
if filename is not None:
self._georef_info = _georef_info
self.load(filename)
self.filename = filename
else:
self.filename = None
self._georef_info = _georef_info
self._griddata = np.empty((0, 0))
def is_contiguous(self, grid):
"""Returns true if grids are contiguous or overlap
Parameters
----------
grid : BaseSpatialGrid
"""
return self.bbox.intersects(grid.bbox)
def merge(self, grid):
"""Merge this grid with another BaseSpatialGrid.
        Wrapper around gdal_merge.py.
Parameters
----------
grid : BaseSpatialGrid
Returns
-------
merged_grid : BaseSpatialGrid
"""
if not self.is_contiguous(grid):
raise ValueError("ValueError: Grids are not contiguous")
# XXX: this is hacky, eventually implement as native GDAL
try:
command = ['gdal_merge.py', self.filename, grid.filename]
subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("Failed to merge grids. gdal_merge may not be installed.")
raise e
merged_grid = BaseSpatialGrid('out.tif')
merged_grid._griddata[merged_grid._griddata == FLOAT32_MIN] = np.nan
os.remove('out.tif')
return merged_grid
def plot(self, **kwargs):
"""Plot grid data
Keyword args:
Any valid keyword argument for matplotlib.pyplot.imshow
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.imshow(self._griddata, **kwargs)
def save(self, filename):
"""Save grid as georeferenced TIFF
"""
ncols = self._georef_info.nx
nrows = self._georef_info.ny
driver = gdal.GetDriverByName(GDAL_DRIVER_NAME)
out_raster = driver.Create(filename, ncols, nrows, 1, self.dtype)
out_raster.SetGeoTransform(self._georef_info.geo_transform)
out_band = out_raster.GetRasterBand(1)
out_band.WriteArray(self._griddata)
proj = self._georef_info.projection.ExportToWkt()
out_raster.SetProjection(proj)
out_band.FlushCache()
def load(self, filename):
"""Load grid from file
"""
self.label = filename.split('/')[-1].split('.')[0]
gdal_dataset = gdal.Open(filename)
band = gdal_dataset.GetRasterBand(1)
nodata = band.GetNoDataValue()
self._griddata = band.ReadAsArray().astype(float)
if nodata is not None:
nodata_index = np.where(self._griddata == nodata)
if self.dtype is not np.uint8:
self._griddata[nodata_index] = np.nan
geo_transform = gdal_dataset.GetGeoTransform()
projection = gdal_dataset.GetProjection()
nx = gdal_dataset.RasterXSize
ny = gdal_dataset.RasterYSize
self._georef_info.geo_transform = geo_transform
self._georef_info.projection = projection
self._georef_info.dx = self._georef_info.geo_transform[1]
self._georef_info.dy = self._georef_info.geo_transform[5]
self._georef_info.nx = nx
self._georef_info.ny = ny
self._georef_info.xllcenter = self._georef_info.geo_transform[0] \
+ self._georef_info.dx
self._georef_info.yllcenter = self._georef_info.geo_transform[3] \
- (self._georef_info.ny+1) \
* np.abs(self._georef_info.dy)
self._georef_info.ulx = self._georef_info.geo_transform[0]
self._georef_info.uly = self._georef_info.geo_transform[3]
self._georef_info.lrx = self._georef_info.geo_transform[0] \
+ self._georef_info.dx * self._georef_info.nx
self._georef_info.lry = self._georef_info.geo_transform[3] \
+ self._georef_info.dy * self._georef_info.ny
self.bbox = BoundingBox((self._georef_info.lrx, self._georef_info.lry),
(self._georef_info.ulx, self._georef_info.uly))
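# Hedged sketch (not part of scarplet): the corner coordinates computed in
# load() follow directly from the GDAL geotransform (x0, dx, 0, y0, 0, dy),
# where dy is negative for north-up rasters. The numbers in the example call
# are made up.
def _geotransform_corners_sketch(geo_transform, nx, ny):
    x0, dx, _, y0, _, dy = geo_transform
    ulx, uly = x0, y0
    lrx = x0 + dx * nx
    lry = y0 + dy * ny
    return (ulx, uly), (lrx, lry)
# _geotransform_corners_sketch((500000.0, 30.0, 0.0, 4100000.0, 0.0, -30.0),
#                              100, 80)
# -> ((500000.0, 4100000.0), (503000.0, 4097600.0))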
class DEMGrid(CalculationMixin, BaseSpatialGrid):
"""Class representing grid of elevation values"""
def __init__(self, filename=None):
_georef_info = GeorefInfo()
if filename is not None:
self._georef_info = _georef_info
self.load(filename)
self._griddata[self._griddata == FLOAT32_MIN] = np.nan
self.nodata_value = np.nan
self.filename = filename
self.shape = self._griddata.shape
self.is_interpolated = False
else:
self.filename = None
self.label = ''
self.shape = (0, 0)
self._georef_info = _georef_info
self._griddata = np.empty((0, 0))
self.is_interpolated = False
def plot(self, color=True, **kwargs):
fig, ax = plt.subplots(1, 1, **kwargs)
hs = Hillshade(self)
hs.plot()
if color:
im = ax.imshow(self._griddata, alpha=0.75, cmap='terrain')
plt.colorbar(im, ax=ax, shrink=0.75, label='Elevation')
ax.tick_params(direction='in')
ax.set_xlabel('x')
ax.set_ylabel('y')
def _fill_nodata(self):
"""Fill nodata values in elevation grid by interpolation.
Wrapper around GDAL/rasterio's FillNoData, fillnodata methods
"""
if ~np.isnan(self.nodata_value):
nodata_mask = self._griddata == self.nodata_value
else:
nodata_mask = np.isnan(self._griddata)
self.nodata_mask = nodata_mask
# XXX: GDAL (or rasterio) FillNoData takes mask with 0s at nodata
num_nodata = np.sum(nodata_mask)
prev_nodata = np.nan
        # iterate until all nodata is filled or no further progress is made
        while num_nodata > 0 and num_nodata != prev_nodata:
mask = np.isnan(self._griddata)
col_nodata = np.sum(mask, axis=0).max()
row_nodata = np.sum(mask, axis=1).max()
dist = max(row_nodata, col_nodata) / 2
self._griddata = fillnodata(self._griddata,
mask=~mask,
max_search_distance=dist)
prev_nodata = copy(num_nodata)
num_nodata = np.sum(np.isnan(self._griddata))
self.is_interpolated = True
def _fill_nodata_with_edge_values(self):
"""Fill nodata values using swath edge values by row,"""
if ~np.isnan(self.nodata_value):
nodata_mask = self._griddata == self.nodata_value
else:
nodata_mask = np.isnan(self._griddata)
self.nodata_mask = nodata_mask
for row in self._griddata:
            if np.isnan(row).all():
                continue
            # use the first valid value in the row as the fill value
            idx = np.where(~np.isnan(row))[0].min()
            fill_value = row[idx]
            row[np.isnan(row)] = fill_value
self.is_interpolated = True
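# Hedged sketch (not part of scarplet): the core of _fill_nodata is rasterio's
# fillnodata, which interpolates pixels where the mask is 0. The toy array and
# search distance below are made up for illustration.
def _fillnodata_sketch():
    data = np.arange(25, dtype='float32').reshape(5, 5)
    data[2, 2] = np.nan                        # punch a single nodata hole
    valid = (~np.isnan(data)).astype('uint8')  # 0 marks pixels to interpolate
    filled = fillnodata(data, mask=valid, max_search_distance=2)
    return filled                              # the hole gets a neighborhood value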
class Hillshade(BaseSpatialGrid):
"""Class representing hillshade of DEM"""
def __init__(self, dem):
"""Load DEMGrid object as Hillshade
"""
self._georef_info = dem._georef_info
self._griddata = dem._griddata
self._hillshade = None
def plot(self, az=315, elev=45):
"""Plot hillshade
        Parameters
----------
az : float
azimuth of light source in degrees
elev : float
elevation angle of light source in degrees
"""
ax = plt.gca()
ls = matplotlib.colors.LightSource(azdeg=az, altdeg=elev)
self._hillshade = ls.hillshade(self._griddata, vert_exag=1,
dx=self._georef_info.dx,
dy=self._georef_info.dy)
ax.imshow(self._hillshade, alpha=1, cmap='gray', origin='lower')
|
|
#!/usr/bin/env python3
import cv2
import numpy as np
from time import sleep
from time import time
LowH=0
HighH=50
LowS=59
HighS=255
LowV=0
HighV=236
morph=11,3
'''LowH2=0
HighH2=0
LowS2=0
HighS2=19
LowV2=235
HighV2=255'''
morph=(11,3)
def imfill(img):
ret,im_flood = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
th,inrangeframe = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
im_flood[0:480,0:1]=np.zeros((480,1))
h, w = im_flood.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
#cv2.imshow("0 in corner",im_flood)
cv2.floodFill(im_flood, mask, (0,0), 255);
#cv2.imshow("filled",im_flood)
cv2.bitwise_not(im_flood,im_flood)
imfilled=cv2.bitwise_or(im_flood,inrangeframe)
#cv2.imshow("filledOR",inrangeframe)
return imfilled
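# Hedged usage sketch (not part of the original script): imfill assumes 480x640
# frames (it zeroes the first column of a 480-row image). Drawing a bright ring
# and filling it shows the flood-fill-and-invert trick closing interior holes.
def _imfill_demo():
    canvas = np.zeros((480, 640), dtype=np.uint8)
    cv2.circle(canvas, (320, 240), 60, 255, thickness=10)  # ring with dark interior
    filled = imfill(canvas)
    print(filled[240, 320])  # 255: the hole inside the ring has been filled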
def filter_2HSV(img):
kernel = np.ones((5,5),np.float32)/25
img = cv2.filter2D(img,-1,kernel)
# Change colorspace
hsvframe = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
return hsvframe
def open_and_close(img,size):
#opening
morphoimage=cv2.morphologyEx(img,cv2.MORPH_OPEN,cv2.getStructuringElement(cv2.MORPH_RECT, size))
#Closing
morphoimage=cv2.morphologyEx(morphoimage,cv2.MORPH_CLOSE,cv2.getStructuringElement(cv2.MORPH_RECT, size))
return morphoimage
def get_centers(img,Arearef=130):
#Apply contours to get the properties of the images
contourimage, contours, hierarchy = cv2.findContours(img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
#matrix to draw the contours
img2=np.zeros((480,640,3))
img2 = cv2.drawContours(img2, contours, -1, (0,255,0), 3)
# Display the resulting frame
center_list=[]
closest_list=[]
for i in range(len(contours)):
if cv2.contourArea(contours[i])>Arearef:
(x,y),radius = cv2.minEnclosingCircle(contours[i])
center_list.append([int(x),int(y)])
contourfigure=contours[i]
#print (contourfigure.shape)
closecontour=np.argmax(contourfigure[:,:,1],axis=0)
#print("close contour:",closecontour,contourfigure[closecontour,0,:])
closest_list.append(contourfigure[closecontour,0,:])
return center_list,closest_list
def get_objective(center_list,closest_list=[]):
center_array=np.array(center_list)
center_array[:,0]=abs(center_array[:,0]-320)
index=np.argmin(center_array,axis=0)
objective_center=center_list[index[0]]
if len(closest_list)>0:
objective_closest=closest_list[index[0]]
else:
objective_closest=[]
return objective_center,objective_closest
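# Hedged sketch (not part of the original script): get_objective picks the
# detection whose x coordinate is closest to the image mid-column (x = 320).
# The centers below are made up.
def _get_objective_demo():
    centers = [[100, 50], [300, 200], [600, 400]]
    center, closest = get_objective(centers)
    print(center)   # [300, 200] -- nearest to x = 320
    print(closest)  # [] -- no closest-point list supplied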
def all_operations(frame):
LowH2=cv2.getTrackbarPos("LowH","frame")
HighH2=cv2.getTrackbarPos("HighH","frame")
LowS2=cv2.getTrackbarPos("LowS","frame")
HighS2=cv2.getTrackbarPos("HighS","frame")
LowV2=cv2.getTrackbarPos("LowV","frame")
HighV2=cv2.getTrackbarPos("HighV","frame")
hsvframe=filter_2HSV(frame)
lowerboundcolor=np.array([LowH,LowS,LowV])
upperboundcolor=np.array([HighH,HighS,HighV])
# Binarization
inrangeframe=cv2.inRange(hsvframe,lowerboundcolor,upperboundcolor)
cv2.imshow("Before morphology",inrangeframe)
#Morphologic operations
# Infill
inrangeframe=imfill(inrangeframe)
cv2.imshow("filledOR",inrangeframe)
#Opening and closing
morphoimg=open_and_close(inrangeframe,(11,3))
morphoimg=open_and_close(morphoimg,(3,11))
cv2.imshow("after morphology",morphoimg)
#Getting the centers
center_list,closest_list=get_centers(morphoimg,1)
#plotting
    for i in range(len(center_list)):
        # the loop body was commented out, which leaves an empty suite and
        # raises an IndentationError; draw the detected centers as intended
        cv2.circle(frame,(center_list[i][0],center_list[i][1]),2,(255,255,255),thickness=2)
print(len(closest_list))
print (center_list)
#Draw the lines that determine the action space
cv2.line(frame,(280,0),(260,479),(255,0,0),2)
cv2.line(frame,(360,0),(380,479),(255,0,0),2)
cv2.line(frame,(220,0),(180,479),(0,0,255),2)
cv2.line(frame,(420,0),(460,479),(0,0,255),2)
if len(center_list)>0:
#check which center is more in the center
        # get_objective returns (center, closest); unpack so the indexing below hits the [x, y] pair
        objective_center, _ = get_objective(center_list)
        cv2.circle(frame,(objective_center[0],objective_center[1]),3,(255,0,0),thickness=2)
else:
objective_center=[]
return objective_center
def detection(frame,LowH,HighH,LowS,HighS,LowV,HighV,sizemorph,Arearef=10):
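    # Detection pipeline: 5x5 mean blur + BGR->HSV, inRange binarization with the
    # given bounds, hole filling, opening/closing with sizemorph and then with its
    # transposed kernel, and finally contour centers plus each contour's lowest point.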
hsvframe=filter_2HSV(frame)
lowerboundcolor=np.array([LowH,LowS,LowV])
upperboundcolor=np.array([HighH,HighS,HighV])
# Binarization
inrangeframe=cv2.inRange(hsvframe,lowerboundcolor,upperboundcolor)
cv2.imshow("Before morphology",inrangeframe)
#Morphologic operations
# Infill
inrangeframe=imfill(inrangeframe)
#cv2.imshow("filledOR",inrangeframe)
#Opening and closing
morphoimg=open_and_close(inrangeframe,sizemorph)
sizemorph2=tuple(reversed(sizemorph))
morphoimg=open_and_close(morphoimg,sizemorph2)
#Getting the centers
center_list,closest_list=get_centers(morphoimg,1)
closest_list=np.array(closest_list)
#print(closest_list.shape)
if len(closest_list.shape)>2:
closest_list=np.squeeze(closest_list,axis=1)
#print("After squeezing.",closest_list.shape)
cv2.imshow("morpho image",morphoimg)
#plotting
#print(closest_list[0,0])
for i in range(len(center_list)):
cv2.circle(frame,(center_list[i][0],center_list[i][1]),2,(255,255,255),thickness=2)
cv2.circle(frame,(closest_list[i][0],closest_list[i][1]),2,(0,0,255),thickness=2)
#print (center_list)
#Draw the lines that determine the action space
#cv2.line(frame,(280,0),(260,479),(255,0,0),2)
#cv2.line(frame,(360,0),(380,479),(255,0,0),2)
#cv2.line(frame,(220,0),(180,479),(0,0,255),2)
#cv2.line(frame,(420,0),(460,479),(0,0,255),2)
if len(center_list)>0:
#check which center is more in the center
objective_center,objective_closest=get_objective(center_list,closest_list)
if len(set(sizemorph))==1:
cv2.circle(frame,(objective_center[0],objective_center[1]),3,(255,0,0),thickness=2)
cv2.circle(frame,(objective_closest[0],objective_closest[1]),4,(0,0,0),thickness=2)
else:
cv2.circle(frame,(objective_center[0],objective_center[1]),3,(0,0,255),thickness=2)
else:
objective_center=[]
objective_closest=[]
#return objective_center,objective_closest
return center_list
def init_trackbars():
cv2.namedWindow('frame')
cv2.createTrackbar("LowH", 'frame',LowH, 255,nothing);
cv2.createTrackbar("HighH", 'frame',HighH, 255,nothing);
cv2.createTrackbar("LowS", 'frame',LowS, 255,nothing);
cv2.createTrackbar("HighS", 'frame',HighS, 255,nothing);
cv2.createTrackbar("LowV", 'frame',LowV, 255,nothing);
cv2.createTrackbar("HighV", 'frame',HighV, 255,nothing);
return 0
def nothing(x):
pass
if __name__ == "__main__":
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
cap = cv2.VideoCapture(0)
#cap.set(cv2.CAP_PROP_BUFFERSIZE,100)
init_trackbars()
feature_params = dict( maxCorners = 50,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
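    # Sparse Lucas-Kanade tracking setup: corners are picked on the mask of
    # detection rectangles with goodFeaturesToTrack and then followed between
    # frames by calcOpticalFlowPyrLK (15x15 window, 2 pyramid levels).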
#sleep(1)
LowH2=0
HighH2=50
LowS2=10
HighS2=1
LowV2=0
HighV2=237
ret, oldframe = cap.read()
black_frame_old=np.zeros((480,640,3),dtype=np.uint8)
old_list= detection(oldframe, LowH2, HighH2, LowS2,HighS2, LowV2, HighV2, (3, 9),1)
old_list=np.array(old_list)
for i in range(len(old_list)):
        cv2.rectangle(black_frame_old,(old_list[i,0],old_list[i,1]),(old_list[i,0]+30,old_list[i,1]+50),color=(0,255,0))
old_gray = cv2.cvtColor(black_frame_old, cv2.COLOR_BGR2GRAY)
p0=cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
cv2.imshow("old_gray",old_gray)
cv2.waitKey()
while(True):
# Capture frame-by-frame
t0=time()
retard=0
LowH2=cv2.getTrackbarPos("LowH","frame")
HighH2=cv2.getTrackbarPos("HighH","frame")
LowS2=cv2.getTrackbarPos("LowS","frame")
HighS2=cv2.getTrackbarPos("HighS","frame")
LowV2=cv2.getTrackbarPos("LowV","frame")
HighV2=cv2.getTrackbarPos("HighV","frame")
ret, newframe = cap.read()
black_frame_new=np.zeros((480,640,3),dtype=np.uint8)
new_list= detection(newframe, LowH2, HighH2, LowS2,HighS2, LowV2, HighV2, (3, 9),1)
new_list=np.array(new_list)
for i in range(len(new_list)):
cv2.rectangle(black_frame_new,(new_list[i,0],new_list[i,1]),(new_list[i,0]+30,new_list[i,1]+50),color=(0,255,0))
new_gray = cv2.cvtColor(black_frame_new, cv2.COLOR_BGR2GRAY)
cv2.imshow("black",new_gray)
cv2.waitKey()
#p0=cv2.goodFeaturesToTrack(new_gray, mask = None, **feature_params)
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, new_gray, p0, None, **lk_params)
good_new = p1[st==1]
good_old = p0[st==1]
        # the second LK call on the raw colour frames would fail (it needs single-channel
        # images and float32 point arrays), so it stays disabled and p1/st from above are used
        #p1, status, err = cv2.calcOpticalFlowPyrLK(oldframe, newframe, old_list, None, **lk_params)
        old_gray = new_gray
        p0 = good_new.reshape(-1,1,2)
if cv2.waitKey(1) & 0xFF == 27:
break
#sleep(1)
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
|
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Storm Cauldron")
def storm_cauldron(card, abilities):
def storm_cauldron():
return AbilityNotImplemented
def storm_cauldron():
return AbilityNotImplemented
return storm_cauldron, storm_cauldron,
@card("Krovikan Horror")
def krovikan_horror(card, abilities):
def krovikan_horror():
return AbilityNotImplemented
def krovikan_horror():
return AbilityNotImplemented
return krovikan_horror, krovikan_horror,
@card("Kjeldoran Pride")
def kjeldoran_pride(card, abilities):
def kjeldoran_pride():
return AbilityNotImplemented
def kjeldoran_pride():
return AbilityNotImplemented
def kjeldoran_pride():
return AbilityNotImplemented
return kjeldoran_pride, kjeldoran_pride, kjeldoran_pride,
@card("Tidal Control")
def tidal_control(card, abilities):
def tidal_control():
return AbilityNotImplemented
def tidal_control():
return AbilityNotImplemented
return tidal_control, tidal_control,
@card("Gorilla Berserkers")
def gorilla_berserkers(card, abilities):
def gorilla_berserkers():
return AbilityNotImplemented
def gorilla_berserkers():
return AbilityNotImplemented
return gorilla_berserkers, gorilla_berserkers,
@card("Casting of Bones")
def casting_of_bones(card, abilities):
def casting_of_bones():
return AbilityNotImplemented
def casting_of_bones():
return AbilityNotImplemented
return casting_of_bones, casting_of_bones,
@card("Balduvian Horde")
def balduvian_horde(card, abilities):
def balduvian_horde():
return AbilityNotImplemented
return balduvian_horde,
@card("Fevered Strength")
def fevered_strength(card, abilities):
def fevered_strength():
return AbilityNotImplemented
def fevered_strength():
return AbilityNotImplemented
return fevered_strength, fevered_strength,
@card("Mystic Compass")
def mystic_compass(card, abilities):
def mystic_compass():
return AbilityNotImplemented
return mystic_compass,
@card("Yavimaya Ancients")
def yavimaya_ancients(card, abilities):
def yavimaya_ancients():
return AbilityNotImplemented
return yavimaya_ancients,
@card("Force of Will")
def force_of_will(card, abilities):
def force_of_will():
return AbilityNotImplemented
def force_of_will():
return AbilityNotImplemented
return force_of_will, force_of_will,
@card("Ritual of the Machine")
def ritual_of_the_machine(card, abilities):
def ritual_of_the_machine():
return AbilityNotImplemented
def ritual_of_the_machine():
return AbilityNotImplemented
return ritual_of_the_machine, ritual_of_the_machine,
@card("Balduvian Trading Post")
def balduvian_trading_post(card, abilities):
def balduvian_trading_post():
return AbilityNotImplemented
def balduvian_trading_post():
return AbilityNotImplemented
def balduvian_trading_post():
return AbilityNotImplemented
return balduvian_trading_post, balduvian_trading_post, balduvian_trading_post,
@card("Lodestone Bauble")
def lodestone_bauble(card, abilities):
def lodestone_bauble():
return AbilityNotImplemented
return lodestone_bauble,
@card("Swamp Mosquito")
def swamp_mosquito(card, abilities):
def swamp_mosquito():
return AbilityNotImplemented
def swamp_mosquito():
return AbilityNotImplemented
return swamp_mosquito, swamp_mosquito,
@card("Balduvian War-Makers")
def balduvian_warmakers(card, abilities):
def balduvian_warmakers():
return AbilityNotImplemented
def balduvian_warmakers():
return AbilityNotImplemented
return balduvian_warmakers, balduvian_warmakers,
@card("Phelddagrif")
def phelddagrif(card, abilities):
def phelddagrif():
return AbilityNotImplemented
def phelddagrif():
return AbilityNotImplemented
def phelddagrif():
return AbilityNotImplemented
return phelddagrif, phelddagrif, phelddagrif,
@card("Gorilla War Cry")
def gorilla_war_cry(card, abilities):
def gorilla_war_cry():
return AbilityNotImplemented
def gorilla_war_cry():
return AbilityNotImplemented
def gorilla_war_cry():
return AbilityNotImplemented
return gorilla_war_cry, gorilla_war_cry, gorilla_war_cry,
@card("Foresight")
def foresight(card, abilities):
def foresight():
return AbilityNotImplemented
def foresight():
return AbilityNotImplemented
return foresight, foresight,
@card("Phantasmal Sphere")
def phantasmal_sphere(card, abilities):
def phantasmal_sphere():
return AbilityNotImplemented
def phantasmal_sphere():
return AbilityNotImplemented
def phantasmal_sphere():
return AbilityNotImplemented
return phantasmal_sphere, phantasmal_sphere, phantasmal_sphere,
@card("Taste of Paradise")
def taste_of_paradise(card, abilities):
def taste_of_paradise():
return AbilityNotImplemented
def taste_of_paradise():
return AbilityNotImplemented
return taste_of_paradise, taste_of_paradise,
@card("Pillage")
def pillage(card, abilities):
def pillage():
return AbilityNotImplemented
return pillage,
@card("Varchild's Crusader")
def varchilds_crusader(card, abilities):
def varchilds_crusader():
return AbilityNotImplemented
return varchilds_crusader,
@card("Soldevi Steam Beast")
def soldevi_steam_beast(card, abilities):
def soldevi_steam_beast():
return AbilityNotImplemented
def soldevi_steam_beast():
return AbilityNotImplemented
return soldevi_steam_beast, soldevi_steam_beast,
@card("Nature's Wrath")
def natures_wrath(card, abilities):
def natures_wrath():
return AbilityNotImplemented
def natures_wrath():
return AbilityNotImplemented
def natures_wrath():
return AbilityNotImplemented
return natures_wrath, natures_wrath, natures_wrath,
@card("Lim-Dul's Vault")
def limduls_vault(card, abilities):
def limduls_vault():
return AbilityNotImplemented
return limduls_vault,
@card("Gift of the Woods")
def gift_of_the_woods(card, abilities):
def gift_of_the_woods():
return AbilityNotImplemented
def gift_of_the_woods():
return AbilityNotImplemented
return gift_of_the_woods, gift_of_the_woods,
@card("Astrolabe")
def astrolabe(card, abilities):
def astrolabe():
return AbilityNotImplemented
return astrolabe,
@card("Balduvian Dead")
def balduvian_dead(card, abilities):
def balduvian_dead():
return AbilityNotImplemented
return balduvian_dead,
@card("Nature's Chosen")
def natures_chosen(card, abilities):
def natures_chosen():
return AbilityNotImplemented
def natures_chosen():
return AbilityNotImplemented
def natures_chosen():
return AbilityNotImplemented
return natures_chosen, natures_chosen, natures_chosen,
@card("Reprisal")
def reprisal(card, abilities):
def reprisal():
return AbilityNotImplemented
return reprisal,
@card("Hail Storm")
def hail_storm(card, abilities):
def hail_storm():
return AbilityNotImplemented
return hail_storm,
@card("Exile")
def exile(card, abilities):
def exile():
return AbilityNotImplemented
return exile,
@card("Benthic Explorers")
def benthic_explorers(card, abilities):
def benthic_explorers():
return AbilityNotImplemented
return benthic_explorers,
@card("Enslaved Scout")
def enslaved_scout(card, abilities):
def enslaved_scout():
return AbilityNotImplemented
return enslaved_scout,
@card("Chaos Harlequin")
def chaos_harlequin(card, abilities):
def chaos_harlequin():
return AbilityNotImplemented
return chaos_harlequin,
@card("Kjeldoran Outpost")
def kjeldoran_outpost(card, abilities):
def kjeldoran_outpost():
return AbilityNotImplemented
def kjeldoran_outpost():
return AbilityNotImplemented
def kjeldoran_outpost():
return AbilityNotImplemented
return kjeldoran_outpost, kjeldoran_outpost, kjeldoran_outpost,
@card("Diseased Vermin")
def diseased_vermin(card, abilities):
def diseased_vermin():
return AbilityNotImplemented
def diseased_vermin():
return AbilityNotImplemented
return diseased_vermin, diseased_vermin,
@card("Gargantuan Gorilla")
def gargantuan_gorilla(card, abilities):
def gargantuan_gorilla():
return AbilityNotImplemented
def gargantuan_gorilla():
return AbilityNotImplemented
return gargantuan_gorilla, gargantuan_gorilla,
@card("Keeper of Tresserhorn")
def keeper_of_tresserhorn(card, abilities):
def keeper_of_tresserhorn():
return AbilityNotImplemented
return keeper_of_tresserhorn,
@card("School of the Unseen")
def school_of_the_unseen(card, abilities):
def school_of_the_unseen():
return AbilityNotImplemented
def school_of_the_unseen():
return AbilityNotImplemented
return school_of_the_unseen, school_of_the_unseen,
@card("Mishra's Groundbreaker")
def mishras_groundbreaker(card, abilities):
def mishras_groundbreaker():
return AbilityNotImplemented
return mishras_groundbreaker,
@card("Lat-Nam's Legacy")
def latnams_legacy(card, abilities):
def latnams_legacy():
return AbilityNotImplemented
return latnams_legacy,
@card("Thought Lash")
def thought_lash(card, abilities):
def thought_lash():
return AbilityNotImplemented
def thought_lash():
return AbilityNotImplemented
def thought_lash():
return AbilityNotImplemented
return thought_lash, thought_lash, thought_lash,
@card("Whip Vine")
def whip_vine(card, abilities):
def whip_vine():
return AbilityNotImplemented
def whip_vine():
return AbilityNotImplemented
def whip_vine():
return AbilityNotImplemented
return whip_vine, whip_vine, whip_vine,
@card("Lim-Dul's Paladin")
def limduls_paladin(card, abilities):
def limduls_paladin():
return AbilityNotImplemented
def limduls_paladin():
return AbilityNotImplemented
def limduls_paladin():
return AbilityNotImplemented
def limduls_paladin():
return AbilityNotImplemented
return limduls_paladin, limduls_paladin, limduls_paladin, limduls_paladin,
@card("Martyrdom")
def martyrdom(card, abilities):
def martyrdom():
return AbilityNotImplemented
return martyrdom,
@card("Splintering Wind")
def splintering_wind(card, abilities):
def splintering_wind():
return AbilityNotImplemented
return splintering_wind,
@card("Shield Sphere")
def shield_sphere(card, abilities):
def shield_sphere():
return AbilityNotImplemented
def shield_sphere():
return AbilityNotImplemented
return shield_sphere, shield_sphere,
@card("Errand of Duty")
def errand_of_duty(card, abilities):
def errand_of_duty():
return AbilityNotImplemented
return errand_of_duty,
@card("Rogue Skycaptain")
def rogue_skycaptain(card, abilities):
def rogue_skycaptain():
return AbilityNotImplemented
def rogue_skycaptain():
return AbilityNotImplemented
return rogue_skycaptain, rogue_skycaptain,
@card("Soldier of Fortune")
def soldier_of_fortune(card, abilities):
def soldier_of_fortune():
return AbilityNotImplemented
return soldier_of_fortune,
@card("Ivory Gargoyle")
def ivory_gargoyle(card, abilities):
def ivory_gargoyle():
return AbilityNotImplemented
def ivory_gargoyle():
return AbilityNotImplemented
def ivory_gargoyle():
return AbilityNotImplemented
return ivory_gargoyle, ivory_gargoyle, ivory_gargoyle,
@card("Gorilla Chieftain")
def gorilla_chieftain(card, abilities):
def gorilla_chieftain():
return AbilityNotImplemented
return gorilla_chieftain,
@card("Kjeldoran Escort")
def kjeldoran_escort(card, abilities):
def kjeldoran_escort():
return AbilityNotImplemented
return kjeldoran_escort,
@card("Lord of Tresserhorn")
def lord_of_tresserhorn(card, abilities):
def lord_of_tresserhorn():
return AbilityNotImplemented
def lord_of_tresserhorn():
return AbilityNotImplemented
return lord_of_tresserhorn, lord_of_tresserhorn,
@card("Gustha's Scepter")
def gusthas_scepter(card, abilities):
def gusthas_scepter():
return AbilityNotImplemented
def gusthas_scepter():
return AbilityNotImplemented
def gusthas_scepter():
return AbilityNotImplemented
return gusthas_scepter, gusthas_scepter, gusthas_scepter,
@card("Heart of Yavimaya")
def heart_of_yavimaya(card, abilities):
def heart_of_yavimaya():
return AbilityNotImplemented
def heart_of_yavimaya():
return AbilityNotImplemented
def heart_of_yavimaya():
return AbilityNotImplemented
return heart_of_yavimaya, heart_of_yavimaya, heart_of_yavimaya,
@card("Misfortune")
def misfortune(card, abilities):
def misfortune():
return AbilityNotImplemented
return misfortune,
@card("Feast or Famine")
def feast_or_famine(card, abilities):
def feast_or_famine():
return AbilityNotImplemented
return feast_or_famine,
@card("Royal Herbalist")
def royal_herbalist(card, abilities):
def royal_herbalist():
return AbilityNotImplemented
return royal_herbalist,
@card("Winter's Night")
def winters_night(card, abilities):
def winters_night():
return AbilityNotImplemented
return winters_night,
@card("Yavimaya Ants")
def yavimaya_ants(card, abilities):
def yavimaya_ants():
return AbilityNotImplemented
def yavimaya_ants():
return AbilityNotImplemented
return yavimaya_ants, yavimaya_ants,
@card("Storm Elemental")
def storm_elemental(card, abilities):
def storm_elemental():
return AbilityNotImplemented
def storm_elemental():
return AbilityNotImplemented
def storm_elemental():
return AbilityNotImplemented
return storm_elemental, storm_elemental, storm_elemental,
@card("Misinformation")
def misinformation(card, abilities):
def misinformation():
return AbilityNotImplemented
return misinformation,
@card("Insidious Bookworms")
def insidious_bookworms(card, abilities):
def insidious_bookworms():
return AbilityNotImplemented
return insidious_bookworms,
@card("Nature's Blessing")
def natures_blessing(card, abilities):
def natures_blessing():
return AbilityNotImplemented
return natures_blessing,
@card("Juniper Order Advocate")
def juniper_order_advocate(card, abilities):
def juniper_order_advocate():
return AbilityNotImplemented
return juniper_order_advocate,
@card("Lake of the Dead")
def lake_of_the_dead(card, abilities):
def lake_of_the_dead():
return AbilityNotImplemented
def lake_of_the_dead():
return AbilityNotImplemented
def lake_of_the_dead():
return AbilityNotImplemented
return lake_of_the_dead, lake_of_the_dead, lake_of_the_dead,
@card("Soldevi Excavations")
def soldevi_excavations(card, abilities):
def soldevi_excavations():
return AbilityNotImplemented
def soldevi_excavations():
return AbilityNotImplemented
def soldevi_excavations():
return AbilityNotImplemented
return soldevi_excavations, soldevi_excavations, soldevi_excavations,
@card("Fatal Lore")
def fatal_lore(card, abilities):
def fatal_lore():
return AbilityNotImplemented
return fatal_lore,
@card("Soldevi Adnate")
def soldevi_adnate(card, abilities):
def soldevi_adnate():
return AbilityNotImplemented
return soldevi_adnate,
@card("Awesome Presence")
def awesome_presence(card, abilities):
def awesome_presence():
return AbilityNotImplemented
def awesome_presence():
return AbilityNotImplemented
return awesome_presence, awesome_presence,
@card("Burnout")
def burnout(card, abilities):
def burnout():
return AbilityNotImplemented
def burnout():
return AbilityNotImplemented
return burnout, burnout,
@card("Soldevi Sentry")
def soldevi_sentry(card, abilities):
def soldevi_sentry():
return AbilityNotImplemented
return soldevi_sentry,
@card("Phyrexian Portal")
def phyrexian_portal(card, abilities):
def phyrexian_portal():
return AbilityNotImplemented
return phyrexian_portal,
@card("Carrier Pigeons")
def carrier_pigeons(card, abilities):
def carrier_pigeons():
return AbilityNotImplemented
def carrier_pigeons():
return AbilityNotImplemented
return carrier_pigeons, carrier_pigeons,
@card("Stromgald Spy")
def stromgald_spy(card, abilities):
def stromgald_spy():
return AbilityNotImplemented
return stromgald_spy,
@card("Kaysa")
def kaysa(card, abilities):
def kaysa():
return AbilityNotImplemented
return kaysa,
@card("Krovikan Plague")
def krovikan_plague(card, abilities):
def krovikan_plague():
return AbilityNotImplemented
def krovikan_plague():
return AbilityNotImplemented
def krovikan_plague():
return AbilityNotImplemented
return krovikan_plague, krovikan_plague, krovikan_plague,
@card("Storm Crow")
def storm_crow(card, abilities):
def storm_crow():
return AbilityNotImplemented
return storm_crow,
@card("Scars of the Veteran")
def scars_of_the_veteran(card, abilities):
def scars_of_the_veteran():
return AbilityNotImplemented
def scars_of_the_veteran():
return AbilityNotImplemented
return scars_of_the_veteran, scars_of_the_veteran,
@card("Agent of Stromgald")
def agent_of_stromgald(card, abilities):
def agent_of_stromgald():
return AbilityNotImplemented
return agent_of_stromgald,
@card("Phyrexian Devourer")
def phyrexian_devourer(card, abilities):
def phyrexian_devourer():
return AbilityNotImplemented
def phyrexian_devourer():
return AbilityNotImplemented
return phyrexian_devourer, phyrexian_devourer,
@card("Deadly Insect")
def deadly_insect(card, abilities):
def deadly_insect():
return AbilityNotImplemented
return deadly_insect,
@card("Viscerid Armor")
def viscerid_armor(card, abilities):
def viscerid_armor():
return AbilityNotImplemented
def viscerid_armor():
return AbilityNotImplemented
def viscerid_armor():
return AbilityNotImplemented
return viscerid_armor, viscerid_armor, viscerid_armor,
@card("Noble Steeds")
def noble_steeds(card, abilities):
def noble_steeds():
return AbilityNotImplemented
return noble_steeds,
@card("Phyrexian War Beast")
def phyrexian_war_beast(card, abilities):
def phyrexian_war_beast():
return AbilityNotImplemented
return phyrexian_war_beast,
@card("Bounty of the Hunt")
def bounty_of_the_hunt(card, abilities):
def bounty_of_the_hunt():
return AbilityNotImplemented
def bounty_of_the_hunt():
return AbilityNotImplemented
return bounty_of_the_hunt, bounty_of_the_hunt,
@card("Sol Grail")
def sol_grail(card, abilities):
def sol_grail():
return AbilityNotImplemented
def sol_grail():
return AbilityNotImplemented
return sol_grail, sol_grail,
@card("Phyrexian Boon")
def phyrexian_boon(card, abilities):
def phyrexian_boon():
return AbilityNotImplemented
def phyrexian_boon():
return AbilityNotImplemented
return phyrexian_boon, phyrexian_boon,
@card("Thawing Glaciers")
def thawing_glaciers(card, abilities):
def thawing_glaciers():
return AbilityNotImplemented
def thawing_glaciers():
return AbilityNotImplemented
return thawing_glaciers, thawing_glaciers,
@card("Death Spark")
def death_spark(card, abilities):
def death_spark():
return AbilityNotImplemented
def death_spark():
return AbilityNotImplemented
return death_spark, death_spark,
@card("Guerrilla Tactics")
def guerrilla_tactics(card, abilities):
def guerrilla_tactics():
return AbilityNotImplemented
def guerrilla_tactics():
return AbilityNotImplemented
return guerrilla_tactics, guerrilla_tactics,
@card("Phantasmal Fiend")
def phantasmal_fiend(card, abilities):
def phantasmal_fiend():
return AbilityNotImplemented
def phantasmal_fiend():
return AbilityNotImplemented
return phantasmal_fiend, phantasmal_fiend,
@card("Royal Decree")
def royal_decree(card, abilities):
def royal_decree():
return AbilityNotImplemented
def royal_decree():
return AbilityNotImplemented
return royal_decree, royal_decree,
@card("Helm of Obedience")
def helm_of_obedience(card, abilities):
def helm_of_obedience():
return AbilityNotImplemented
return helm_of_obedience,
@card("Lim-Dul's High Guard")
def limduls_high_guard(card, abilities):
def limduls_high_guard():
return AbilityNotImplemented
def limduls_high_guard():
return AbilityNotImplemented
return limduls_high_guard, limduls_high_guard,
@card("Varchild's War-Riders")
def varchilds_warriders(card, abilities):
def varchilds_warriders():
return AbilityNotImplemented
def varchilds_warriders():
return AbilityNotImplemented
return varchilds_warriders, varchilds_warriders,
@card("Scarab of the Unseen")
def scarab_of_the_unseen(card, abilities):
def scarab_of_the_unseen():
return AbilityNotImplemented
return scarab_of_the_unseen,
@card("Stench of Decay")
def stench_of_decay(card, abilities):
def stench_of_decay():
return AbilityNotImplemented
return stench_of_decay,
@card("Diminishing Returns")
def diminishing_returns(card, abilities):
def diminishing_returns():
return AbilityNotImplemented
return diminishing_returns,
@card("Suffocation")
def suffocation(card, abilities):
def suffocation():
return AbilityNotImplemented
def suffocation():
return AbilityNotImplemented
def suffocation():
return AbilityNotImplemented
return suffocation, suffocation, suffocation,
@card("Kjeldoran Home Guard")
def kjeldoran_home_guard(card, abilities):
def kjeldoran_home_guard():
return AbilityNotImplemented
return kjeldoran_home_guard,
@card("Seasoned Tactician")
def seasoned_tactician(card, abilities):
def seasoned_tactician():
return AbilityNotImplemented
return seasoned_tactician,
@card("Omen of Fire")
def omen_of_fire(card, abilities):
def omen_of_fire():
return AbilityNotImplemented
def omen_of_fire():
return AbilityNotImplemented
return omen_of_fire, omen_of_fire,
@card("Fyndhorn Druid")
def fyndhorn_druid(card, abilities):
def fyndhorn_druid():
return AbilityNotImplemented
return fyndhorn_druid,
@card("Storm Shaman")
def storm_shaman(card, abilities):
def storm_shaman():
return AbilityNotImplemented
return storm_shaman,
@card("Urza's Engine")
def urzas_engine(card, abilities):
def urzas_engine():
return AbilityNotImplemented
def urzas_engine():
return AbilityNotImplemented
def urzas_engine():
return AbilityNotImplemented
return urzas_engine, urzas_engine, urzas_engine,
@card("Bestial Fury")
def bestial_fury(card, abilities):
def bestial_fury():
return AbilityNotImplemented
def bestial_fury():
return AbilityNotImplemented
def bestial_fury():
return AbilityNotImplemented
return bestial_fury, bestial_fury, bestial_fury,
@card("Elvish Bard")
def elvish_bard(card, abilities):
def elvish_bard():
return AbilityNotImplemented
return elvish_bard,
@card("Undergrowth")
def undergrowth(card, abilities):
def undergrowth():
return AbilityNotImplemented
def undergrowth():
return AbilityNotImplemented
return undergrowth, undergrowth,
@card("Aesthir Glider")
def aesthir_glider(card, abilities):
def aesthir_glider():
return AbilityNotImplemented
def aesthir_glider():
return AbilityNotImplemented
return aesthir_glider, aesthir_glider,
@card("Ashnod's Cylix")
def ashnods_cylix(card, abilities):
def ashnods_cylix():
return AbilityNotImplemented
return ashnods_cylix,
@card("Pyrokinesis")
def pyrokinesis(card, abilities):
def pyrokinesis():
return AbilityNotImplemented
def pyrokinesis():
return AbilityNotImplemented
return pyrokinesis, pyrokinesis,
@card("Inheritance")
def inheritance(card, abilities):
def inheritance():
return AbilityNotImplemented
return inheritance,
@card("Library of Lat-Nam")
def library_of_latnam(card, abilities):
def library_of_latnam():
return AbilityNotImplemented
return library_of_latnam,
@card("Floodwater Dam")
def floodwater_dam(card, abilities):
def floodwater_dam():
return AbilityNotImplemented
return floodwater_dam,
@card("Soldevi Digger")
def soldevi_digger(card, abilities):
def soldevi_digger():
return AbilityNotImplemented
return soldevi_digger,
@card("Unlikely Alliance")
def unlikely_alliance(card, abilities):
def unlikely_alliance():
return AbilityNotImplemented
return unlikely_alliance,
@card("Sustaining Spirit")
def sustaining_spirit(card, abilities):
def sustaining_spirit():
return AbilityNotImplemented
def sustaining_spirit():
return AbilityNotImplemented
return sustaining_spirit, sustaining_spirit,
@card("Elvish Spirit Guide")
def elvish_spirit_guide(card, abilities):
def elvish_spirit_guide():
return AbilityNotImplemented
return elvish_spirit_guide,
@card("Gorilla Shaman")
def gorilla_shaman(card, abilities):
def gorilla_shaman():
return AbilityNotImplemented
return gorilla_shaman,
@card("Arcane Denial")
def arcane_denial(card, abilities):
def arcane_denial():
return AbilityNotImplemented
def arcane_denial():
return AbilityNotImplemented
return arcane_denial, arcane_denial,
@card("Energy Arc")
def energy_arc(card, abilities):
def energy_arc():
return AbilityNotImplemented
return energy_arc,
@card("Dystopia")
def dystopia(card, abilities):
def dystopia():
return AbilityNotImplemented
def dystopia():
return AbilityNotImplemented
return dystopia, dystopia,
@card("Viscerid Drone")
def viscerid_drone(card, abilities):
def viscerid_drone():
return AbilityNotImplemented
def viscerid_drone():
return AbilityNotImplemented
return viscerid_drone, viscerid_drone,
@card("Wild Aesthir")
def wild_aesthir(card, abilities):
def wild_aesthir():
return AbilityNotImplemented
def wild_aesthir():
return AbilityNotImplemented
return wild_aesthir, wild_aesthir,
@card("Wandering Mage")
def wandering_mage(card, abilities):
def wandering_mage():
return AbilityNotImplemented
def wandering_mage():
return AbilityNotImplemented
def wandering_mage():
return AbilityNotImplemented
return wandering_mage, wandering_mage, wandering_mage,
@card("Veteran's Voice")
def veterans_voice(card, abilities):
def veterans_voice():
return AbilityNotImplemented
def veterans_voice():
return AbilityNotImplemented
return veterans_voice, veterans_voice,
@card("Whirling Catapult")
def whirling_catapult(card, abilities):
def whirling_catapult():
return AbilityNotImplemented
return whirling_catapult,
@card("Sheltered Valley")
def sheltered_valley(card, abilities):
def sheltered_valley():
return AbilityNotImplemented
def sheltered_valley():
return AbilityNotImplemented
def sheltered_valley():
return AbilityNotImplemented
return sheltered_valley, sheltered_valley, sheltered_valley,
@card("Tornado")
def tornado(card, abilities):
def tornado():
return AbilityNotImplemented
def tornado():
return AbilityNotImplemented
return tornado, tornado,
@card("Contagion")
def contagion(card, abilities):
def contagion():
return AbilityNotImplemented
def contagion():
return AbilityNotImplemented
return contagion, contagion,
@card("Browse")
def browse(card, abilities):
def browse():
return AbilityNotImplemented
return browse,
@card("Soldevi Sage")
def soldevi_sage(card, abilities):
def soldevi_sage():
return AbilityNotImplemented
return soldevi_sage,
@card("Primitive Justice")
def primitive_justice(card, abilities):
def primitive_justice():
return AbilityNotImplemented
def primitive_justice():
return AbilityNotImplemented
return primitive_justice, primitive_justice,
@card("False Demise")
def false_demise(card, abilities):
def false_demise():
return AbilityNotImplemented
def false_demise():
return AbilityNotImplemented
return false_demise, false_demise,
@card("Reinforcements")
def reinforcements(card, abilities):
def reinforcements():
return AbilityNotImplemented
return reinforcements,
@card("Spiny Starfish")
def spiny_starfish(card, abilities):
def spiny_starfish():
return AbilityNotImplemented
def spiny_starfish():
return AbilityNotImplemented
return spiny_starfish, spiny_starfish,
@card("Soldevi Heretic")
def soldevi_heretic(card, abilities):
def soldevi_heretic():
return AbilityNotImplemented
return soldevi_heretic,
@card("Sworn Defender")
def sworn_defender(card, abilities):
def sworn_defender():
return AbilityNotImplemented
return sworn_defender,
@card("Surge of Strength")
def surge_of_strength(card, abilities):
def surge_of_strength():
return AbilityNotImplemented
def surge_of_strength():
return AbilityNotImplemented
return surge_of_strength, surge_of_strength,
|
|
"""
The PyGame-powered screen
"""
import pygame
import time
import threading
from .message import Message
## Some default configuration for the screen
from spotted_wall.server.utils import lazy_property, Counter
SCREEN_PADDING = 40
MESSAGES_PADDING = 40
FONT_SIZE = 40
FRAME_RATE = 60
SHOW_FPS = True
DEBUG = True
class SpottedWallScreen(object):
"""
The main Screen application, to be run in its own thread.
Note: here we need to handle all the thread safety part
when manipulating the list of messages and stuff.. beware!
"""
def __init__(self,
initial_size=(1280, 1024),
fullscreen=False,
show_fps=True,
show_clock=True,
rpc_server_address=None,
enable_web_ui=False,
web_ui_address=None,
messages_font_size=FONT_SIZE):
## Container for the messages
self.messages = {}
## threading.Lock() for access to the messages list
self._msgs_access_lock = threading.Lock()
## Counter yielding message ids. Just call .next() to get one.
self.message_id = Counter()
## Initialize pygame
pygame.init()
## The clock, used to calculate FPS etc.
self.clock = pygame.time.Clock()
## Prepare screen resolution sizes
self._window_res = initial_size
self._fullscreen_res = max(pygame.display.list_modes())
## Set the desired fullscreen mode
self._set_video_mode(fullscreen=fullscreen)
## Set window title
pygame.display.set_caption("Spotted Wall (main window)")
## Some extra configuration options
self._messages_font_size = messages_font_size
self.show_fps = show_fps
        self.show_clock = show_clock
def list_screen_resolutions(self):
"""
List the available video modes
"""
return pygame.display.list_modes()
@property
def size(self):
return self.screen.get_size()
@property
def width(self):
return self.size[0]
@property
def height(self):
return self.size[1]
def run(self):
"""Start threads and main loop"""
self._set_video_mode(self.size, self._fullscreen)
self._main_loop()
def _set_video_mode(self, resolution=None, fullscreen=False):
"""
Change the current video mode.
:param resolution:
The new resolution to set, or None for autodiscover.
If no resolution is specified, the last one for window/fullscreen
will be used. If no fullscreen resolution is set, the largest
available will be autoselect.
:param fullscreen:
Whether to go fullscreen or not.
"""
_screen_flags = pygame.DOUBLEBUF | pygame.RESIZABLE
self._fullscreen = fullscreen
if resolution is None:
resolution = self._fullscreen_res \
if fullscreen else self._window_res
if fullscreen:
_screen_flags |= pygame.FULLSCREEN
self._fullscreen_res = resolution
else:
self._window_res = resolution
self.screen = pygame.display.set_mode(resolution, _screen_flags)
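    # e.g. _set_video_mode(fullscreen=True) switches to the largest available
    # mode, while _set_video_mode((800, 600)) resizes the window and remembers
    # that size for the next windowed switch.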
def toggle_fullscreen(self):
self._set_video_mode(fullscreen=not self._fullscreen)
def _check_events(self):
"""Process all the new pygame events"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
pass # todo: use some kind of event to signal we want to quit?
#self._quit()
if event.type == pygame.KEYDOWN:
if event.key in (pygame.K_f, pygame.K_F11):
self.toggle_fullscreen()
# elif event.key == pygame.K_q:
# pass
elif event.type == pygame.VIDEORESIZE:
self._set_video_mode(event.size, self._fullscreen)
def _cleanup_messages(self):
"""Cleanup the expired messages from queue"""
with self._msgs_access_lock:
for k in self.messages.keys(): # *NOT* iterkeys() !!
if self.messages[k].is_expired():
del self.messages[k]
def _draw_messages(self):
with self._msgs_access_lock:
_filled_space = SCREEN_PADDING
messages_iterator = iter(sorted(self.messages.iteritems()))
_shown_messages = 0
while True: # Loop until we finish space or messages..
try:
message_id, message = messages_iterator.next()
except StopIteration:
break # no more messages..
message_width = self.width - (2 * SCREEN_PADDING)
req_space = \
_filled_space + message.get_height(message_width) \
+ SCREEN_PADDING
## We make sure we draw at least one message no matter its
## length, to avoid jamming up the queue..
if (req_space > self.height) and (_shown_messages > 0):
message.pause()
for message_id, message in messages_iterator:
message.pause()
break # no more space..
message.resume() # make sure it's not paused..
rendered = message.render(message_width)
_shown_messages += 1
if isinstance(rendered, (float, int)):
_filled_space += int(rendered *
(message.height + MESSAGES_PADDING))
else:
self.screen.blit(rendered, (SCREEN_PADDING, _filled_space))
_filled_space += rendered.get_height() + MESSAGES_PADDING
def _draw_fps(self):
if self.show_fps:
fps = self.clock.get_fps()
fpslabel = self.service_font.render(
str(int(fps)), True, (255, 255, 255))
rec = fpslabel.get_rect(top=5, right=self.width - 5)
self.screen.blit(fpslabel, rec)
def _draw_clock(self):
if self.show_clock:
clock_time = time.strftime('%T')
clock_label = self.service_font.render(
clock_time, True, (255, 255, 255))
rec = clock_label.get_rect(
bottom=self.height - 5, centerx=self.width/2)
self.screen.blit(clock_label, rec)
def _main_loop(self):
"""Application main loop"""
while 1:
self._check_events()
self._cleanup_messages()
self.screen.fill((0, 0, 0))
self._draw_messages()
self._draw_fps()
self._draw_clock()
pygame.display.flip()
self.clock.tick(FRAME_RATE)
@lazy_property
def service_font(self):
return pygame.font.SysFont('monospace', 16)
@lazy_property
def messages_font(self):
return pygame.font.SysFont('monospace', self._messages_font_size)
##--------------------------------------------------------------------------
## Public interface
##--------------------------------------------------------------------------
def add_message(self, text, color=None, duration=None):
"""Add a message to the wall"""
with self._msgs_access_lock:
print "Added message: {} {} {}".format(text, color, duration)
message = Message(text, font=self.messages_font, color=color)
if duration is not None:
message.max_show_time = duration
else:
message.max_show_time = 10 + int(len(text) * .1)
message_id = self.message_id.next()
self.messages[message_id] = message
return message_id
def list_messages(self):
"""List all the messages"""
with self._msgs_access_lock:
for message_id, message in self.messages.iteritems():
msg = message.to_dict()
msg['id'] = message_id
yield msg
def get_message(self, message_id):
"""Get the contents of a given message, by id"""
with self._msgs_access_lock:
message = self.messages[message_id]
msg = message.to_dict()
msg['id'] = message_id
return msg
def delete_message(self, message_id, immediate=False):
"""Delete or fade out a message"""
with self._msgs_access_lock:
if immediate:
del self.messages[message_id]
else:
self.messages[message_id].fadeOut()
def hide_message(self, message_id):
"""Delete a specific message"""
self.delete_message(message_id, immediate=False)
def edit_message(self, message_id, values):
"""Update the selected message"""
self.messages[message_id].update(values)
def flush_messages(self):
"""Empty the list of messages"""
with self._msgs_access_lock:
            self.messages.clear()
class SpottedWallScreenThread(threading.Thread):
daemon = True
parent = None
def __init__(self, parent):
self.parent = parent
super(SpottedWallScreenThread, self).__init__()
self.screen = SpottedWallScreen()
def run(self):
self.screen.run()
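
# A minimal usage sketch (hypothetical helper, not used by the module itself): start
# the screen in its own thread and push a message from the calling thread. The
# add_message() call is safe here because it acquires _msgs_access_lock internally.
def _demo_spotted_wall():
    screen_thread = SpottedWallScreenThread(parent=None)
    screen_thread.start()
    message_id = screen_thread.screen.add_message("Hello, wall!", duration=5)
    time.sleep(6)  # let the message fade before the daemon thread is abandoned
    return message_id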
|
|
import sys,string
import os
def makeTestFile():
all_data = [['name','harold','bob','frank','sally','kim','jim'],
['a','0','0','1','2','0','5'],['b','0','0','1','2','0','5'],
['c','0','0','1','2','0','5'],['d','0','0','1','2','0','5']]
input_file = 'test.txt'
export_object = open(input_file,'w')
for i in all_data:
export_object.write(string.join(i,'\t')+'\n')
export_object.close()
return input_file
def filterFile(input_file,output_file,filter_names):
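    # Keep only the columns named in filter_names (plus the leading ID column),
    # written in the order given; rows shorter than the header (e.g. PSI files
    # with trailing missing values) are padded with empty strings first.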
export_object = open(output_file,'w')
firstLine = True
for line in open(input_file,'rU').xreadlines():
data = cleanUpLine(line)
if '.csv' in input_file:
values = string.split(data,',')
else:
values = string.split(data,'\t')
if firstLine:
if data[0]!='#':
sample_index_list = map(lambda x: values.index(x), filter_names)
firstLine = False
header = values
try: filtered_values = map(lambda x: values[x], sample_index_list) ### simple and fast way to reorganize the samples
except Exception:
### For PSI files with missing values at the end of each line, often
if len(header) != len(values):
diff = len(header)-len(values)
values+=diff*['']
filtered_values = map(lambda x: values[x], sample_index_list) ### simple and fast way to reorganize the samples
#print values[0]; print sample_index_list; print values; print len(values); print len(prior_values);kill
prior_values=values
export_object.write(string.join([values[0]]+filtered_values,'\t')+'\n')
export_object.close()
print 'Filtered columns printed to:',output_file
def filterRows(input_file,output_file,filterDB=None):
export_object = open(output_file,'w')
firstLine = True
for line in open(input_file,'rU').xreadlines():
data = cleanUpLine(line)
values = string.split(data,'\t')
if firstLine:
firstLine = False
export_object.write(line)
else:
if filterDB!=None:
if values[0] in filterDB:
export_object.write(line)
else:
max_val = max(map(float,values[1:]))
#min_val = min(map(float,values[1:]))
#if max_val>5:
if max_val < 0.1:
export_object.write(line)
export_object.close()
print 'Filtered rows printed to:',output_file
def getFilters(filter_file):
filter_list=[]
for line in open(filter_file,'rU').xreadlines():
data = cleanUpLine(line)
sample = string.split(data,'\t')[0]
filter_list.append(sample)
return filter_list
"""" Filter a dataset based on number of genes with expression above the indicated threshold"""
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def statisticallyFilterFile(input_file,output_file,threshold):
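    # For every sample (column) count the genes expressed above `threshold`,
    # write those counts to a companion geneCount file, and keep only samples
    # whose count exceeds a cutoff of mean - 2*SD of that distribution (falling
    # back to mean - SD or mean - SD/2 when the 2*SD cutoff would be negative or
    # below the smallest observed count), then export the column-filtered file.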
if 'exp.' in input_file:
counts_file = string.replace(input_file,'exp.','geneCount.')
else:
counts_file = input_file[:-4]+'-geneCount.txt'
sample_expressed_genes={}
header=True
junction_max=[]
count_sum_array=[]
for line in open(input_file,'rU').xreadlines():
data = cleanUpLine(line)
if '.csv' in input_file:
t = string.split(data,',')
else:
t = string.split(data,'\t')
if header:
samples = t[1:]
header=False
count_sum_array=[0]*len(samples)
else:
try: values = map(float,t[1:])
except Exception:
if 'NA' in t[1:]:
tn = [0 if x=='NA' else x for x in t[1:]] ### Replace NAs
values = map(float,tn)
else:
tn = [0 if x=='' else x for x in t[1:]] ### Replace NAs
values = map(float,tn)
binarized_values = []
for v in values:
if v>threshold: binarized_values.append(1)
else: binarized_values.append(0)
count_sum_array = [sum(value) for value in zip(*[count_sum_array,binarized_values])]
index=0
distribution=[]
count_sum_array_db={}
samples_to_retain =[]
samples_to_exclude = []
for sample in samples:
count_sum_array_db[sample] = count_sum_array[index]
distribution.append(count_sum_array[index])
index+=1
import statistics
distribution.sort()
avg = int(statistics.avg(distribution))
stdev = int(statistics.stdev(distribution))
min_exp = int(min(distribution))
cutoff = avg - (stdev*2)
dev = 2
print 'The average number of genes expressed above %s is %s, (SD is %s, min is %s)' % (threshold,avg,stdev,min_exp)
if cutoff<0:
if (stdev-avg)>0:
cutoff = avg - (stdev/2); dev = 0.5
else:
cutoff = avg - stdev; dev = 1
if min_exp>cutoff:
cutoff = avg - stdev; dev = 1
import export
eo = export.ExportFile(counts_file)
eo.write('Sample\tGenes Expressed(threshold:'+str(threshold)+')\n')
for sample in samples: ### keep the original order
if count_sum_array_db[sample]>cutoff:
samples_to_retain.append(sample)
else:
samples_to_exclude.append(sample)
eo.write(sample+'\t'+str(count_sum_array_db[sample])+'\n')
eo.close()
    print len(samples_to_exclude), 'samples removed (# exp. genes, < %s SD away) (%s)' % (dev,string.join(samples_to_exclude,', '))
print 'Exporting the filtered expression file to:'
print output_file
filterFile(input_file,output_file,samples_to_retain)
def combineDropSeq(input_dir):
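    # Combine DropSeq count matrices that share the same sample columns by
    # summing each gene's row across every .txt file in input_dir, then write
    # the merged matrix to <input_dir>/test.txt.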
import unique
files = unique.read_directory(input_dir)
combinedGeneExpression={}
for input_file in files: #:70895507-70895600
header=True
if '.txt' in input_file:
for line in open(input_dir+'/'+input_file,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
header_row = line
samples = t[1:]
header=False
else:
values = map(float,t[1:])
gene = t[0]
if gene in combinedGeneExpression:
prior_values = combinedGeneExpression[gene]
count_sum_array = [sum(value) for value in zip(*[prior_values,values])]
else:
count_sum_array = values
combinedGeneExpression[gene] = count_sum_array
input_file = input_dir+'/test.txt'
export_object = open(input_file,'w')
export_object.write(string.join(['UID']+samples,'\t')+'\n')
for gene in combinedGeneExpression:
values = string.join(map(str,[gene]+combinedGeneExpression[gene]),'\t')
export_object.write(values+'\n')
export_object.close()
if __name__ == '__main__':
################ Comand-line arguments ################
#statisticallyFilterFile('/Users/saljh8/Desktop/dataAnalysis/Driscoll/R3/ExpressionInput/exp.2000_run1708B_normalized.txt','/Users/saljh8/Desktop/dataAnalysis/Driscoll/R3/ExpressionInput/exp.2000_run1708B_normalized2.txt',1)
import getopt
filter_rows=False
filter_file=None
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
filter_names = ['bob','sally','jim']
input_file = makeTestFile()
#Filtering samples in a datasets
#python SampleSelect.py --i /Users/saljh8/Desktop/C4-hESC/ExpressionInput/exp.C4.txt --f /Users/saljh8/Desktop/C4-hESC/ExpressionInput/groups.C4.txt
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','f=','r='])
#print sys.argv[1:]
for opt, arg in options:
if opt == '--i': input_file=arg
elif opt == '--f': filter_file=arg
elif opt == '--r': filter_rows=True
output_file = input_file[:-4]+'-filtered.txt'
if filter_file ==None:
combineDropSeq(input_file)
elif filter_rows:
filterRows(input_file,output_file)
else:
filter_names = getFilters(filter_file)
filterFile(input_file,output_file,filter_names)
|
|
#!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from tests import testlib
import unittest
import splunklib.client as client
from splunklib.client import AuthenticationError
from splunklib.client import Service
from splunklib.binding import HTTPError
class ServiceTestCase(testlib.SDKTestCase):
def test_autologin(self):
service = client.connect(autologin=True, **self.opts.kwargs)
self.service.restart(timeout=120)
reader = service.jobs.oneshot("search index=internal | head 1")
self.assertIsNotNone(reader)
def test_capabilities(self):
capabilities = self.service.capabilities
self.assertTrue(isinstance(capabilities, list))
self.assertTrue(all([isinstance(c, str) for c in capabilities]))
self.assertTrue('change_own_password' in capabilities) # This should always be there...
def test_info(self):
info = self.service.info
keys = ["build", "cpu_arch", "guid", "isFree", "isTrial", "licenseKeys",
"licenseSignature", "licenseState", "master_guid", "mode",
"os_build", "os_name", "os_version", "serverName", "version"]
for key in keys:
self.assertTrue(key in list(info.keys()))
def test_info_with_namespace(self):
# Make sure we're not accessing /servicesNS/admin/search/server/info
# instead of /services/server/info
# Backup the values, which are probably set to None
owner, app = self.service.namespace["owner"], self.service.namespace["app"]
self.service.namespace["owner"] = self.service.username
self.service.namespace["app"] = "search"
try:
self.assertEqual(self.service.info.licenseState, 'OK')
except HTTPError as he:
self.fail("Couldn't get the server info, probably got a 403! %s" % he.message)
self.service.namespace["owner"] = owner
self.service.namespace["app"] = app
def test_without_namespace(self):
service = client.connect(**self.opts.kwargs)
service.apps.list()
def test_app_namespace(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': None})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_owner_wildcard(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({ 'app': "search", 'owner': "-" })
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_default_app(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({ 'app': None, 'owner': "admin" })
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_app_wildcard(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({ 'app': "-", 'owner': "admin" })
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_user_namespace(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({ 'app': "search", 'owner': "admin" })
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_parse(self):
# At the moment the parse method returns the raw XML. At
# some point this will change and it will return a nice,
# objectified form of the results, but for now there's
# nothing to test but a good response code.
response = self.service.parse('search * abc="def" | dedup abc')
self.assertEqual(response.status, 200)
def test_parse_fail(self):
try:
self.service.parse("xyzzy")
self.fail('Parse on nonsense did not fail')
except HTTPError as e:
self.assertEqual(e.status, 400)
def test_restart(self):
service = client.connect(**self.opts.kwargs)
self.service.restart(timeout=300)
service.login() # Make sure we are awake
def test_read_outputs_with_type(self):
name = testlib.tmpname()
service = client.connect(**self.opts.kwargs)
service.post('data/outputs/tcp/syslog', name=name, type='tcp')
entity = client.Entity(service, 'data/outputs/tcp/syslog/' + name)
        self.assertEqual('tcp', entity.content.type)
if service.restart_required:
self.restartSplunk()
service = client.connect(**self.opts.kwargs)
client.Entity(service, 'data/outputs/tcp/syslog/' + name).delete()
if service.restart_required:
self.restartSplunk()
def test_splunk_version(self):
service = client.connect(**self.opts.kwargs)
v = service.splunk_version
self.assertTrue(isinstance(v, tuple))
self.assertTrue(len(v) >= 2)
for p in v:
self.assertTrue(isinstance(p, int) and p >= 0)
for version in [(4,3,3), (5,), (5,0,1)]:
with self.fake_splunk_version(version):
self.assertEqual(version, self.service.splunk_version)
def test_query_without_login_raises_auth_error(self):
service = self._create_unauthenticated_service()
self.assertRaises(AuthenticationError, lambda: service.indexes.list())
# This behavior is needed for backward compatibility for code
# prior to the introduction of AuthenticationError
def test_query_without_login_raises_http_401(self):
service = self._create_unauthenticated_service()
try:
service.indexes.list()
self.fail('Expected HTTP 401.')
except HTTPError as he:
if he.status == 401:
# Good
pass
else:
raise
def _create_unauthenticated_service(self):
return Service(**{
'host': self.opts.kwargs['host'],
'port': self.opts.kwargs['port'],
'scheme': self.opts.kwargs['scheme']
})
class TestCookieAuthentication(unittest.TestCase):
def setUp(self):
self.opts = testlib.parse([], {}, ".splunkrc")
self.service = client.Service(**self.opts.kwargs)
if getattr(unittest.TestCase, 'assertIsNotNone', None) is None:
def assertIsNotNone(self, obj, msg=None):
if obj is None:
raise self.failureException(msg or '%r is not None' % obj)
def test_login_and_store_cookie(self):
self.assertIsNotNone(self.service.get_cookies())
self.assertEqual(len(self.service.get_cookies()), 0)
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
self.assertNotEquals(self.service.get_cookies(), {})
self.assertEqual(len(self.service.get_cookies()), 1)
def test_login_with_cookie(self):
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
# Use the cookie from the other service as the only auth param (don't need user/password)
service2 = client.Service(**{"cookie": "%s=%s" % list(self.service.get_cookies().items())[0]})
service2.login()
self.assertEqual(len(service2.get_cookies()), 1)
self.assertEqual(service2.get_cookies(), self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), len(self.service.get_cookies()))
self.assertEqual(list(service2.get_cookies().keys())[0][:8], "splunkd_")
self.assertEqual(service2.apps.get().status, 200)
def test_login_fails_with_bad_cookie(self):
bad_cookie = {'bad': 'cookie'}
service2 = client.Service()
self.assertEqual(len(service2.get_cookies()), 0)
service2.get_cookies().update(bad_cookie)
service2.login()
self.assertEqual(service2.get_cookies(), {'bad': 'cookie'})
# Should get an error with a bad cookie
try:
service2.apps.get()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Request failed: Session is not logged in.")
def test_autologin_with_cookie(self):
self.service.login()
self.assertTrue(self.service.has_cookies())
service = client.connect(
autologin=True,
cookie="%s=%s" % list(self.service.get_cookies().items())[0],
**self.opts.kwargs)
self.assertTrue(service.has_cookies())
self.service.restart(timeout=120)
reader = service.jobs.oneshot("search index=internal | head 1")
self.assertIsNotNone(reader)
def test_login_fails_with_no_cookie(self):
service2 = client.Service()
self.assertEqual(len(service2.get_cookies()), 0)
# Should get an error when no authentication method
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
def test_login_with_multiple_cookie_headers(self):
cookies = {
'bad': 'cookie',
'something_else': 'bad'
}
self.service.logout()
self.service.get_cookies().update(cookies)
self.service.login()
self.assertEqual(self.service.apps.get().status, 200)
def test_login_with_multiple_cookies(self):
bad_cookie = 'bad=cookie'
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
service2 = client.Service(**{"cookie": bad_cookie})
service2.login()
# Should get an error with a bad cookie
try:
service2.apps.get()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Request failed: Session is not logged in.")
# Add on valid cookies, and try to use all of them
service2.get_cookies().update(self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), 2)
self.service.get_cookies().update({'bad': 'cookie'})
self.assertEqual(service2.get_cookies(), self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), 2)
self.assertTrue([cookie for cookie in service2.get_cookies() if "splunkd_" in cookie])
self.assertTrue('bad' in service2.get_cookies())
self.assertEqual(service2.get_cookies()['bad'], 'cookie')
self.assertEqual(set(self.service.get_cookies()), set(service2.get_cookies()))
service2.login()
self.assertEqual(service2.apps.get().status, 200)
class TestSettings(testlib.SDKTestCase):
def test_read_settings(self):
settings = self.service.settings
# Verify that settings contains the keys we expect
keys = [
"SPLUNK_DB", "SPLUNK_HOME", "enableSplunkWebSSL", "host",
"httpport", "mgmtHostPort", "minFreeSpace", "pass4SymmKey",
"serverName", "sessionTimeout", "startwebserver", "trustedIP"
]
for key in keys:
self.assertTrue(key in settings)
def test_update_settings(self):
settings = self.service.settings
# Verify that we can update the settings
original = settings['sessionTimeout']
self.assertTrue(original != "42h")
settings.update(sessionTimeout="42h")
settings.refresh()
updated = settings['sessionTimeout']
self.assertEqual(updated, "42h")
# Restore (and verify) original value
settings.update(sessionTimeout=original)
settings.refresh()
updated = settings['sessionTimeout']
self.assertEqual(updated, original)
self.restartSplunk()
class TestTrailing(unittest.TestCase):
template = '/servicesNS/boris/search/another/path/segment/that runs on'
def test_raises_when_not_found_first(self):
self.assertRaises(ValueError, client._trailing, 'this is a test', 'boris')
def test_raises_when_not_found_second(self):
self.assertRaises(ValueError, client._trailing, 'this is a test', 's is', 'boris')
def test_no_args_is_identity(self):
self.assertEqual(self.template, client._trailing(self.template))
def test_trailing_with_one_arg_works(self):
self.assertEqual('boris/search/another/path/segment/that runs on', client._trailing(self.template, 'ervicesNS/'))
def test_trailing_with_n_args_works(self):
self.assertEqual(
'another/path/segment/that runs on',
client._trailing(self.template, 'servicesNS/', '/', '/')
)
class TestEntityNamespacing(testlib.SDKTestCase):
def test_proper_namespace_with_arguments(self):
entity = self.service.apps['search']
self.assertEqual((None,None,"global"), entity._proper_namespace(sharing="global"))
self.assertEqual((None,"search","app"), entity._proper_namespace(sharing="app", app="search"))
self.assertEqual(
("admin", "search", "user"),
entity._proper_namespace(sharing="user", app="search", owner="admin")
)
def test_proper_namespace_with_entity_namespace(self):
entity = self.service.apps['search']
namespace = (entity.access.owner, entity.access.app, entity.access.sharing)
self.assertEqual(namespace, entity._proper_namespace())
def test_proper_namespace_with_service_namespace(self):
entity = client.Entity(self.service, client.PATH_APPS + "search")
del entity._state['access']
namespace = (self.service.namespace.owner,
self.service.namespace.app,
self.service.namespace.sharing)
self.assertEqual(namespace, entity._proper_namespace())
if __name__ == "__main__":
try:
import unittest2 as unittest
except ImportError:
import unittest
unittest.main()
|
|
# N-Dimensional Tic-Tac-Toe by Thomas Lively
from __future__ import division
import curses, curses.ascii, sys
# logical representation of the n-dimensional board as a single list
class Model(object):
def __init__(self, dimensions=2, size=0, players=2):
if size < 3:
size = dimensions+1
self.dimensions = dimensions
self.size = size
if self.size < 3:
self.size = 3
self.players = players
if self.players < 2 or self.players > 9:
self.players = 2
self.board = [0 for i in xrange(size**dimensions)]
self.current_player = 1
self.game_over = False
self.tied_game = False
self.moves = 0
# makes the next player the active player
def nextTurn(self):
self.current_player += 1
if self.current_player > self.players:
self.current_player = 1
return self.current_player
def playAtCoordinate(self, coord):
self.validateCoord(coord)
self.playAtIndex(self.getIndexFromCoord(coord))
    # puts the current player's number into this index of the array, then checks for game over
def playAtIndex(self, index):
self.validateIndex(index)
if self.board[index] != 0:
raise IllegalMoveError(index)
self.board[index] = self.current_player
seqs = self.getSequencesFromIndex(index)
for seq in seqs:
n = 0
for coord in seq:
if self.board[self.getIndexFromCoord(coord)] == self.current_player:
n += 1
if n == self.size:
self.game_over = True
break
self.moves += 1
if self.moves == self.size ** self.dimensions:
self.tied_game = True
self.game_over = True
def getIndexFromCoord(self, coord):
self.validateCoord(coord)
index = 0
for i in xrange(len(coord)-1,-1,-1):
index += coord[i]*(self.size**i)
return index
def getCoordFromIndex(self, index):
self.validateIndex(index)
coord_list = []
for i in xrange(self.dimensions):
nd = self.size**(self.dimensions-1-i)
coord_list.append(index//nd)
index %= nd
coord_list.reverse()
return tuple(coord_list)
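    # For example, on a standard 3x3 board (dimensions=2, size=3) the two
    # helpers above are inverses of each other:
    #   m = Model(dimensions=2, size=3)
    #   m.getIndexFromCoord((1, 2))   # -> 1*3**0 + 2*3**1 == 7
    #   m.getCoordFromIndex(7)        # -> (1, 2)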
def getSequencesFromIndex(self, index):
return self.getSequencesFromCoord(self.getCoordFromIndex(index))
# returns all the possible winning sequences containing this coordinate set
def getSequencesFromCoord(self, coord):
# from a set of indices, return a subset with elements indicated by the ones in
# bin_rep
def getIndexSet(indices, bin_rep):
iset = []
for i in xrange(len(indices)):
if bin_rep[i] == u"1":
iset.append(indices[i])
return iset
# given a set of indices that should be varied, return the n versions of coord
def getVariedSequences(varying_indices):
returned_sequences = []
for i in xrange(self.size):
new_coord = list(coord)
for index in varying_indices:
if coord[index] < self.size//2:
new_coord[index] = i
else:
new_coord[index] = self.size-i-1
returned_sequences.append(new_coord)
return returned_sequences
# given a set of indices that should be varied and a binary representation of
# the direction in which they should vary, return the n versions of coord
def getMidVariedSequences(varying_indices, vary_dir):
returned_sequences = []
for i in xrange(self.size):
new_coord = list(coord)
for j in xrange(len(varying_indices)):
if vary_dir[j] == u"1":
new_coord[varying_indices[j]] = i
else:
new_coord[varying_indices[j]] = self.size-i-1
returned_sequences.append(new_coord)
return returned_sequences
self.validateCoord(coord)
returned_sequences = []
# for values up to half if evenly sized, up to middle-1 if oddly sized
for x in xrange(self.size//2+1):
x2 = self.size-x-1
all_indices = []
for index in xrange(len(coord)):
if coord[index] == x or coord[index] == x2:
all_indices.append(index)
for i in xrange(1, 2 ** len(all_indices)):
bin_rep = bin(i)[2:]
while len(bin_rep) < len(all_indices):
bin_rep = u"0" + bin_rep
iset = getIndexSet(all_indices, bin_rep)
if x != x2:
returned_sequences.append(getVariedSequences(iset))
else:
for j in xrange(2 ** (len(iset)-1)):
dir_vary = bin(j)[2:]
while len(dir_vary) < len(iset):
dir_vary = u"0" + dir_vary
mid_sequences = getMidVariedSequences(iset, dir_vary)
returned_sequences.append(mid_sequences)
return returned_sequences
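    # For example, on a 3x3 board (dimensions=2, size=3) the centre coordinate
    # (1, 1) should yield four candidate winning lines: the middle row, the
    # middle column, and the two diagonals, each returned as a list of three
    # coordinates.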
def validateIndex(self, index):
if index < 0 or index >= len(self.board):
raise ValueError(u"Invalid index")
def validateCoord(self, coord):
if len(coord) != self.dimensions:
raise ValueError(u"Coordinate needs " + unicode(self.dimensions) + u" dimensions")
for i in coord:
if i >= self.size or i < 0:
raise ValueError(u"0 <= coordinate < " + unicode(self.size))
# xy pairs from high order to low order to model coordinates
def XYCoordToCoord(self, xy):
coord = []
start = 0
if self.dimensions % 2 == 1:
start = 1
for i in xrange(start+1, len(xy), 2):
coord.insert(0, xy[i])
if start == 1:
coord.insert(0, xy[0])
for i in xrange(start, len(xy), 2):
coord.insert(0, xy[i])
return tuple(coord)
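    # With an even number of dimensions the (x, y) pairs map straight through,
    # e.g. for dimensions=2, XYCoordToCoord((2, 0)) == (2, 0); with an odd
    # number of dimensions the first value is handled separately as a lone
    # leading axis before the pairs.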
class IllegalMoveError(Exception):
def __init__(self, index):
self.index = index
def __str__(self):
return u"Illegal move at index " + unicode(self.index)
# A view for the model. Other views might use Curses or a graphics library
class PlainTextView():
def __init__(self, model):
self.model = model
self.create()
# returns the divider that goes between board units of the d-th horizontal order
def getHorizontalDivider(self, d):
if d < 0: return
if d == 0: return [u"|"]
if d == 1: return [u" "]
div = [u" ", u" "]
for i in xrange(d-1):
div.insert(1, u"|")
return div
# returns the divider that goes between board units of the d-th vertical order
def getVerticalDivider(self, d):
if d < 0: return
if d == 0: return [u"-"]
if d == 1: return [u" "]
div = [u" ", u" "]
for i in xrange(d-1):
div.insert(1, u"-")
return div
# recursively create the board as a matrix of characters
def createMatrix(self, d):
if d < 0: return
if d == 0: return [[u"X"]]
sub_block = self.createMatrix(d-1)
returned = []
if d % 2 == 1:
divider = self.getHorizontalDivider(d // 2)
for row in sub_block:
new_row = []
for char in row:
new_row.append(char)
for i in xrange(self.model.size - 1):
for char in divider:
new_row.append(char)
for char in row:
new_row.append(char)
returned.append(new_row)
return returned
if d % 2 == 0:
divider = self.getVerticalDivider(d // 2 - 1)
for row in sub_block:
new_row = []
for char in row:
new_row.append(char)
returned.append(new_row)
for i in xrange (self.model.size - 1):
for char in divider:
new_row = []
for j in xrange(len(sub_block[0])):
new_row.append(char)
returned.append(new_row)
for row in sub_block:
new_row = []
for char in row:
new_row.append(char)
returned.append(new_row)
return returned
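    # For a two-dimensional 3x3 board, createMatrix(2) produces the familiar grid
    # below (each "X" is later replaced by a playable cell in create()):
    #   X|X|X
    #   -----
    #   X|X|X
    #   -----
    #   X|X|X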
    # use the matrix of characters that make up the board to create maps from the
    # representation's indices to the model's and vice versa, and create a string representation
def create(self):
matrix = self.createMatrix(self.model.dimensions)
self.str_rep = u""
for row in matrix:
for char in row:
self.str_rep += char
self.str_rep += u"\n"
#print(str_rep)
self.model_to_view = dict()
self.view_to_model = dict()
model_index = 0
for i in xrange(len(self.str_rep)):
if self.str_rep[i] == u"X":
self.str_rep = self.str_rep.replace(u"X", u" ", 1)
self.model_to_view[model_index] = i
self.view_to_model[i] = model_index
model_index += 1
# given char from model, return char for display
def getDisplayChar(self, c):
if c == 0: return u" "
if self.model.players == 2:
if c == 1: return u"X"
if c == 2: return u"O"
return unicode(c)
# must be called to update the view when the state of index i in the model changes
def update(self, i):
index = self.model_to_view[i]
char = self.getDisplayChar(self.model.board[i])
self.str_rep = self.str_rep[:index] + char + self.str_rep[index+1:]
def __str__(self):
return self.str_rep
# serves as a "Main" class and controls user interface with model and view
class TextGameController():
def __init__(self):
dimensions = int(raw_input(u"dimensions: "))
size = int(raw_input(u"size: "))
players = int(raw_input(u"players: "))
print u"creating model..."
self.board = Model(dimensions, size, players)
print u"creating view..."
self.view = PlainTextView(self.board)
while True:
print
print self.view
print
player = u"Player " + unicode(self.board.current_player)
coord = self.makeMove(player + u": ")
self.view.update(self.board.getIndexFromCoord(coord))
if self.board.game_over:
if self.board.tied_game:
print u"It's a tie :("
break
print self.view
print
print player + u" wins!"
break
self.board.nextTurn()
# transform user input to model coordinates
    # and run the coordinates through the necessary checks, repeating if necessary
def makeMove(self, prompt):
coord = None
while True:
try:
raw_in = eval(u"(" + raw_input(prompt) + u")")
coord = self.board.XYCoordToCoord(raw_in)
print coord
except Exception, e:
print u"Unrecognizable input"
continue
try:
self.board.validateCoord(coord)
except Exception, e:
print e
continue
try:
self.board.playAtCoordinate(coord)
break
except Exception, e:
print u"Illegal move!"
continue
return coord
class CursesController(object):
def main(self, stdscr):
model = self.model
view = self.view
def alert():
curses.beep()
curses.flash()
uneven = model.dimensions % 2 != 0
locked_coords = []
selected_x = model.size // 2
selected_y = 0
if not (len(locked_coords) == 0 and uneven):
selected_y = model.size // 2
def getEnclosingRectangle(coord):
extension = xrange(model.dimensions - len(coord))
min_xycoord = coord[:]
min_xycoord.extend([0 for i in extension])
min_coord = model.XYCoordToCoord(min_xycoord)
max_xycoord = coord[:]
max_xycoord.extend([model.size-1 for i in extension])
max_coord = model.XYCoordToCoord(max_xycoord)
min_index = view.model_to_view[model.getIndexFromCoord(min_coord)]
min_index = min_index - unicode(view).count(u"\n",0, min_index)
max_index = view.model_to_view[model.getIndexFromCoord(max_coord)]
max_index = max_index - unicode(view).count(u"\n",0, max_index)
length = unicode(view).find(u"\n")
min_x = min_index % length
min_y = min_index // length
max_x = max_index % length
max_y = max_index // length
return (min_y,min_x,max_y,max_x)
def getPlayerColor(p):
colors = {1:4,2:1,3:2,4:3,5:5,6:6,7:7,8:5,9:7}
return int(colors[((p-1)%9)+1])
curses.curs_set(0)
win = curses.newpad(unicode(view).count(u"\n")+1, unicode(view).find(u"\n")+1)
for i in xrange(1,8):
curses.init_pair(i,i,0)
history = []
initialized = False
while not model.game_over:
stdscr.clear()
# Title Box Outline
stdscr.addch(0,0,curses.ACS_ULCORNER)
stdscr.hline(0,1,curses.ACS_HLINE,curses.COLS-2)
stdscr.addch(0,curses.COLS-1,curses.ACS_URCORNER)
stdscr.vline(1,0,curses.ACS_VLINE,3)
stdscr.vline(1,curses.COLS-1,curses.ACS_VLINE,3)
panel_width = model.dimensions * 2 + 11
# Board Area Outline
stdscr.addch(4,0,curses.ACS_ULCORNER)
stdscr.hline(4,1,curses.ACS_HLINE,curses.COLS-panel_width-1)
stdscr.addch(curses.LINES-1,0,curses.ACS_LLCORNER)
stdscr.hline(curses.LINES-1,1,curses.ACS_HLINE,curses.COLS-panel_width-1)
stdscr.vline(5,0,curses.ACS_VLINE,curses.LINES-6)
# Top Panel Box Outline
stdscr.addch(4,curses.COLS-panel_width,curses.ACS_ULCORNER)
stdscr.hline(4,curses.COLS-panel_width+1,curses.ACS_HLINE,panel_width-2)
stdscr.addch(4,curses.COLS-1,curses.ACS_URCORNER)
stdscr.vline(5,curses.COLS-panel_width,curses.ACS_VLINE,4)
stdscr.vline(5,curses.COLS-1,curses.ACS_VLINE,4)
stdscr.addch(9,curses.COLS-panel_width,curses.ACS_LLCORNER)
stdscr.addch(9,curses.COLS-1,curses.ACS_LRCORNER)
stdscr.hline(9,curses.COLS-panel_width+1,curses.ACS_HLINE,panel_width-2)
# Bottom Panel OUTLINE
stdscr.vline(10,curses.COLS-panel_width,curses.ACS_VLINE,curses.LINES-11)
stdscr.vline(10,curses.COLS-1,curses.ACS_VLINE,curses.LINES-11)
stdscr.addch(curses.LINES-1,curses.COLS-panel_width,curses.ACS_LLCORNER)
stdscr.hline(curses.LINES-1,curses.COLS-panel_width+1,
curses.ACS_HLINE,panel_width-2)
try:stdscr.addch(curses.LINES-1,curses.COLS-1,curses.ACS_LRCORNER)
except:pass
title = u"N-Dimensional Tic-Tac-Toe ({0}^{1})"\
.format(model.size,model.dimensions)
stdscr.addstr(2, curses.COLS//2 - len(title)//2, title)
# Get input
key = None
curses.flushinp()
if initialized:
key = win.getch()
else:
initialized = True
if key == ord(u"w"):
if selected_y == 0 or len(locked_coords) == 0 and uneven:
alert()
else:
selected_y -= 1
if key == ord(u"s"):
if selected_y == model.size-1 or len(locked_coords) == 0 and uneven:
alert()
else:
selected_y += 1
if key == ord(u"a"):
if selected_x == 0:
alert()
else:
selected_x -= 1
if key == ord(u"d"):
if selected_x == model.size-1:
alert()
else:
selected_x += 1
if key == ord(u"\n"):
locked_coords.append(selected_x)
if not (len(locked_coords) == 1 and uneven):
locked_coords.append(selected_y)
selected_x = model.size // 2
selected_y = 0
if not (len(locked_coords) == 0 and uneven):
selected_y = model.size // 2
if len(locked_coords) == model.dimensions:
try:
coord = model.XYCoordToCoord(locked_coords)
model.playAtCoordinate(coord)
view.update(model.getIndexFromCoord(coord))
history.insert(0, (model.current_player, locked_coords[:]))
del locked_coords[:]
selected_x = model.size // 2
selected_y = 0
if not (len(locked_coords) == 0 and uneven):
selected_y = model.size // 2
if not model.game_over:
model.nextTurn()
except Exception:
key = curses.ascii.ESC
if key == curses.ascii.ESC:
if len(locked_coords) == 0:
alert()
else:
selected_y = locked_coords[-1]
del locked_coords[-1]
if not (len(locked_coords) == 0):
selected_x = locked_coords[-1]
del locked_coords[-1]
else:
selected_x = selected_y
selected_y = 0
# Draw info box contents
info_line = u"Player {0}".format(model.current_player)
stdscr.addstr(6, int(curses.COLS-(panel_width + len(info_line))/2),
info_line,
curses.color_pair(
getPlayerColor(
model.current_player)))
info_coord = locked_coords[:]
info_coord.append(selected_x)
if not (len(locked_coords) == 0 and uneven):
info_coord.append(selected_y)
info_line = unicode(info_coord)[1:-1].replace(u" ", u"")
stdscr.addstr(7, int(curses.COLS-(panel_width + len(info_line))/2),
info_line,
curses.color_pair(
getPlayerColor(
model.current_player)))
# Draw move history
for i, move in enumerate(history):
if 10 + i == curses.LINES -1:
break
p, loc = move
loc = unicode(loc)[1:-1].replace(u" ", u"")
stdscr.addstr(10+i, curses.COLS-panel_width+1,
u"Player {0}: {1}".format(p, loc),
curses.color_pair(getPlayerColor(p)))
# Draw board
win.addstr(0,0, unicode(view))
# Highlight selected area
coord = locked_coords[:]
coord.append(selected_x)
if not (len(locked_coords) == 0 and uneven):
coord.append(selected_y)
min_y,min_x,max_y,max_x = getEnclosingRectangle(coord)
for y in xrange(min_y, max_y+1):
win.chgat(y, min_x, max_x + 1 - min_x,
curses.A_REVERSE |
curses.color_pair(getPlayerColor(model.current_player)))
# Highlight past moves
for p, loc in history:
rect = getEnclosingRectangle(loc)
current = win.inch(rect[0], rect[1])
if current == current | curses.A_REVERSE:
win.chgat(rect[0], rect[1], 1, curses.color_pair(getPlayerColor(p)))
else:
win.chgat(rect[0], rect[1], 1,
curses.color_pair(getPlayerColor(p)) | curses.A_REVERSE)
# Calculate area of board to display
pminrow = 0
pmincol = 0
pheight = unicode(view).count(u"\n")-1
pwidth = unicode(view).find(u"\n")-1
sminrow = 5
smincol = 1
smaxrow = curses.LINES-2
smaxcol = curses.COLS-panel_width-1
sheight = smaxrow - sminrow
swidth = smaxcol - smincol
if pheight <= sheight:
dif = sheight - pheight
sminrow += dif // 2
else:
pminrow1 = min_y - sheight * min_y / pheight
pminrow2 = sheight/pheight*(pheight-max_y) + max_y - sheight
dif1 = min_y
dif2 = pheight - max_y
if not (dif1 == 0 and dif2 == 0):
pminrow = int((pminrow1 * dif2 + pminrow2 * dif1) / (dif1 + dif2)+.5)
else:
dif = sheight - pheight
sminrow += dif // 2
if pwidth <= swidth:
dif = swidth - pwidth
smincol += dif // 2
else:
pmincol1 = min_x - swidth * min_x / pwidth
pmincol2 = swidth/pwidth*(pwidth-max_x) + max_x - swidth
dif1 = min_x
dif2 = pwidth - max_x
if not (dif1 == 0 and dif2 == 0):
pmincol = int((pmincol1 * dif2 + pmincol2 * dif1) / (dif1 + dif2)+.5)
else:
dif = swidth - pwidth
smincol += dif // 2
# Refresh the display
stdscr.refresh()
win.refresh(pminrow, pmincol, sminrow, smincol, smaxrow, smaxcol)
stdscr.clear()
win.clear()
if not model.tied_game:
player = model.current_player
message = u"PLAYER {0} WINS!".format(player)
stdscr.addstr(curses.LINES//2, int((curses.COLS - len(message))/2+.5), message,
curses.A_BLINK | curses.A_REVERSE | curses.color_pair(getPlayerColor(player)))
else:
message = u"IT'S A TIE :("
stdscr.addstr(curses.LINES//2, int((curses.COLS - len(message))/2+.5), message,
curses.A_BLINK | curses.A_REVERSE)
stdscr.getch()
def __init__(self, model):
self.model = model
self.view = PlainTextView(self.model)
curses.wrapper(self.main)
# run the game if run as a script
if __name__ == u"__main__":
#TextGameController()
args = [int(i) for i in sys.argv[1:]]
if args:
CursesController(Model(*args))
else:
CursesController(Model(4))
|
|
'''
Created on Feb 4, 2012
@author: mchrzanowski
'''
import os.path
from time import time
cardNumberToArrayDict = {}
arrayToCardNumberDict = {}
suitToArrayDict = {}
arrayToSuitDict = {}
evaluationDict = {}
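# A hand is represented as a 13x4 matrix of 0/1 flags: rows are card ranks
# (indexed via cardNumberToArrayDict) and columns are suits (indexed via
# suitToArrayDict), so hand[rank][suit] == 1 means that card is present.
# For example, the five of hearts ('5H') sets
# hand[cardNumberToArrayDict['5']][suitToArrayDict['H']], i.e. hand[3][0] = 1.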
def doesContainRoyalFlush(hand):
for suit in suitToArrayDict:
if hand[cardNumberToArrayDict['T']][suitToArrayDict[suit]] == 1 and \
hand[cardNumberToArrayDict['J']][suitToArrayDict[suit]] == 1 and \
hand[cardNumberToArrayDict['Q']][suitToArrayDict[suit]] == 1 and \
hand[cardNumberToArrayDict['K']][suitToArrayDict[suit]] == 1 and \
hand[cardNumberToArrayDict['A']][suitToArrayDict[suit]] == 1:
return True
return False
def getHighestCardInHand(hand, ignoreSpecificCardNumbers = False):
''' ignore whatever values are in the passed list '''
for i in xrange(len(arrayToCardNumberDict) - 1, -1, -1):
for j in xrange(len(suitToArrayDict)):
if hand[i][j] == 1:
if ignoreSpecificCardNumbers is not False and arrayToCardNumberDict[i] in ignoreSpecificCardNumbers:
continue
return arrayToCardNumberDict[i]
def doesContainTwoPairs(hand):
''' check for one pair and then check for another pair by specifically asking
the function to ignore the first number. Return the answer as a sorted list
or False. '''
firstPair = doesContainOnePair(hand)
if firstPair is not False:
secondPair = doesContainOnePair(hand, False, firstPair)
if secondPair is not False:
pair = [firstPair, secondPair]
pair.sort()
pair.reverse()
return pair
return False
def doesContainStraightFlush(hand):
return doesContainStraight(hand, True)
def doesContainStraight(hand, mustBeInSameSuit = False):
currentNumberInHand = -1
iterations = 0
suit = -1
for i in xrange(len(arrayToCardNumberDict)):
for j in xrange(len(suitToArrayDict)):
if hand[i][j] == 1:
if iterations == 0:
currentNumberInHand = i
iterations = 1
suit = j
elif currentNumberInHand + 1 == i:
if mustBeInSameSuit and suit != j:
return False
currentNumberInHand = i
iterations += 1
else:
return False
if iterations == 5:
return True
return False
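# For example, the hand 2H 3C 4D 5S 6H is visited in ascending rank order with
# each rank exactly one step above the previous, so iterations reaches 5 and the
# function returns True; with mustBeInSameSuit=True the five cards would also
# have to share a suit (i.e. a straight flush).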
def doesContainOnePair(hand, onlyTwo = False, ignoreSpecificCardNumber = False):
''' Several switches here:
onlyTwo : when turned on, this will force the method to return a card value whose frequency is EXACTLY 2.
ignoreSpecificCardNumber: when on, the method will completely ignore a given card number '''
for i in xrange(len(arrayToCardNumberDict) - 1, -1, -1):
if ignoreSpecificCardNumber is not False and arrayToCardNumberDict[i] in ignoreSpecificCardNumber:
continue
number = 0
for j in xrange(len(suitToArrayDict)):
if hand[i][j] == 1:
number += 1
if (not onlyTwo and number >= 2) or (onlyTwo and number == 2) :
return arrayToCardNumberDict[i]
return False
def doesContainThreeOfAKind(hand, onlyThree = False):
for i in xrange(len(arrayToCardNumberDict) - 1, -1, -1):
number = 0
for j in xrange(len(suitToArrayDict)):
if hand[i][j] == 1:
number += 1
if (not onlyThree and number >= 3) or (onlyThree and number == 3):
return arrayToCardNumberDict[i]
return False
def doesContainFlush(hand):
for suit in suitToArrayDict:
numberOfCardsInSuit = 0
for number in xrange(len(cardNumberToArrayDict)):
if hand[number][suitToArrayDict[suit]] == 1:
numberOfCardsInSuit += 1
if numberOfCardsInSuit == 5:
return True
return False
def doesContainFourOfAKind(hand, onlyFour = False):
for i in xrange(len(arrayToCardNumberDict) - 1, -1, -1):
numberOfMatchingCards = 0
for j in xrange(len(suitToArrayDict)):
if hand[i][j] == 1:
numberOfMatchingCards += 1
if (not onlyFour and numberOfMatchingCards >= 4) or (onlyFour and numberOfMatchingCards == 4):
return arrayToCardNumberDict[i]
return False
def doesContainFullHouse(hand):
''' of 5 cards, if there is a three of a kind, then there can be only one pair.
    That pair cannot share the three of a kind's rank, so together they form a full house.
    We enforce this by invoking doesContainThreeOfAKind and doesContainOnePair with flags
    requiring exactly three and exactly two matching cards, respectively. '''
threeOfAKindNumber = doesContainThreeOfAKind(hand, True)
onePairNumber = doesContainOnePair(hand, True)
if threeOfAKindNumber is not False and onePairNumber is not False:
return tuple([threeOfAKindNumber, onePairNumber])
return False
def setUpNumberDicts():
cardNumberToArrayDict['2'] = 0 ; arrayToCardNumberDict[0] = '2'
cardNumberToArrayDict['3'] = 1 ; arrayToCardNumberDict[1] = '3'
cardNumberToArrayDict['4'] = 2 ; arrayToCardNumberDict[2] = '4'
cardNumberToArrayDict['5'] = 3 ; arrayToCardNumberDict[3] = '5'
cardNumberToArrayDict['6'] = 4 ; arrayToCardNumberDict[4] = '6'
cardNumberToArrayDict['7'] = 5 ; arrayToCardNumberDict[5] = '7'
cardNumberToArrayDict['8'] = 6 ; arrayToCardNumberDict[6] = '8'
cardNumberToArrayDict['9'] = 7 ; arrayToCardNumberDict[7] = '9'
cardNumberToArrayDict['T'] = 8 ; arrayToCardNumberDict[8] = 'T'
cardNumberToArrayDict['J'] = 9 ; arrayToCardNumberDict[9] = 'J'
cardNumberToArrayDict['Q'] = 10 ; arrayToCardNumberDict[10] = 'Q'
cardNumberToArrayDict['K'] = 11 ; arrayToCardNumberDict[11] = 'K'
cardNumberToArrayDict['A'] = 12 ; arrayToCardNumberDict[12] = 'A'
def setUpSuitDicts():
suitToArrayDict['H'] = 0 ; arrayToSuitDict[0] = 'H'
suitToArrayDict['S'] = 1 ; arrayToSuitDict[1] = 'S'
suitToArrayDict['C'] = 2 ; arrayToSuitDict[2] = 'C'
suitToArrayDict['D'] = 3 ; arrayToSuitDict[3] = 'D'
def setUpEvaluationDict():
evaluationDict[9] = doesContainRoyalFlush
evaluationDict[8] = doesContainStraightFlush
evaluationDict[7] = doesContainFourOfAKind
evaluationDict[6] = doesContainFullHouse
evaluationDict[5] = doesContainFlush
evaluationDict[4] = doesContainStraight
evaluationDict[3] = doesContainThreeOfAKind
evaluationDict[2] = doesContainTwoPairs
evaluationDict[1] = doesContainOnePair
evaluationDict[0] = getHighestCardInHand
def evaluate(hand, resultDict):
''' evaluate the hand. store results in a dict.
key -> weight of hand
value -> actual result from method calls. '''
for i in xrange(len(evaluationDict)):
answer = evaluationDict[i](hand)
if answer is not False:
resultDict[i] = answer
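# For example, a hand containing 5H 5C 6S 7S KD evaluates to {0: 'K', 1: '5'}:
# the highest card is a king and the only other feature is a pair of fives, so
# no higher-weighted key is set.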
def checkHighValueToTieBreak(firstHand, secondHand, ignoreList):
''' method to be used for tie breaking when two hands
have the same rank and are composed of the same cards.
In this scenario, we check the highest-valued card not part of the hand
until we find an unequal one. '''
firstNumber = secondNumber = 0
while firstNumber == secondNumber:
firstNumber = getHighestCardInHand(firstHand, ignoreList)
secondNumber = getHighestCardInHand(secondHand, ignoreList)
ignoreList.append(firstNumber)
    # compare by rank (via cardNumberToArrayDict) rather than by character,
    # so that e.g. 'A' correctly outranks 'T' and 'J'
    return cardNumberToArrayDict[firstNumber] > cardNumberToArrayDict[secondNumber]
def doesFirstHandWin(firstDict, secondDict, firstHand, secondHand):
if max(firstDict) > max(secondDict):
return True
elif max(firstDict) == max(secondDict):
maxNumber = max(firstDict)
# doesContainTwoPairs is a special case as that's where you can have two pairs.
# so for that one, just check which result is greater
checkForTwoPairTest = lambda i: evaluationDict[i] == doesContainTwoPairs
if checkForTwoPairTest(maxNumber):
# is the first greater?
if cardNumberToArrayDict[firstDict[maxNumber][1]] > cardNumberToArrayDict[secondDict[maxNumber][1]]:
return True
# now try the second.
if firstDict[maxNumber][1] == secondDict[maxNumber][1] and \
cardNumberToArrayDict[firstDict[maxNumber][0]] > cardNumberToArrayDict[secondDict[maxNumber][0]]:
return True
# finally, use the highest card in the hand as a tie breaker
if firstDict[maxNumber] == secondDict[maxNumber]:
ignoreList = list(firstDict[maxNumber])
return checkHighValueToTieBreak(firstHand, secondHand, ignoreList)
# else, for any other test, check the results. the
# result consisting of higher-valued cards wins.
elif cardNumberToArrayDict[firstDict[maxNumber]] > \
cardNumberToArrayDict[secondDict[maxNumber]]:
return True
# if the two give the same result, check for the highest valued
# card in the hand.
elif firstDict[maxNumber] == secondDict[maxNumber]:
ignoreList = list(firstDict[maxNumber])
return checkHighValueToTieBreak(firstHand, secondHand, ignoreList)
return False
def main():
start = time()
solutions = 0
setUpNumberDicts()
setUpSuitDicts()
setUpEvaluationDict()
file = open(os.path.join(os.curdir,'./requiredFiles/Problem054PokerHands.txt'), 'r')
for row in file:
row = row.rstrip()
cards = row.split()
firstHand = [[0 for column in xrange(len(suitToArrayDict))] for row in xrange(len(cardNumberToArrayDict))]
secondHand = [[0 for column in xrange(len(suitToArrayDict))] for row in xrange(len(cardNumberToArrayDict))]
for i in xrange(len(cards)):
cardNumber = cardNumberToArrayDict[cards[i][0]]
cardSuit = suitToArrayDict[cards[i][1]]
if i < len(cards) / 2:
firstHand[cardNumber][cardSuit] = 1
else:
secondHand[cardNumber][cardSuit] = 1
firstHandResulsDict = {}
secondHandResultsDict = {}
evaluate(firstHand, firstHandResulsDict)
evaluate(secondHand, secondHandResultsDict)
if doesFirstHandWin(firstHandResulsDict, secondHandResultsDict, firstHand, secondHand):
solutions += 1
print "Games in which Player 1 wins: ", solutions
end = time()
print "Runtime: ", end - start, " seconds."
if __name__ == '__main__':
main()
|
|
from . import opcodes
from ..java import opcodes as JavaOpcodes
class Command:
"""A command is a sequence of instructions producing a distinct result.
The `operation` is the final instruction that yields a result.
A series of other instructions, known as `arguments` will be used
to execute `operation`.
The command also tracks the line number and code offset that it
represents, plus whether the command is a jump target.
Each argument is itself a Command; leaf nodes are Commands with no
arguments.
A command knows how many items it will pop from the stack, and
how many it will push onto the stack. The stack count on a Command
reflects the effect of the operation itself, plus *all* the arguments.
A command may also encompass an internal block - for example, a for
    or while loop. Those blocks are represented by their own objects
    (e.g. TryExcept, ForLoop, WhileLoop), which extract and transpile their
    contents separately.
    """
def __init__(self, instruction, arguments=None):
self.operation = instruction
if arguments:
self.arguments = arguments
else:
self.arguments = []
def __repr__(self):
try:
return '<Command %s (%s args)> %s' % (self.operation.opname, len(self.arguments), self.arguments[0].operation.name)
except:
return '<Command %s (%s args)>' % (self.operation.opname, len(self.arguments))
@property
def consume_count(self):
return sum(c.consume_count for c in self.arguments) + self.operation.consume_count
@property
def product_count(self):
return sum(c.product_count for c in self.arguments) + self.operation.product_count
def is_main_start(self):
return (
self.operation.opname == 'POP_JUMP_IF_FALSE'
and self.arguments[0].operation.opname == 'COMPARE_OP' and self.arguments[0].operation.comparison == '=='
and self.arguments[0].arguments[0].operation.opname == 'LOAD_NAME' and self.arguments[0].arguments[0].operation.name == '__name__'
and self.arguments[0].arguments[1].operation.opname == 'LOAD_CONST' and self.arguments[0].arguments[1].operation.const == '__main__'
)
def is_main_end(self, main_end):
if main_end == self.operation.python_offset:
return True
elif self.arguments and main_end <= self.arguments[0].operation.python_offset:
return True
return False
def dump(self, depth=0):
for op in self.arguments:
op.dump(depth=depth + 1)
print ('%s%4s:%4d -%s +%s' % (
'>' if self.operation.is_jump_target else ' ',
self.operation.starts_line if self.operation.starts_line is not None else ' ',
self.operation.python_offset,
self.operation.consume_count,
self.operation.product_count
) + ' ' * depth, self.operation)
def transpile(self, context):
self.operation.transpile(context, self.arguments)
class TryExcept:
def __init__(self, start, end, start_offset, end_offset, starts_line):
self.start = start
self.end = end
self.start_offset = start_offset
self.end_offset = end_offset
self.starts_line = starts_line
self.commands = []
self.exceptions = []
self.else_block = None
self.finally_block = None
def __repr__(self):
return '<Try %s-%s | %s%s%s>' % (
self.start,
self.end,
', '.join(str(handler) for handler in self.exceptions),
' %s' % self.else_block if self.else_block else '',
' %s' % self.finally_block if self.finally_block else ''
)
@property
def resume_index(self):
if self.finally_block and self.exceptions:
return self.start - 2
else:
return self.start - 1
@property
def consume_count(self):
return sum(c.consume_count for c in self.commands)
@property
def product_count(self):
return sum(c.product_count for c in self.commands)
def is_main_start(self):
return False
def is_main_end(self, main_end):
return False
def dump(self, depth=0):
print (' %4s:%4d ' % (
self.starts_line if self.starts_line is not None else ' ',
self.start_offset,
) + ' ' * depth,
'TRY:'
)
for command in self.commands:
command.dump(depth=depth + 1)
for handler in self.exceptions:
handler.dump(depth=depth)
if self.else_block:
self.else_block.dump(depth=depth)
if self.finally_block:
self.finally_block.dump(depth=depth)
print (' :%4d ' % (
self.end_offset,
) + ' ' * depth,
'END TRY'
)
def extract(self, instructions, blocks):
self.operation = instructions[self.start - 1]
i = self.end
self.commands = []
while i > self.start:
i, command = extract_command(instructions, blocks, i, self.start)
self.commands.append(command)
self.commands.reverse()
for handler in self.exceptions:
handler.extract(instructions, blocks)
if self.else_block:
self.else_block.extract(instructions, blocks)
if self.finally_block:
self.finally_block.extract(instructions, blocks)
def transpile(self, context):
context.add_opcodes(opcodes.TRY(
self.else_block,
self.finally_block
))
for command in self.commands:
command.transpile(context)
for handler in self.exceptions:
# Define the exception handler.
# On entry to the exception, the stack will contain
# a single value - the exception being thrown.
# This exception must be wrapped into an org/python/types/Object
# so it can be used as an argument elsewhere.
if len(handler.exceptions) > 1: # catch multiple - except (A, B) as v:
context.add_opcodes(
opcodes.CATCH([
'org/python/exceptions/%s' % e
for e in handler.exceptions
]),
)
if handler.var_name:
context.store_name(handler.var_name),
else:
# No named exception, but there is still an exception
# on the stack. Pop it off.
context.add_opcodes(JavaOpcodes.POP())
handler.transpile(context)
elif len(handler.exceptions) == 1: # catch single - except A as v:
context.add_opcodes(
opcodes.CATCH('org/python/exceptions/%s' % handler.exceptions[0]),
)
if handler.var_name:
context.store_name(handler.var_name),
else:
# No named exception, but there is still an exception
# on the stack. Pop it off.
context.add_opcodes(JavaOpcodes.POP())
handler.transpile(context)
else:
# The bucket case - except:
# No named exception, but there is still an exception
# on the stack. Pop it off.
context.add_opcodes(
opcodes.CATCH(),
JavaOpcodes.POP(),
)
handler.transpile(context)
if self.finally_block:
context.add_opcodes(
opcodes.FINALLY(),
)
opcodes.ASTORE_name(context, '##exception-%d##' % id(self))
for command in self.finally_block.commands:
command.transpile(context)
opcodes.ALOAD_name(context, '##exception-%d##' % id(self)),
context.add_opcodes(
JavaOpcodes.ATHROW(),
)
context.add_opcodes(opcodes.END_TRY())
class ExceptionBlock:
def __init__(self, exceptions, var_name, start, end, start_offset, end_offset, starts_line):
self.exceptions = exceptions
self.var_name = var_name
self.start = start
self.end = end
self.start_offset = start_offset
self.end_offset = end_offset
self.starts_line = starts_line
self.commands = []
def __repr__(self):
if self.exceptions:
if self.var_name:
return '%s (%s): %s-%s' % (','.join(self.exceptions), self.var_name, self.start, self.end)
else:
return '%s: %s-%s' % (','.join(self.exceptions), self.start, self.end)
else:
return 'Bucket: %s-%s' % (self.start, self.end)
def dump(self, depth=0):
print (' %4s:%4d ' % (
self.starts_line if self.starts_line is not None else ' ',
self.start_offset,
) + ' ' * depth,
'CATCH %s%s:' % (
', '.join(self.exceptions) if self.exceptions else '',
' as %s' % self.var_name if self.var_name else '',
)
)
for command in self.commands:
command.dump(depth=depth + 1)
def extract(self, instructions, blocks):
i = self.end
self.commands = []
while i > self.start:
i, command = extract_command(instructions, blocks, i, self.start)
self.commands.append(command)
self.commands.reverse()
def transpile(self, context):
context.next_opcode_starts_line = self.starts_line
for command in self.commands:
command.transpile(context)
class FinallyBlock:
def __init__(self, start, end, start_offset, end_offset, starts_line):
self.start = start
self.end = end
self.start_offset = start_offset
self.end_offset = end_offset
self.starts_line = starts_line
self.commands = []
def __repr__(self):
return 'Finally: %s-%s' % (self.start, self.end)
def dump(self, depth=0):
print (' %4s:%4d ' % (
self.starts_line if self.starts_line is not None else ' ',
self.start_offset,
) + ' ' * depth,
'FINALLY:'
)
for command in self.commands:
command.dump(depth=depth + 1)
def extract(self, instructions, blocks):
i = self.end
self.commands = []
while i > self.start:
i, command = extract_command(instructions, blocks, i, self.start)
self.commands.append(command)
self.commands.reverse()
def transpile(self, context):
context.next_opcode_starts_line = self.starts_line
for command in self.commands:
command.transpile(context)
class ElseBlock:
def __init__(self, start, end, start_offset, end_offset, starts_line):
self.start = start
self.end = end
self.start_offset = start_offset
self.end_offset = end_offset
self.starts_line = starts_line
self.commands = []
def __repr__(self):
return 'Else: %s-%s' % (self.start, self.end)
def dump(self, depth=0):
print (' %4s:%4d ' % (
self.starts_line if self.starts_line is not None else ' ',
self.start_offset,
) + ' ' * depth,
'ELSE:'
)
for command in self.commands:
command.dump(depth=depth + 1)
def extract(self, instructions, blocks):
i = self.end
self.commands = []
while i > self.start:
i, command = extract_command(instructions, blocks, i, self.start, literal=(i == self.end))
self.commands.append(command)
self.commands.reverse()
def transpile(self, context):
context.next_opcode_starts_line = self.starts_line
for command in self.commands:
command.transpile(context)
class ForLoop:
def __init__(self, start, loop, varname, end, start_offset, loop_offset, end_offset, starts_line):
self.start = start
self.loop = loop
self.end = end
self.varname = varname
self.start_offset = start_offset
self.loop_offset = loop_offset
self.end_offset = end_offset
self.starts_line = starts_line
self.loop_commands = []
self.commands = []
def __repr__(self):
return '<For %s: %s-%s>' % (
self.start,
self.loop,
self.end,
)
@property
def consume_count(self):
return sum(c.consume_count for c in self.commands)
@property
def product_count(self):
return sum(c.product_count for c in self.commands)
@property
def resume_index(self):
return self.start - 1
def is_main_start(self):
return False
def is_main_end(self, main_end):
return False
def dump(self, depth=0):
print (' %4s:%4d ' % (
self.starts_line if self.starts_line is not None else ' ',
self.start_offset,
) + ' ' * depth,
'FOR:'
)
for command in self.loop_commands:
command.dump(depth=depth + 1)
print (' :%4d ' % (
self.loop_offset,
) + ' ' * depth,
'LOOP:'
)
for command in self.commands:
command.dump(depth=depth + 1)
print (' :%4d ' % (
self.end_offset,
) + ' ' * depth,
'END FOR'
)
def extract(self, instructions, blocks):
# Collect the commands related to setting up the loop variable
i = self.end
while i > self.loop:
i, command = extract_command(instructions, blocks, i, self.loop)
self.commands.append(command)
self.commands.reverse()
# Collect the commands for the actual loop
i = self.loop - 2
while i > self.start:
i, command = extract_command(instructions, blocks, i, self.start)
self.loop_commands.append(command)
self.loop_commands.reverse()
def pre_loop(self, context):
pass
def pre_iteration(self, context):
context.add_opcodes(
JavaOpcodes.DUP(),
)
def post_loop(self, context):
context.add_opcodes(
JavaOpcodes.POP(),
)
def transpile(self, context):
context.next_opcode_starts_line = self.starts_line
self.pre_loop(context)
for command in self.loop_commands:
command.transpile(context)
loop = opcodes.START_LOOP()
context.add_opcodes(
loop,
opcodes.TRY(),
)
self.pre_iteration(context)
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE('org/python/Iterable', '__next__', '()Lorg/python/Object;'),
opcodes.CATCH('org/python/exceptions/StopIteration'),
)
self.post_loop(context)
context.add_opcodes(
opcodes.jump(JavaOpcodes.GOTO(0), context, loop, opcodes.Opcode.NEXT),
opcodes.END_TRY(),
)
context.store_name(self.varname),
for command in self.commands:
command.transpile(context)
context.add_opcodes(opcodes.END_LOOP())
class ComprehensionForLoop(ForLoop):
def __init__(self, start, loop, varname, end, start_offset, loop_offset, end_offset, starts_line):
super().__init__(start, loop, varname, end, start_offset, loop_offset, end_offset, starts_line)
def pre_loop(self, context):
context.store_name('##FOR-%s' % id(self)),
context.load_name('##FOR-%s' % id(self)),
def pre_iteration(self, context):
context.add_opcodes(
JavaOpcodes.DUP(),
)
context.load_name('.0'),
def post_loop(self, context):
context.add_opcodes(
JavaOpcodes.POP(),
)
context.load_name('##FOR-%s' % id(self)),
class WhileLoop:
def __init__(self, start, end, start_offset, end_offset, starts_line):
self.start = start
self.end = end
self.start_offset = start_offset
self.end_offset = end_offset
self.starts_line = starts_line
self.commands = []
def __repr__(self):
        return '<While %s-%s>' % (
self.start,
self.end,
)
@property
def consume_count(self):
return sum(c.consume_count for c in self.commands)
@property
def product_count(self):
return sum(c.product_count for c in self.commands)
@property
def resume_index(self):
return self.start - 1
def is_main_start(self):
return False
def is_main_end(self, main_end):
return False
def dump(self, depth=0):
print (' %4s:%4d ' % (
self.starts_line if self.starts_line is not None else ' ',
self.start_offset,
) + ' ' * depth,
'WHILE:'
)
for command in self.commands:
command.dump(depth=depth + 1)
print (' :%4d ' % (
self.end_offset,
) + ' ' * depth,
'END WHILE'
)
def extract(self, instructions, blocks):
self.operation = instructions[self.start]
i = self.end
self.commands = []
while i > self.start:
i, command = extract_command(instructions, blocks, i, self.start)
self.commands.append(command)
self.commands.reverse()
def transpile(self, context):
context.next_opcode_starts_line = self.starts_line
context.add_opcodes(opcodes.START_LOOP())
for command in self.commands:
command.transpile(context)
end_loop = opcodes.END_LOOP()
context.add_opcodes(end_loop)
context.jump_targets[self.end_offset] = end_loop
def find_try_except(offset_index, instructions, i):
instruction = instructions[i]
try_start_index = i + 1
try_end_index = offset_index[instruction.argval] - 2
# Find the end of the entire try block
end_jump_index = offset_index[instruction.argval] - 1
end_block_offset = instructions[end_jump_index].argval
end_block_index = offset_index[end_block_offset]
while instructions[end_block_index].opname != 'END_FINALLY':
end_block_index -= 1
# print("START INDEX", try_start_index)
# print("START OFFSET", instructions[try_start_index].offset)
# print("TRY END INDEX", try_end_index)
# print("TRY END OFFSET", instructions[try_end_index].offset)
# print("END INDEX", end_block_index)
# print("END OFFSET", instructions[end_block_index].offset)
block = TryExcept(
start=try_start_index,
end=try_end_index,
start_offset=instructions[try_start_index].offset,
end_offset=instructions[try_end_index].offset,
starts_line=instruction.starts_line
)
# find all the except blocks
i = offset_index[instruction.argval] + 1
while i < end_block_index:
exceptions = []
starts_line = instructions[offset_index[instruction.argval]].starts_line
while instructions[i].opname == 'LOAD_NAME':
exceptions.append(instructions[i].argval)
i = i + 1
# If there's more than 1 exception named, there will be
# a BUILD_TUPLE instruction that needs to be skipped.
if len(exceptions) > 1:
i = i + 1
if instructions[i].opname == 'COMPARE_OP':
# An exception has been explicitly named
i = i + 3
# print ("CHECK", i, instructions[i].opname)
if instructions[i].opname == 'POP_TOP':
# Exception is specified, but not a name.
var_name = None
except_start_index = i + 2
# print("EXCEPT START", except_start_index)
elif instructions[i].opname == 'STORE_NAME':
var_name = instructions[i].argval
except_start_index = i + 3
# print("EXCEPT START e", except_start_index)
else:
i = i + 3
# Exception is specified, but not a name.
var_name = None
except_start_index = i
# print("EXCEPT START anon", except_start_index)
while not (instructions[i].opname in ('JUMP_FORWARD', 'JUMP_ABSOLUTE') and instructions[i].argval >= end_block_offset):
i = i + 1
if var_name:
except_end_index = i - 7
else:
except_end_index = i - 1
jump_offset = instructions[i].argval
# print("EXCEPT END", except_end_index)
# Step forward to the start of the next except block
# (or the end of the try/catch)
i = i + 2
block.exceptions.append(
ExceptionBlock(
exceptions=exceptions,
var_name=var_name,
start=except_start_index,
end=except_end_index,
start_offset=instructions[except_start_index].offset,
end_offset=instructions[except_end_index].offset,
starts_line=starts_line
)
)
if jump_offset > end_block_offset:
start_else_index = end_block_index + 1
end_else_index = offset_index[jump_offset]
if instructions[end_else_index-1].opname == 'JUMP_FORWARD':
end_else_index -= 1
block.else_block = ElseBlock(
start=start_else_index,
end=end_else_index,
start_offset=instructions[start_else_index].offset,
end_offset=jump_offset,
starts_line=instructions[end_block_index].starts_line
)
i = end_else_index
return i, block
def find_blocks(instructions):
offset_index = {}
# print(">>>>>" * 10)
for i, instruction in enumerate(instructions):
# print("%4d:%4d %s %s" % (i, instruction.offset, instruction.opname, instruction.argval if instruction.argval is not None else ''))
offset_index[instruction.offset] = i
# print(">>>>>" * 10)
blocks = {}
i = 0
while i < len(instructions):
instruction = instructions[i]
if instruction.opname == 'SETUP_EXCEPT':
i, block = find_try_except(offset_index, instructions, i)
blocks[i - 1] = block
elif instruction.opname == 'SETUP_FINALLY':
start_index = offset_index[instruction.argval]
# print("FINALLY START INDEX", start_index)
# print("FINALLY START OFFSET", instructions[start_index].offset)
i = i + 1
if instructions[i].opname == 'SETUP_EXCEPT':
i, block = find_try_except(offset_index, instructions, i)
else:
# print("START INDEX", i)
# print("START OFFSET", instructions[i].offset)
# print("END INDEX", start_index - 2)
# print("END OFFSET", instructions[start_index - 2].offset)
block = TryExcept(
start=i,
end=start_index - 2,
start_offset=instructions[i].offset,
end_offset=instructions[start_index - 2].offset,
starts_line=instruction.starts_line
)
i = i + 1
while instructions[i].opname != 'END_FINALLY':
i = i + 1
# print("FINALLY END INDEX", i)
# print("FINALLY END OFFSET", instructions[i].offset)
block.finally_block = FinallyBlock(
start=start_index,
end=i,
start_offset=instructions[start_index].offset,
end_offset=instructions[i].offset,
starts_line=instruction.starts_line
)
blocks[i] = block
i = i + 1
elif instruction.opname == 'SETUP_LOOP':
i = i + 1
start_index = i
while instructions[i].opname not in ('FOR_ITER', 'POP_JUMP_IF_FALSE'):
i = i + 1
# Find the end of the entire loop block.
# Ignore the final instruction to jump back to the start.
end_offset = instructions[i].argval
end_index = offset_index[end_offset] - 1
# print("START INDEX", start_index)
# print("START OFFSET", instructions[start_index].offset)
# print("END INDEX", end_index)
# print("END OFFSET", end_offset)
if instructions[i].opname == 'FOR_ITER':
loop_offset = instructions[i + 2].offset
loop_index = offset_index[loop_offset]
# print("LOOP INDEX", loop_index)
# print("LOOP OFFSET", loop_offset)
# print("LOOP VAR", instructions[loop_index - 1].argval)
block = ForLoop(
start=start_index,
loop=loop_index,
varname=instructions[loop_index - 1].argval,
end=end_index,
start_offset=instructions[start_index].offset,
loop_offset=loop_offset,
end_offset=end_offset,
starts_line=instruction.starts_line
)
else:
block = WhileLoop(
start=start_index,
end=end_index,
start_offset=instructions[start_index].offset,
end_offset=end_offset,
starts_line=instruction.starts_line,
)
blocks[end_index + 1] = block
i = i + 1
elif instruction.opname == 'FOR_ITER':
i = i + 1
start_index = i - 1
# Find the end of the entire loop block.
# Ignore the final instruction to jump back to the start.
end_offset = instruction.argval
end_index = offset_index[end_offset] - 1
# print("START INDEX", start_index)
# print("START OFFSET", instructions[start_index].offset)
# print("END INDEX", end_index)
# print("END OFFSET", end_offset)
loop_offset = instructions[i+1].offset
loop_index = offset_index[loop_offset]
# print("LOOP INDEX", loop_index)
# print("LOOP OFFSET", loop_offset)
# print("LOOP VAR", instructions[loop_index].argval)
block = ComprehensionForLoop(
start=start_index,
loop=loop_index,
varname=instructions[loop_index - 1].argval,
end=end_index,
start_offset=instructions[start_index].offset,
loop_offset=loop_offset,
end_offset=end_offset,
starts_line=instruction.starts_line
)
blocks[end_index + 1] = block
i = i + 1
else:
i = i + 1
return blocks
def extract_command(instructions, blocks, i, start_index=0, literal=False):
"""Extract a single command from the end of the instruction list.
See the definition of Command for details on the recursive nature
of commands. We start at the *end* of the instruction list and
work backwards because each command is essentially working towards
a final result; each Command can be thought of as a "result".
"""
i = i - 1
instruction = instructions[i]
argval = instruction.argval
OpType = getattr(opcodes, instruction.opname)
# If this instruction is preceded by EXTENDED_ARG, then
    # there is more argument information to come. Integrate it
# into the instruction argument we've already read.
if i > 0 and instructions[i - 1].opname == 'EXTENDED_ARG':
i = i - 1
extended = instructions[i]
argval = argval | extended.argval
try:
if literal:
raise KeyError()
# If this is a known block, defer to the block for
# extraction instructions.
cmd = blocks[i]
cmd.extract(instructions, blocks)
i = cmd.resume_index
except KeyError:
if instruction.arg is None:
opcode = OpType(instruction.offset, instruction.starts_line, instruction.is_jump_target)
else:
opcode = OpType(argval, instruction.offset, instruction.starts_line, instruction.is_jump_target)
cmd = Command(opcode)
# print('>', i, instruction.offset, cmd.operation.opname, cmd.operation.consume_count)
required = cmd.operation.consume_count
while required > 0 and i > start_index:
i, arg = extract_command(instructions, blocks, i)
cmd.arguments.append(arg)
required = required - arg.product_count + arg.consume_count
# print('<', i, instruction.offset, cmd.operation.opname)
# Since we did everything backwards, reverse to get
# arguments back in the right order.
cmd.arguments.reverse()
return i, cmd
|
|
###
# Copyright (c) 2003-2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import time
import os
import shutil
import tempfile
import supybot.conf as conf
import supybot.utils as utils
from supybot.commands import *
import supybot.schedule as schedule
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Scheduler')
import supybot.world as world
import supybot.utils.minisix as minisix
pickle = minisix.pickle
datadir = conf.supybot.directories.data()
filename = conf.supybot.directories.data.dirize('Scheduler.pickle')
class Scheduler(callbacks.Plugin):
"""This plugin allows you to schedule commands to execute at a later time."""
def __init__(self, irc):
self.__parent = super(Scheduler, self)
self.__parent.__init__(irc)
self.events = {}
self._restoreEvents(irc)
world.flushers.append(self._flush)
def _restoreEvents(self, irc):
try:
pkl = open(filename, 'rb')
try:
eventdict = pickle.load(pkl)
except Exception as e:
self.log.debug('Unable to load pickled data: %s', e)
return
finally:
pkl.close()
except IOError as e:
self.log.debug('Unable to open pickle file: %s', e)
return
for name, event in eventdict.items():
ircobj = callbacks.ReplyIrcProxy(irc, event['msg'])
try:
if event['type'] == 'single': # non-repeating event
n = None
if schedule.schedule.counter > int(name):
# counter not reset, we're probably reloading the plugin
# though we'll never know for sure, because other
# plugins can schedule stuff, too.
n = int(name)
self._add(ircobj, event['msg'],
event['time'], event['command'], n)
elif event['type'] == 'repeat': # repeating event
self._repeat(ircobj, event['msg'], name,
event['time'], event['command'], False)
except AssertionError as e:
if str(e) == 'An event with the same name has already been scheduled.':
# we must be reloading the plugin, event is still scheduled
self.log.info('Event %s already exists, adding to dict.' % (name,))
self.events[name] = event
else:
raise
def _flush(self):
try:
pklfd, tempfn = tempfile.mkstemp(suffix='scheduler', dir=datadir)
pkl = os.fdopen(pklfd, 'wb')
try:
pickle.dump(self.events, pkl)
except Exception as e:
self.log.warning('Unable to store pickled data: %s', e)
pkl.close()
shutil.move(tempfn, filename)
except (IOError, shutil.Error) as e:
self.log.warning('File error: %s', e)
def die(self):
self._flush()
world.flushers.remove(self._flush)
self.__parent.die()
def _makeCommandFunction(self, irc, msg, command, remove=True):
"""Makes a function suitable for scheduling from command."""
tokens = callbacks.tokenize(command)
def f():
if remove:
del self.events[str(f.eventId)]
self.Proxy(irc.irc, msg, tokens)
return f
def _add(self, irc, msg, t, command, name=None):
f = self._makeCommandFunction(irc, msg, command)
id = schedule.addEvent(f, t, name)
f.eventId = id
self.events[str(id)] = {'command':command,
'msg':msg,
'time':t,
'type':'single'}
return id
@internationalizeDocstring
def add(self, irc, msg, args, seconds, command):
"""<seconds> <command>
Schedules the command string <command> to run <seconds> seconds in the
future. For example, 'scheduler add [seconds 30m] "echo [cpu]"' will
schedule the command "cpu" to be sent to the channel the schedule add
command was given in (with no prefixed nick, a consequence of using
echo). Do pay attention to the quotes in that example.
"""
t = time.time() + seconds
id = self._add(irc, msg, t, command)
irc.replySuccess(format(_('Event #%i added.'), id))
add = wrap(add, ['positiveInt', 'text'])
@internationalizeDocstring
def remove(self, irc, msg, args, id):
"""<id>
Removes the event scheduled with id <id> from the schedule.
"""
if id in self.events:
del self.events[id]
try:
id = int(id)
except ValueError:
pass
try:
schedule.removeEvent(id)
irc.replySuccess()
except KeyError:
irc.error(_('Invalid event id.'))
else:
irc.error(_('Invalid event id.'))
remove = wrap(remove, ['lowered'])
def _repeat(self, irc, msg, name, seconds, command, now=True):
f = self._makeCommandFunction(irc, msg, command, remove=False)
id = schedule.addPeriodicEvent(f, seconds, name, now)
assert id == name
self.events[name] = {'command':command,
'msg':msg,
'time':seconds,
'type':'repeat'}
@internationalizeDocstring
def repeat(self, irc, msg, args, name, seconds, command):
"""<name> <seconds> <command>
Schedules the command <command> to run every <seconds> seconds,
starting now (i.e., the command runs now, and every <seconds> seconds
thereafter). <name> is a name by which the command can be
unscheduled.
"""
name = name.lower()
if name in self.events:
irc.error(_('There is already an event with that name, please '
'choose another name.'), Raise=True)
self._repeat(irc, msg, name, seconds, command)
# We don't reply because the command runs immediately.
# But should we? What if the command doesn't have visible output?
# irc.replySuccess()
repeat = wrap(repeat, ['nonInt', 'positiveInt', 'text'])
@internationalizeDocstring
def list(self, irc, msg, args):
"""takes no arguments
Lists the currently scheduled events.
"""
L = list(self.events.items())
if L:
L.sort()
for (i, (name, command)) in enumerate(L):
L[i] = format('%s: %q', name, command['command'])
irc.reply(format('%L', L))
else:
irc.reply(_('There are currently no scheduled commands.'))
list = wrap(list)
Class = Scheduler
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
|
#!/usr/bin/python
import sys, re, random, os, multiprocessing
import argparse
import json
import GenePredBasics, PSLBasics, FileBasics, BigFileBasics
from shutil import rmtree
# Pre: A long read psl file. Any number of genepred files in the format "Gene Predictions and RefSeq Genes with Gene Names".
# gzipped files are supported
# Post: A table with one row for each long read, and two columns for each annotation file
# The columns for each annotation file correspond to Genes, and then Transcripts
# It is possible for multiple genes or multiple transcripts to be reported back,
# and in those cases they will be comma separated.
# <read_id (unique)> <read_name> <read exon count> <gpd_1:gene> <gpd_1:transcript> ... <gpd_N:gene> <gpd_N:transcript>
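# Hypothetical example row (placeholder values) for two annotation files:
#   1   read_0001   5   GeneA   TranscriptA1,TranscriptA2   GeneA   TranscriptA1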
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--output','-o',help='FILENAME output file, default: STDOUT')
  parser.add_argument('--rawoutput',help='FILENAME to write a db friendly output before any choosing of best hits or reformatting of the report takes place')
  parser.add_argument('--bestoutput',help='FILENAME to write a db friendly output after choosing best hits but before reformatting of the report takes place')
parser.add_argument('--gpdout',help='FILENAME location to output the psl files genePred conversion')
group = parser.add_mutually_exclusive_group()
  group.add_argument('--tempdir',default='/tmp',help='DIRECTORY where a temporary directory will be made and used')
  group.add_argument('--specific_tempdir',help='DIRECTORY the exact tempdir to be used')
parser.add_argument('--closegap',type=int,default=68,help='INT close gaps less than or equal to this')
parser.add_argument('--jobsize',type=int,default=500000,help='not a very important parameter, it just says how many jobs to send to a thread at a time INT')
parser.add_argument('--threads',type=int,help='INT number of threads default: cpu_count')
parser.add_argument('--minoverlap',default='0,0.8,0',help='FLOATLIST First exon, inner exons, Last exon requirements. Zero indicates that any overlap will suffice.')
parser.add_argument('--mincoverage',type=float,default=0,help='FLOAT fraction of overall coverage we want to make a call')
parser.add_argument('--minmatchbases',type=int,default=100,help='INT minimum number of bases needed to call a match default (100)')
parser.add_argument('--debug',action='store_true',help='do not remove temporary files on exit, for debugging')
parser.add_argument('--input_is_gpd',action='store_true',help='instead of PSL the input is already genepred.')
parser.add_argument('pslfile',help='FILENAME PSL filename to annotate (use - for STDIN)')
parser.add_argument('gpdfile',nargs='+',help='FILENAME(S) genePred file(s) providing annotations')
args = parser.parse_args()
# There is a lot going on so lets fill up this params dictionary with
# variables that we will be using. Starting with the command line args
params = {}
params['args'] = args
if args.specific_tempdir:
params['tdir'] = args.specific_tempdir.rstrip('/')
else:
tbase = args.tempdir
if args.tempdir:
tbase = args.tempdir.rstrip('/')
if not os.path.exists(tbase):
sys.stderr.write("Error: temp directory does not exist. "+args.tempdir)
return
# Store our actual temporary directory that we will create and write to
params['tdir'] = tbase + '/weirathe.ea' + str(random.randint(1,1000000000))
# Convert the overlap fraction string to an array of floats
params['overlap_fraction'] = [float(x) for x in args.minoverlap.split(',')]
sys.stderr.write("Temp directory: "+params['tdir']+"\n")
# make the temporary directory
if not os.path.exists(params['tdir']):
os.makedirs(params['tdir'])
# where to output results to
of = sys.stdout
if args.output:
of = open(args.output,'w')
# where to output results to
if args.gpdout:
#just making sure we can write here so we don't get a surprise later
gpdof = open(args.gpdout,'w')
sys.stderr.write("pslfile: "+args.pslfile+"\n")
sys.stderr.write("Converting psl file to gpd\n")
# write the gpd from the psl file
if not args.input_is_gpd:
sys.stderr.write("closegap: "+str(params['args'].closegap)+"\n")
parse_pslfile(params['tdir'],params['args'].pslfile,params['args'].closegap)
else:
parse_gpdfile(params['tdir'],params['args'].pslfile,params['args'].closegap)
# save the genepred if we want it
if args.gpdout:
sys.stderr.write("saving genepred conversion of psl file to: "+args.gpdout+"\n")
with open(params['tdir']+'/longreads.gpd') as gf:
for line in gf: gpdof.write(line)
gpdof.close()
# break the new gpd into jobs
sys.stderr.write("Splitting job\n")
params['num_jobs'] = break_gpdfile(params['tdir'],params['args'].jobsize)
simplenames = [] #names to be used to label columns
for file in params['args'].gpdfile:
m = re.search('([^\/]+$)',file)
name = file
sys.stderr.write(" "+name+"\n")
if m: name = m.group(1)
simplenames.append(name)
params['simplenames'] = simplenames
# convert reference genepreds to bed files
sys.stderr.write("Parsing reference file\n")
parse_refgpd(params['tdir'],params['args'].gpdfile,params['simplenames'])
if not params['args'].threads: params['args'].threads = multiprocessing.cpu_count()
if params['args'].threads > 1:
p = multiprocessing.Pool(processes=params['args'].threads)
# make a job list
sys.stderr.write("Entering multiprocessing annotations "+str(params['num_jobs'])+" jobs on "+str(params['args'].threads)+" cpus\n")
for j in range(1,params['num_jobs']+1):
# The business happens here with execute job.
if params['args'].threads > 1:
p.apply_async(execute_job,[params['tdir'],j,params['args'].gpdfile,params['overlap_fraction'],params['args'].minmatchbases])
else:
execute_job(params['tdir'],j,params['args'].gpdfile,params['overlap_fraction'],params['args'].minmatchbases)
#execute_job(params['tdir'],j,params['args'].gpdfile,params['overlap_fraction'],params['args'].minmatchbases)
if params['args'].threads > 1:
p.close()
p.join()
# Print out the raw data here if we want it
# and save our best match per read/gpd
read_gpd = {}
columns = {}
ostring = "psl_entry_id\tread_name\tread_exons\treference_exons\tgpd_column_number\t"
ostring += "gpd_name\tgene_name\ttranscript_name\tfragment_report\talignment_classification\tsplit_alignment\ttotal_aligned_bases\ttotal_aligned_exons\tlongest_fragment_bases\tlongest_fragment_exons\treference_length\n"
if params['args'].rawoutput:
of_raw = open(params['args'].rawoutput,'w')
of_raw.write(ostring)
for j in range(1,params['num_jobs']+1):
with open(params['tdir']+"/annotated_match."+str(j)+".txt") as inf:
for line in inf:
f = line.rstrip("\n").split("\t")
my_gpd = f[5]
my_read = f[1]
columns[int(f[4])] = my_gpd
if my_read not in read_gpd:
read_gpd[my_read] = {}
if my_gpd not in read_gpd[my_read]:
read_gpd[my_read][my_gpd] = {}
read_gpd[my_read][my_gpd]['Full'] = {}
read_gpd[my_read][my_gpd]['Full']['best_hit'] = False
read_gpd[my_read][my_gpd]['Full']['matches'] = 0
read_gpd[my_read][my_gpd]['Partial'] = {}
read_gpd[my_read][my_gpd]['Partial']['best_hit'] = False
read_gpd[my_read][my_gpd]['Partial']['matches'] = 0
read_gpd[my_read][my_gpd]['Best'] = False
total_matches = int(f[11])
if f[9] == 'Full' and total_matches > read_gpd[my_read][my_gpd]['Full']['matches']:
read_gpd[my_read][my_gpd]['Full']['matches'] = total_matches
read_gpd[my_read][my_gpd]['Full']['best_hit'] = f
if f[9] == 'Partial' and total_matches > read_gpd[my_read][my_gpd]['Partial']['matches']:
read_gpd[my_read][my_gpd]['Partial']['matches'] = total_matches
read_gpd[my_read][my_gpd]['Partial']['best_hit'] = f
if params['args'].rawoutput: of_raw.write(line.rstrip("\n")+"\n")
if params['args'].bestoutput:
ostring = "psl_entry_id\tread_name\tread_exons\treference_exons\tgpd_column_number\t"
ostring += "gpd_name\tgene_name\ttranscript_name\tfragment_report\talignment_classification\tsplit_alignment\ttotal_aligned_bases\ttotal_aligned_exons\tlongest_fragment_bases\tlongest_fragment_exons\treference_length\n"
ofbest = open(params['args'].bestoutput,'w')
ofbest.write(ostring)
for read in read_gpd:
for gpd in read_gpd[read]:
if read_gpd[read][gpd]['Full']['matches'] > 0:
cov = get_cov(read_gpd[read][gpd]['Full']['best_hit'][11],read_gpd[read][gpd]['Full']['best_hit'][15])
if not params['args'].mincoverage or cov >= params['args'].mincoverage:
if params['args'].bestoutput: ofbest.write("\t".join(read_gpd[read][gpd]['Full']['best_hit'])+"\n")
read_gpd[read][gpd]['Best'] = read_gpd[read][gpd]['Full']['best_hit']
elif read_gpd[read][gpd]['Partial']['matches'] > 0:
cov = get_cov(read_gpd[read][gpd]['Partial']['best_hit'][11],read_gpd[read][gpd]['Partial']['best_hit'][15])
if not params['args'].mincoverage or cov >= params['args'].mincoverage:
if params['args'].bestoutput: ofbest.write("\t".join(read_gpd[read][gpd]['Partial']['best_hit'])+"\n")
read_gpd[read][gpd]['Best'] = read_gpd[read][gpd]['Partial']['best_hit']
if params['args'].bestoutput: ofbest.close()
#Now let's do the final report-form output
colnums = sorted(columns.keys())
ostring = "read\t"
for colnum in colnums:
ostring += 'genes:'+columns[colnum]+"\t"
ostring += 'transcripts:'+columns[colnum]+"\t"
ostring += 'classification:'+columns[colnum]+"\t"
ostring = ostring[:-1]
of.write(ostring+"\n")
for read in read_gpd:
ostring = read + "\t"
seen = 0
for colnum in colnums:
done = 0
if columns[colnum] in read_gpd[read]:
if read_gpd[read][columns[colnum]]['Best']:
ostring += read_gpd[read][columns[colnum]]['Best'][6] + "\t"
ostring += read_gpd[read][columns[colnum]]['Best'][7] + "\t"
ostring += read_gpd[read][columns[colnum]]['Best'][9] + "\t"
done = 1
seen = 1
if done == 0:
ostring += "\t"
ostring = ostring[:-1]
if seen == 1:
of.write(ostring+"\n")
of.close()
if not params['args'].debug and not params['args'].specific_tempdir: rmtree(params['tdir'])
def get_cov(f1,f2):
if f1 <= 0 or f2 <= 0: return 0
return min(float(f1)/float(f2),float(f2)/float(f1))
# This is how we call the process of working on one of our results
# Pre: Temporary Directory, job number, list of genepred files, overlap_fraction
# where overlap fraction is an array of the required overlap for the
# first, internal, and last exons
#
def execute_job(tdir,j,geneprednames,overlap_fraction,min_match_bp):
of = open(tdir+'/annotated_match.'+str(j)+'.txt','w')
for i in range(1,len(geneprednames)+1):
# Assign jobid as the job number, and the genepred column number
jobid = str(j)+"_"+str(i)
pre_annotate(tdir,tdir+'/reference.'+str(i)+'.bed',tdir+'/partreads.'+str(j)+'.bed',of,jobid,overlap_fraction,min_match_bp)
of.close()
#annotate(tdir,j,geneprednames)
return jobid
# This pre-annotate is where we actually overlap the files
# We do an intersection operation, then we read through the
# intersection seeing if it meets criteria for matching
# Pre: Temporary Directory
# Reference genepred bed file
# long reads job bed file
# output file handle for 'unannotated full match'
# jobid jobnumber underscore column number (genepred file number)
# overlap fraction
# Post:  writes the bed intersection into intersect.(jobid).bed and, for each
#        read/reference pair passing the overlap criteria, appends one line to
#        annotated_match.(job).txt with the columns:
#        psl_entry_id, read_name, read_exons, reference_exons,
#        gpd_column_number, gpd_name, gene_name, transcript_name,
#        fragment_report, alignment_classification, split_alignment,
#        total_aligned_bases, total_aligned_exons, longest_fragment_bases,
#        longest_fragment_exons, reference_length
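#
# Column layout of each `bedtools intersect -wo` line as consumed below
# (inferred from parse_refgpd and break_gpdfile; A = reference bed, B = read bed):
#   f[0]-f[8]   A: chrom, start, end, ref entry number, gene name,
#               transcript name, ref exon count, strand, ref exon number
#   f[9]-f[16]  B: chrom, start, end, psl entry number, read name,
#               read exon count, strand, read exon number
#   f[17]       number of overlapping bases reported by -wo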
def pre_annotate(tdir,ref_file,obs_file,of,jobid,overlap_fraction,min_match_bp):
#print ref_file + "\t" + obs_file
cmd = "bedtools intersect -wo "
#if overlap_fraction > 0: cmd += "-r -f "+str(overlap_fraction)
cmd += " -a " + ref_file + " -b " + obs_file + " > " + tdir + "/intersect."+jobid+".bed"
#print cmd
os.system(cmd)
# now parse the intersection file
# check for a full length match
results = {}
with open(tdir+'/intersect.'+jobid+'.bed') as inf:
for line in inf:
f = line.rstrip("\n").split("\t")
ref_exon_count = int(f[6])
obs_exon_count = int(f[14])
#if ref_exon_count != obs_exon_count:
# continue
ref_exon = f[0]+':'+f[1]+'-'+f[2]
obs_exon = f[9]+':'+f[10]+'-'+f[11]
ref_id = int(f[3])
obs_id = int(f[12])
obs_name = f[13]
if obs_id not in results:
results[obs_id] = {}
if ref_id not in results[obs_id]:
results[obs_id][ref_id] = {}
results[obs_id][ref_id]['read_name'] = obs_name
results[obs_id][ref_id]['matches'] = set()
results[obs_id][ref_id]['ref_exon_count'] = ref_exon_count
results[obs_id][ref_id]['obs_exon_count'] = obs_exon_count
results[obs_id][ref_id]['name'] = f[4]
results[obs_id][ref_id]['transcript'] = f[5]
results[obs_id][ref_id]['ref_strand'] = f[7]
results[obs_id][ref_id]['exons_ref'] = {}
results[obs_id][ref_id]['exon_overlap'] = {}
results[obs_id][ref_id]['matches'].add(str(ref_exon)+'_'+str(obs_exon))
reflen = int(f[2])-int(f[1])
obslen = int(f[11])-int(f[10])
overlap = int(f[17])
# get the overlap fraction
if reflen == 0 or obslen == 0:
sys.stderr.write(line+"\n")
smallest = 0
else:
smallest = sorted([float(overlap)/float(reflen), float(overlap)/float(obslen)])[0]
ref_exon_number = int(f[8])
obs_exon_number = int(f[16])
results[obs_id][ref_id]['exons_ref'][ref_exon_number] = obs_exon_number
if ref_exon_number not in results[obs_id][ref_id]['exon_overlap']:
results[obs_id][ref_id]['exon_overlap'][ref_exon_number] = {}
results[obs_id][ref_id]['exon_overlap'][ref_exon_number][obs_exon_number] = {}
results[obs_id][ref_id]['exon_overlap'][ref_exon_number][obs_exon_number]['bp'] = overlap
results[obs_id][ref_id]['exon_overlap'][ref_exon_number][obs_exon_number]['frac'] = smallest
d = {}
with open(tdir+"/entries.txt") as inf:
for line in inf:
f = line.rstrip("\n").split("\t")
ref_id = int(f[2])
if ref_id not in d: d[ref_id] = {}
d[ref_id]['column'] = int(f[0])
d[ref_id]['gpdname'] = f[1]
d[ref_id]['gene'] = f[3]
d[ref_id]['transcript'] = f[4]
d[ref_id]['length'] = f[5]
#Go through the results and find the best consecutive exons
for obs_id in results:
for ref_id in results[obs_id]:
overlap_data = results[obs_id][ref_id]['exon_overlap']
refnums = sorted(results[obs_id][ref_id]['exons_ref'].keys())
prevrefval = refnums[0]
prevobsval = results[obs_id][ref_id]['exons_ref'][refnums[0]]
best = []
allconsec = []
best.append([prevrefval,prevobsval])
for n in refnums[1:]:
obs = results[obs_id][ref_id]['exons_ref'][n]
if n != prevrefval+1 or obs != prevobsval+1:
allconsec.append(best)
best = []
best.append([n,obs])
prevrefval = n
prevobsval = obs
if len(best) > 0:
allconsec.append(best)
# now the allconsec contains all the consecutive bests
passing_consec = {}
match_bases = 0
for consec in allconsec:
fracs = [overlap_data[x[0]][x[1]]['frac'] for x in consec]
totalbps = 0
for bp in [overlap_data[x[0]][x[1]]['bp'] for x in consec]: totalbps += bp
passing = True
if len(consec) > 2:
for frac in fracs[1:len(fracs)-1]:
if frac < overlap_fraction[1]:
passing = False
if results[obs_id][ref_id]['ref_strand'] == '+':
if fracs[0] < overlap_fraction[0] and overlap_fraction[0] > 0:
passing = False
if fracs[len(fracs)-1] < overlap_fraction[2] and overlap_fraction[2] > 0:
passing = False
if results[obs_id][ref_id]['ref_strand'] == '-':
if fracs[0] < overlap_fraction[2] and overlap_fraction[2] > 0:
passing = False
if fracs[len(fracs)-1] < overlap_fraction[0] and overlap_fraction[0] > 0:
passing = False
if passing:
passing_consec[json.dumps(consec)] = {}
passing_consec[json.dumps(consec)]['bp'] = totalbps
match_bases += totalbps
passing_consec[json.dumps(consec)]['exons'] = len(consec)
total_aligned_bases = 0
total_aligned_exons = 0
longest_fragment_aligned_bases = 0
longest_fragment_exon_count = 0
for v in passing_consec:
total_aligned_bases += passing_consec[v]['bp']
total_aligned_exons += passing_consec[v]['exons']
# consider longest fragment by base pairs for now
# the alternative would be exon count
if passing_consec[v]['bp'] > longest_fragment_aligned_bases:
longest_fragment_aligned_bases = passing_consec[v]['bp']
longest_fragment_exon_count = passing_consec[v]['exons']
if len(passing_consec) == 0: continue #make sure we passed our criteria
if match_bases < min_match_bp: continue
matchstring = ",".join([str(passing_consec[x]['exons'])+":"+str(passing_consec[x]['bp']) for x in passing_consec])
matchtype = 'Full'
if results[obs_id][ref_id]['ref_exon_count'] != results[obs_id][ref_id]['obs_exon_count'] and results[obs_id][ref_id]['ref_exon_count'] != len(passing_consec):
matchtype = 'Partial'
gappedtype = 'N'
if len(passing_consec) > 1:
gappedtype = 'Y'
of.write(str(obs_id) + "\t" + results[obs_id][ref_id]['read_name'] + "\t" \
+ str(results[obs_id][ref_id]['obs_exon_count']) + "\t" \
+ str(results[obs_id][ref_id]['ref_exon_count']) + "\t" \
+ str(d[ref_id]['column']) + "\t" + d[ref_id]['gpdname'] + "\t" \
+ d[ref_id]['gene'] + "\t" + d[ref_id]['transcript'] + "\t" \
+ matchstring + "\t" \
+ matchtype + "\t" + gappedtype + "\t" \
+ str(total_aligned_bases) + "\t" + str(total_aligned_exons) + "\t" \
+ str(longest_fragment_aligned_bases) + "\t" + str(longest_fragment_exon_count) + "\t" \
+ str(d[ref_id]['length']) + "\n")
#Parse the reference genepred(s) into bed file
#Pre: temporary directory, genepredfilenames, simplenames
# temporary directory - path to temporary directory
# genepredfilenames - list of reference gpd filenames
# simplenames - list of genepred short names
#Post: Bed file with the following format
# 1. chrom
# 2. start 0-base
# 3. end 1-base
# 4. reference gpd entry line number
# 5. gene name
# 6. transcript name
# 7. number of exons in reference gpd entry
# 8. strand
# 9. exon number
# Writes to two places. entries.txt and reference.(column_number).bed
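# An illustrative reference bed line (coordinates are made up for the example):
#   chr1  11873  12227  1  DDX11L1  NR_046018  3  +  1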
def parse_refgpd(tdir,geneprednames,simplenames):
# get the reference genepreds ready to use in work
column_number = 0
entry_number = 0
of_entries = open(tdir+"/entries.txt",'w')
for file in geneprednames:
column_number += 1
of_ref = open(tdir+"/reference."+str(column_number)+".bed",'w')
gfr = FileBasics.GenericFileReader(file)
while True:
line = gfr.readline()
if not line: break
if re.match('^#',line): continue
entry_number += 1
line = line.rstrip("\n")
entry = GenePredBasics.line_to_entry(line)
entry_length = 0
for i in range(0,len(entry['exonStarts'])): entry_length += entry['exonEnds'][i]-entry['exonStarts'][i]
of_entries.write(str(column_number)+ "\t" + simplenames[column_number-1] + "\t" + str(entry_number) + "\t" + entry['gene_name'] + "\t" + entry['name']+"\t"+str(entry_length)+"\n")
exon_number = 0
for i in range(0,len(entry['exonStarts'])):
exon_number += 1
of_ref.write(entry['chrom'] + "\t" + str(entry['exonStarts'][i]) + "\t" \
+ str(entry['exonEnds'][i]) + "\t" + str(entry_number) + "\t" \
+ entry['gene_name'] + "\t" \
+ entry['name'] + "\t" + str(len(entry['exonStarts'])) + "\t" \
+ entry['strand'] + "\t" + str(exon_number) \
+ "\n")
gfr.close()
of_ref.close()
of_entries.close()
# Break the genepred into jobs
# Pre: Temporary directory, job size (int)
# Post: Write a bed file from each job's segment of the genepred file
# Bed file format is as follows:
# 1. chrom
# 2. start
# 3. end
# 4. PSL entry number
# 5. read name
# 6. number of exons
# 7. strand
# 8. exon_number
# Write the bed files into partreads.(job).bed
# job is an integer 1-based
# we return the number of jobs also
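# An illustrative read bed line (values are made up for the example):
#   chr1  11873  12227  17  read_0017  3  +  1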
def break_gpdfile(tdir,job_size):
bfcr = BigFileBasics.BigFileChunkReader(tdir+'/longreads.gpd')
bfcr.set_chunk_size_bytes(job_size)
num_jobs = bfcr.chunk_count
for i in range(0,bfcr.chunk_count):
oc = bfcr.open_chunk(i)
job = i+1
of_bed = open(tdir+'/partreads.'+str(job)+'.bed','w')
while True:
line = oc.read_line()
if not line: break
line = line.rstrip("\n")
entry = GenePredBasics.line_to_entry(line)
exon_number = 0
for i in range(0,len(entry['exonStarts'])):
exon_number += 1
of_bed.write(entry['chrom'] + "\t" + str(entry['exonStarts'][i]) + "\t" \
+ str(entry['exonEnds'][i]) + "\t" + entry['name']+"\t" \
+ entry['gene_name'] + "\t" + str(len(entry['exonStarts'])) + "\t" \
+ entry['strand'] + "\t" + str(exon_number) + "\n")
oc.close()
of_bed.close()
return num_jobs
#Write the genepred
# Pre: temp directory, the psl file, smoothing factor (min intron size)
# Post: into longreads.gpd we write the genepred line
def parse_pslfile(tdir,pslfile,smoothing_factor):
# Go through the long reads and make a genepred
if pslfile != '-':
fr = FileBasics.GenericFileReader(pslfile)
else:
fr = sys.stdin
seennames = {}
longreadnumber = 0
of_gpd = open(tdir+'/longreads.gpd','w')
while True:
line = fr.readline()
if not line: break
if re.match('^#',line): #skip comments
continue
longreadnumber += 1
gpd_line = PSLBasics.convert_entry_to_genepred_line(PSLBasics.line_to_entry(line.rstrip()))
if not gpd_line:
sys.stderr.write("Warning: malformed psl for "+readname+"\n")
continue
entry = GenePredBasics.smooth_gaps( \
GenePredBasics.line_to_entry(gpd_line),smoothing_factor)
readname = entry['name']
if readname in seennames:
sys.stderr.write("Warning: repeat name '"+readname+"'\n")
#replace the read name with its sequential read number
entry['name'] = str(longreadnumber)
gline = GenePredBasics.entry_to_line(entry)
of_gpd.write(gline+"\n")
fr.close()
of_gpd.close()
#Write the genepred
# Pre: temp directory, the gpd file, smoothing factor (min intron size)
# Post: into longreads.gpd we write the genepred line
def parse_gpdfile(tdir,gpdfile,smoothing_factor):
# Go through the long reads and make a genepred
if gpdfile != '-':
fr = FileBasics.GenericFileReader(gpdfile)
else:
fr = sys.stdin
seennames = {}
longreadnumber = 0
of_gpd = open(tdir+'/longreads.gpd','w')
while True:
line = fr.readline()
if not line: break
if re.match('^#',line): #skip comments
continue
longreadnumber += 1
entry = GenePredBasics.smooth_gaps( \
GenePredBasics.line_to_entry(line.rstrip()) \
,smoothing_factor)
readname = entry['name']
if readname in seennames:
sys.stderr.write("Warning: repeat name '"+readname+"'\n")
#replace the read name with its sequential read number
entry['name'] = str(longreadnumber)
gline = GenePredBasics.entry_to_line(entry)
of_gpd.write(gline+"\n")
fr.close()
of_gpd.close()
main()
|
|
"""Implement common widgets layouts as reusable components"""
import re
from collections import defaultdict
from traitlets import Instance, Bool, Unicode, CUnicode, CaselessStrEnum, Tuple
from traitlets import Integer
from traitlets import HasTraits, TraitError
from traitlets import observe, validate
from .widget import Widget
from .widget_box import GridBox
from .docutils import doc_subst
_doc_snippets = {
'style_params' : """
grid_gap : str
CSS attribute used to set the gap between the grid cells
justify_content : str, in ['flex-start', 'flex-end', 'center', 'space-between', 'space-around']
CSS attribute used to align widgets vertically
align_items : str, in ['top', 'bottom', 'center', 'flex-start', 'flex-end', 'baseline', 'stretch']
CSS attribute used to align widgets horizontally
width : str
CSS attribute used to set the width of the grid
height : str
CSS attribute used to set the height of the grid"""
}
@doc_subst(_doc_snippets)
class LayoutProperties(HasTraits):
"""Mixin class for layout templates
This class handles mainly style attributes (height, grid_gap etc.)
Parameters
----------
{style_params}
Note
----
This class is only meant to be used in inheritance as mixin with other
classes. It will not work, unless `self.layout` attribute is defined.
"""
# style attributes (passed to Layout)
grid_gap = Unicode(
None,
allow_none=True,
help="The grid-gap CSS attribute.")
justify_content = CaselessStrEnum(
['flex-start', 'flex-end', 'center',
'space-between', 'space-around'],
allow_none=True,
help="The justify-content CSS attribute.")
align_items = CaselessStrEnum(
['top', 'bottom',
'flex-start', 'flex-end', 'center',
'baseline', 'stretch'],
allow_none=True, help="The align-items CSS attribute.")
width = Unicode(
None,
allow_none=True,
help="The width CSS attribute.")
height = Unicode(
None,
allow_none=True,
help="The width CSS attribute.")
def __init__(self, **kwargs):
super(LayoutProperties, self).__init__(**kwargs)
self._property_rewrite = defaultdict(dict)
self._property_rewrite['align_items'] = {'top': 'flex-start',
'bottom': 'flex-end'}
self._copy_layout_props()
self._set_observers()
def _delegate_to_layout(self, change):
"delegate the trait types to their counterparts in self.layout"
value, name = change['new'], change['name']
value = self._property_rewrite[name].get(value, value)
setattr(self.layout, name, value) # pylint: disable=no-member
def _set_observers(self):
"set observers on all layout properties defined in this class"
_props = LayoutProperties.class_trait_names()
self.observe(self._delegate_to_layout, _props)
def _copy_layout_props(self):
_props = LayoutProperties.class_trait_names()
for prop in _props:
value = getattr(self, prop)
if value:
value = self._property_rewrite[prop].get(value, value)
setattr(self.layout, prop, value) #pylint: disable=no-member
@doc_subst(_doc_snippets)
class AppLayout(GridBox, LayoutProperties):
""" Define an application like layout of widgets.
Parameters
----------
header: instance of Widget
left_sidebar: instance of Widget
center: instance of Widget
right_sidebar: instance of Widget
footer: instance of Widget
widgets to fill the positions in the layout
merge: bool
flag to say whether the empty positions should be automatically merged
pane_widths: list of numbers/strings
the fraction of the total layout width each of the central panes should occupy
(left_sidebar,
center, right_sidebar)
pane_heights: list of numbers/strings
the fraction of the total layout height each of the rows should occupy
(header, center row, footer)
{style_params}
Examples
--------
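A small illustrative example (the buttons simply stand in for real widgets):
>>> from ipywidgets import AppLayout, Button
>>> AppLayout(header=Button(description="Header"),
...           left_sidebar=Button(description="Left"),
...           center=Button(description="Center"),
...           right_sidebar=Button(description="Right"),
...           footer=Button(description="Footer"))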
"""
# widget positions
header = Instance(Widget, allow_none=True)
footer = Instance(Widget, allow_none=True)
left_sidebar = Instance(Widget, allow_none=True)
right_sidebar = Instance(Widget, allow_none=True)
center = Instance(Widget, allow_none=True)
# extra args
pane_widths = Tuple(CUnicode(), CUnicode(), CUnicode(),
default_value=['1fr', '2fr', '1fr'])
pane_heights = Tuple(CUnicode(), CUnicode(), CUnicode(),
default_value=['1fr', '3fr', '1fr'])
merge = Bool(default_value=True)
def __init__(self, **kwargs):
super(AppLayout, self).__init__(**kwargs)
self._update_layout()
@staticmethod
def _size_to_css(size):
if re.match(r'\d+\.?\d*(px|fr|%)$', size):
return size
if re.match(r'\d+\.?\d*$', size):
return size + 'fr'
raise TypeError("the pane sizes must be in one of the following formats: "
"'10px', '10fr', 10 (will be converted to '10fr')."
"Got '{}'".format(size))
def _convert_sizes(self, size_list):
return list(map(self._size_to_css, size_list))
def _update_layout(self):
grid_template_areas = [["header", "header", "header"],
["left-sidebar", "center", "right-sidebar"],
["footer", "footer", "footer"]]
grid_template_columns = self._convert_sizes(self.pane_widths)
grid_template_rows = self._convert_sizes(self.pane_heights)
all_children = {'header': self.header,
'footer': self.footer,
'left-sidebar': self.left_sidebar,
'right-sidebar': self.right_sidebar,
'center': self.center}
children = {position : child
for position, child in all_children.items()
if child is not None}
if not children:
return
for position, child in children.items():
child.layout.grid_area = position
if self.merge:
if len(children) == 1:
position = list(children.keys())[0]
grid_template_areas = [[position, position, position],
[position, position, position],
[position, position, position]]
else:
if self.center is None:
for row in grid_template_areas:
del row[1]
del grid_template_columns[1]
if self.left_sidebar is None:
grid_template_areas[1][0] = grid_template_areas[1][1]
if self.right_sidebar is None:
grid_template_areas[1][-1] = grid_template_areas[1][-2]
if (self.left_sidebar is None and
self.right_sidebar is None and
self.center is None):
grid_template_areas = [['header'], ['footer']]
grid_template_columns = ['1fr']
grid_template_rows = ['1fr', '1fr']
if self.header is None:
del grid_template_areas[0]
del grid_template_rows[0]
if self.footer is None:
del grid_template_areas[-1]
del grid_template_rows[-1]
grid_template_areas_css = "\n".join('"{}"'.format(" ".join(line))
for line in grid_template_areas)
self.layout.grid_template_columns = " ".join(grid_template_columns)
self.layout.grid_template_rows = " ".join(grid_template_rows)
self.layout.grid_template_areas = grid_template_areas_css
self.children = tuple(children.values())
@observe("footer", "header", "center", "left_sidebar", "right_sidebar", "merge",
"pane_widths", "pane_heights")
def _child_changed(self, change): #pylint: disable=unused-argument
self._update_layout()
@doc_subst(_doc_snippets)
class GridspecLayout(GridBox, LayoutProperties):
""" Define a N by M grid layout
Parameters
----------
n_rows : int
number of rows in the grid
n_columns : int
number of columns in the grid
{style_params}
Examples
--------
>>> from ipywidgets import GridspecLayout, Button, Layout
>>> layout = GridspecLayout(n_rows=4, n_columns=2, height='200px')
>>> layout[:3, 0] = Button(layout=Layout(height='auto', width='auto'))
>>> layout[1:, 1] = Button(layout=Layout(height='auto', width='auto'))
>>> layout[-1, 0] = Button(layout=Layout(height='auto', width='auto'))
>>> layout[0, 1] = Button(layout=Layout(height='auto', width='auto'))
>>> layout
"""
n_rows = Integer()
n_columns = Integer()
def __init__(self, n_rows=None, n_columns=None, **kwargs):
super(GridspecLayout, self).__init__(**kwargs)
self.n_rows = n_rows
self.n_columns = n_columns
self._grid_template_areas = [['.'] * self.n_columns for i in range(self.n_rows)]
self._grid_template_rows = 'repeat(%d, 1fr)' % (self.n_rows,)
self._grid_template_columns = 'repeat(%d, 1fr)' % (self.n_columns,)
self._children = {}
self._id_count = 0
@validate('n_rows', 'n_columns')
def _validate_integer(self, proposal):
if proposal['value'] > 0:
return proposal['value']
raise TraitError('n_rows and n_columns must be positive integers')
def _get_indices_from_slice(self, row, column):
"convert a two-dimensional slice to a list of rows and column indices"
if isinstance(row, slice):
start, stop, stride = row.indices(self.n_rows)
rows = range(start, stop, stride)
else:
rows = [row]
if isinstance(column, slice):
start, stop, stride = column.indices(self.n_columns)
columns = range(start, stop, stride)
else:
columns = [column]
return rows, columns
def __setitem__(self, key, value):
row, column = key
self._id_count += 1
obj_id = 'widget%03d' % self._id_count
value.layout.grid_area = obj_id
rows, columns = self._get_indices_from_slice(row, column)
for row in rows:
for column in columns:
current_value = self._grid_template_areas[row][column]
if current_value != '.' and current_value in self._children:
del self._children[current_value]
self._grid_template_areas[row][column] = obj_id
self._children[obj_id] = value
self._update_layout()
def __getitem__(self, key):
rows, columns = self._get_indices_from_slice(*key)
obj_id = None
for row in rows:
for column in columns:
new_obj_id = self._grid_template_areas[row][column]
obj_id = obj_id or new_obj_id
if obj_id != new_obj_id:
raise TypeError('The slice spans several widgets, but '
'only a single widget can be retrieved '
'at a time')
return self._children[obj_id]
def _update_layout(self):
grid_template_areas_css = "\n".join('"{}"'.format(" ".join(line))
for line in self._grid_template_areas)
self.layout.grid_template_columns = self._grid_template_columns
self.layout.grid_template_rows = self._grid_template_rows
self.layout.grid_template_areas = grid_template_areas_css
self.children = tuple(self._children.values())
@doc_subst(_doc_snippets)
class TwoByTwoLayout(GridBox, LayoutProperties):
""" Define a layout with 2x2 regular grid.
Parameters
----------
top_left: instance of Widget
top_right: instance of Widget
bottom_left: instance of Widget
bottom_right: instance of Widget
widgets to fill the positions in the layout
merge: bool
flag to say whether the empty positions should be automatically merged
{style_params}
Examples
--------
>>> from ipywidgets import TwoByTwoLayout, Button
>>> TwoByTwoLayout(top_left=Button(description="Top left"),
... top_right=Button(description="Top right"),
... bottom_left=Button(description="Bottom left"),
... bottom_right=Button(description="Bottom right"))
"""
# widget positions
top_left = Instance(Widget, allow_none=True)
top_right = Instance(Widget, allow_none=True)
bottom_left = Instance(Widget, allow_none=True)
bottom_right = Instance(Widget, allow_none=True)
# extra args
merge = Bool(default_value=True)
def __init__(self, **kwargs):
super(TwoByTwoLayout, self).__init__(**kwargs)
self._update_layout()
def _update_layout(self):
grid_template_areas = [["top-left", "top-right"],
["bottom-left", "bottom-right"]]
all_children = {'top-left' : self.top_left,
'top-right' : self.top_right,
'bottom-left' : self.bottom_left,
'bottom-right' : self.bottom_right}
children = {position : child
for position, child in all_children.items()
if child is not None}
if not children:
return
for position, child in children.items():
child.layout.grid_area = position
if self.merge:
if len(children) == 1:
position = list(children.keys())[0]
grid_template_areas = [[position, position],
[position, position]]
else:
columns = ['left', 'right']
for i, column in enumerate(columns):
top, bottom = children.get('top-' + column), children.get('bottom-' + column)
i_neighbour = (i + 1) % 2
if top is None and bottom is None:
# merge each cell in this column with the neighbour on the same row
grid_template_areas[0][i] = grid_template_areas[0][i_neighbour]
grid_template_areas[1][i] = grid_template_areas[1][i_neighbour]
elif top is None:
# merge with the cell below
grid_template_areas[0][i] = grid_template_areas[1][i]
elif bottom is None:
# merge with the cell above
grid_template_areas[1][i] = grid_template_areas[0][i]
grid_template_areas_css = "\n".join('"{}"'.format(" ".join(line))
for line in grid_template_areas)
self.layout.grid_template_columns = '1fr 1fr'
self.layout.grid_template_rows = '1fr 1fr'
self.layout.grid_template_areas = grid_template_areas_css
self.children = tuple(children.values())
@observe("top_left", "bottom_left", "top_right", "bottom_right", "merge")
def _child_changed(self, change): #pylint: disable=unused-argument
self._update_layout()
|
|
"""
This script is a collection of objects and methods used for MDNs, inspired by the Theano MLP tutorial: http://deeplearning.net/tutorial
"""
__docformat__ = 'restructedtext en'
import cPickle
import os
import sys
import time
import numpy as np
import theano
import theano.tensor as T
import sys
class MDNoutputLayer(object):
def __init__(self, rng, input, n_in, n_out, n_components):
self.input = input
b_values = np.zeros((n_out,n_components), dtype=theano.config.floatX)
W_values = np.asarray(rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_components)),
dtype=theano.config.floatX)
self.W_sigma = theano.shared(value=W_values, name='W_sigma',
borrow=True)
self.W_mixing = theano.shared(value=W_values.copy(), name='W_mixing',
borrow=True)
# b_values = np.zeros((n_components,), dtype=theano.config.floatX)
self.b_sigma = theano.shared(value=b_values.copy(), name='b_sigma',
borrow=True)
self.b_mixing = theano.shared(value=b_values.copy(), name='b_mixing',
borrow=True)
self.sigma = T.nnet.softplus(T.dot(input, self.W_sigma)) #+\
#self.b_sigma.dimshuffle('x',0))
self.mixing = T.nnet.softmax(T.dot(input, self.W_mixing)) #+\
#self.b_mixing.dimshuffle('x',0))
# parameters of the model
# self.params = [self.W_mu, self.b_mu, self.W_sigma, self.b_sigma,
# self.W_mixing, self.b_mixing]
self.params = [self.W_sigma, self.W_mixing]
class NetworkLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical layer of a MLP: units are fully-connected and have
tanh activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
if W is None:
W_values = np.asarray(rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
def set_symbolic_input(self, input):
"""We use this function to bind a symbolic variable with the input
of the network layer. Added to specify that in training time."""
self.input = input
class MDN(object):
"""Mixture Density Network
"""
def __init__(self, input, rng, n_in, n_hiddens, hid_activations,
n_out, out_activation, n_components):
"""Initialize the parameters for the multilayer perceptron
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hiddens: list of int
:param n_hiddens: a list of the number of units in each hidden layer
:type hid_activations: list of functions
:param hid_activations: a list of the activations used in each hidden layer
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
from theano.tensor.shared_randomstreams import RandomStreams
self.srng = RandomStreams(seed=1234)
self.input = input
# We are dealing with multiple hidden layers MLP
layer0 = NetworkLayer(rng=rng, input=input,
n_in=n_in, n_out=n_hiddens[0],
activation=hid_activations[0])
h_layers = [('hiddenLayer0',layer0)]
for i in range(1,len(n_hiddens)):
h_layers.append(('hiddenLayer%d'%i,
NetworkLayer(rng=rng, input=h_layers[i-1][1].output,
n_in=n_hiddens[i-1], n_out=n_hiddens[i],
activation=hid_activations[i])))
self.__dict__.update(dict(h_layers))
# The output layer gets as input the hidden units
# of the hidden layer
self.outputLayer = MDNoutputLayer(rng=rng,
input=h_layers[-1][1].output,
n_in=n_hiddens[-1],
n_out=n_out,
n_components=n_components)
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (self.outputLayer.W_sigma ** 2).sum() +\
(self.outputLayer.W_mixing ** 2).sum()
for i in range(len(n_hiddens)):
self.L2_sqr += (self.__dict__['hiddenLayer%d'%i].W ** 2).sum()
# the parameters of the model are the parameters of the all layers it
# is made out of
params = self.outputLayer.params
for layer in h_layers:
params.extend(layer[1].params)
self.params = params
def set_symbolic_input(self, input):
"""We use this function to bind a symbolic variable with the input
of the network layer. Added to specify that in training time."""
self.input = input
# def train(self, x, y, training_loss, learning_rate,
def train(self, y, training_loss, learning_rate,
n_epochs, train_x, train_y, valid_x, valid_y, batch_size):
"""Train the MLP using SGD"""
index = T.iscalar() # index to a [mini]batch
lr = T.scalar() # learning rate symbolic
#index.tag.test_value = 1
gparams = []
for param in self.params:
gparam = T.grad(training_loss, param)
gparams.append(gparam)
updates = []
for param, gparam in zip(self.params, gparams):
updates.append((param, param - gparam * \
T.cast(lr,dtype=theano.config.floatX)))
try:
train_model = theano.function(inputs=[index, lr],
outputs=[training_loss],
updates=updates,
givens={
self.input: train_x[index * batch_size:(index+1) * batch_size],
y: train_y[index * batch_size:(index + 1) * batch_size]})
except:
import pdb; pdb.set_trace()
validate_model = theano.function(inputs=[index],
outputs=NLL(sigma = self.outputLayer.sigma,
mixing = self.outputLayer.mixing,
y = y),
givens={
self.input: valid_x[index * batch_size:(index+1) * batch_size],
y: valid_y[index * batch_size:(index + 1) * batch_size]})
# compute number of minibatches for training and validation
n_train_batches = train_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_x.get_value(borrow=True).shape[0] / batch_size
validate_MSE = theano.function(inputs=[index],
outputs=MSE(self.samples(), y = y),
givens={
self.input: valid_x[index * batch_size:(index+1) * batch_size],
y: valid_y[index * batch_size:(index + 1) * batch_size]})
print 'training...'
start_time = time.clock()
epoch = 0
total_training_costs = []
total_validation_costs = []
total_validation_MSE = []
lr_time = 0
lr_step = learning_rate / ((train_x.get_value().shape[0]*1.0/batch_size)*(n_epochs-30))
lr_val = learning_rate
while (epoch < n_epochs):
epoch = epoch + 1
epoch_training_costs = []
#import pdb; pdb.set_trace()
for minibatch_index in xrange(n_train_batches):
# linear annealing after 40 epochs...
if epoch > 40:
# lr_val = learning_rate / (1.0+lr_time)
# lr_time = lr_time + 1
lr_val = lr_val - lr_step
else:
lr_val = learning_rate
loss_value = \
train_model(minibatch_index, lr_val)
epoch_training_costs.append(loss_value)
if np.isnan(loss_value):
import pdb; pdb.set_trace()
print 'got NaN in NLL'
sys.exit(1)
this_training_cost = np.mean(epoch_training_costs)
this_validation_cost = np.mean([validate_model(i) for i
in xrange(n_valid_batches)])
this_validation_MSE = np.mean([validate_MSE(i) for i
in xrange(n_valid_batches)])
total_training_costs.append(this_training_cost)
total_validation_costs.append(this_validation_cost)
total_validation_MSE.append(this_validation_MSE)
print 'epoch %i, training NLL %f, validation NLL %f, MSE %f' %\
(epoch, this_training_cost,this_validation_cost,
this_validation_MSE)
end_time = time.clock()
print "Training took %.2f minutes..."%((end_time-start_time)/60.)
#return losses and parameters..
return total_training_costs, total_validation_costs,total_validation_MSE
def samples(self):
component = self.srng.multinomial(pvals=self.outputLayer.mixing)
component_std = T.sum(self.outputLayer.sigma * \
component, axis=1, keepdims=True)
samples = self.srng.normal(std=component_std)
return samples
def save_model(self,filename='MLP.save',
output_folder='output_folder'):
"""
This function pickles the parameters in a file for later usage
"""
storage_file = open(os.path.join(output_folder,filename), 'wb')
cPickle.dump(self, storage_file , protocol=cPickle.HIGHEST_PROTOCOL)
storage_file.close()
@staticmethod
def load_model(filename='MLP.save',
output_folder='output_folder'):
"""
This function loads pickled parameters from a file
"""
storage_file = open(os.path.join(output_folder,filename), 'rb')
model = cPickle.load(storage_file)
storage_file.close()
return model
def Rectifier(x):
"""Implementation of the rectifier activation function"""
return T.switch(x>0, x, 0)
def NLL(sigma, mixing, y):
"""Computes the mean of negative log likelihood for P(y|x)
y = T.matrix('y') # (minibatch_size, output_size)
mu = T.tensor3('mu') # (minibatch_size, output_size, n_components)
sigma = T.matrix('sigma') # (minibatch_size, n_components)
mixing = T.matrix('mixing') # (minibatch_size, n_components)
"""
# multivariate Gaussian
exponent = -0.5 * T.inv(sigma) * T.sum(y**2, axis=1)
normalizer = (2 * np.pi * sigma)
exponent = exponent + T.log(mixing) - (y.shape[1]*.5)*T.log(normalizer)
max_exponent = T.max(exponent, axis=1)
mod_exponent = exponent - max_exponent[:, None]
gauss_mix = T.sum(T.exp(mod_exponent),axis=1)
log_gauss = max_exponent + T.log(gauss_mix)
res = -T.mean(log_gauss)
return res
def MSE(samples, y):
return T.mean((samples - y) ** 2)
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log
from neutron.common import exceptions as exc
from neutron.common import topics
from neutron.db import api as db_api
from neutron.i18n import _LI, _LW
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import helpers
LOG = log.getLogger(__name__)
TUNNEL = 'tunnel'
class TunnelTypeDriver(helpers.SegmentTypeDriver):
"""Define stable abstract interface for ML2 type drivers.
Tunnel type networks rely on tunnel endpoints. This class defines abstract
methods to manage these endpoints.
"""
def __init__(self, model):
super(TunnelTypeDriver, self).__init__(model)
self.segmentation_key = next(iter(self.primary_keys))
@abc.abstractmethod
def sync_allocations(self):
"""Synchronize type_driver allocation table with configured ranges."""
@abc.abstractmethod
def add_endpoint(self, ip, host):
"""Register the endpoint in the type_driver database.
param ip: the IP address of the endpoint
param host: the Host name of the endpoint
"""
@abc.abstractmethod
def get_endpoints(self):
"""Get every endpoint managed by the type_driver
:returns a list of dict [{ip_address:endpoint_ip, host:endpoint_host},
..]
"""
@abc.abstractmethod
def get_endpoint_by_host(self, host):
"""Get endpoint for a given host managed by the type_driver
param host: the Host name of the endpoint
if host found in type_driver database
:returns db object for that particular host
else
:returns None
"""
@abc.abstractmethod
def get_endpoint_by_ip(self, ip):
"""Get endpoint for a given tunnel ip managed by the type_driver
param ip: the IP address of the endpoint
if ip found in type_driver database
:returns db object for that particular ip
else
:returns None
"""
@abc.abstractmethod
def delete_endpoint(self, ip):
"""Delete the endpoint in the type_driver database.
param ip: the IP address of the endpoint
"""
def _initialize(self, raw_tunnel_ranges):
self.tunnel_ranges = []
self._parse_tunnel_ranges(raw_tunnel_ranges, self.tunnel_ranges)
self.sync_allocations()
def _parse_tunnel_ranges(self, tunnel_ranges, current_range):
for entry in tunnel_ranges:
entry = entry.strip()
try:
tun_min, tun_max = entry.split(':')
tun_min = tun_min.strip()
tun_max = tun_max.strip()
tunnel_range = int(tun_min), int(tun_max)
except ValueError as ex:
raise exc.NetworkTunnelRangeError(tunnel_range=entry, error=ex)
plugin_utils.verify_tunnel_range(tunnel_range, self.get_type())
current_range.append(tunnel_range)
LOG.info(_LI("%(type)s ID ranges: %(range)s"),
{'type': self.get_type(), 'range': current_range})
def is_partial_segment(self, segment):
return segment.get(api.SEGMENTATION_ID) is None
def validate_provider_segment(self, segment):
physical_network = segment.get(api.PHYSICAL_NETWORK)
if physical_network:
msg = _("provider:physical_network specified for %s "
"network") % segment.get(api.NETWORK_TYPE)
raise exc.InvalidInput(error_message=msg)
for key, value in segment.items():
if value and key not in [api.NETWORK_TYPE,
api.SEGMENTATION_ID]:
msg = (_("%(key)s prohibited for %(tunnel)s provider network"),
{'key': key, 'tunnel': segment.get(api.NETWORK_TYPE)})
raise exc.InvalidInput(error_message=msg)
def reserve_provider_segment(self, session, segment):
if self.is_partial_segment(segment):
alloc = self.allocate_partially_specified_segment(session)
if not alloc:
raise exc.NoNetworkAvailable()
else:
segmentation_id = segment.get(api.SEGMENTATION_ID)
alloc = self.allocate_fully_specified_segment(
session, **{self.segmentation_key: segmentation_id})
if not alloc:
raise exc.TunnelIdInUse(tunnel_id=segmentation_id)
return {api.NETWORK_TYPE: self.get_type(),
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key),
api.MTU: self.get_mtu()}
def allocate_tenant_segment(self, session):
alloc = self.allocate_partially_specified_segment(session)
if not alloc:
return
return {api.NETWORK_TYPE: self.get_type(),
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key),
api.MTU: self.get_mtu()}
def release_segment(self, session, segment):
tunnel_id = segment[api.SEGMENTATION_ID]
inside = any(lo <= tunnel_id <= hi for lo, hi in self.tunnel_ranges)
info = {'type': self.get_type(), 'id': tunnel_id}
with session.begin(subtransactions=True):
query = (session.query(self.model).
filter_by(**{self.segmentation_key: tunnel_id}))
if inside:
count = query.update({"allocated": False})
if count:
LOG.debug("Releasing %(type)s tunnel %(id)s to pool",
info)
else:
count = query.delete()
if count:
LOG.debug("Releasing %(type)s tunnel %(id)s outside pool",
info)
if not count:
LOG.warning(_LW("%(type)s tunnel %(id)s not found"), info)
def get_allocation(self, session, tunnel_id):
return (session.query(self.model).
filter_by(**{self.segmentation_key: tunnel_id}).
first())
def get_mtu(self, physical_network=None):
seg_mtu = super(TunnelTypeDriver, self).get_mtu()
mtu = []
if seg_mtu > 0:
mtu.append(seg_mtu)
if cfg.CONF.ml2.path_mtu > 0:
mtu.append(cfg.CONF.ml2.path_mtu)
return min(mtu) if mtu else 0
class EndpointTunnelTypeDriver(TunnelTypeDriver):
def __init__(self, segment_model, endpoint_model):
super(EndpointTunnelTypeDriver, self).__init__(segment_model)
self.endpoint_model = endpoint_model
self.segmentation_key = next(iter(self.primary_keys))
def get_endpoint_by_host(self, host):
LOG.debug("get_endpoint_by_host() called for host %s", host)
session = db_api.get_session()
return (session.query(self.endpoint_model).
filter_by(host=host).first())
def get_endpoint_by_ip(self, ip):
LOG.debug("get_endpoint_by_ip() called for ip %s", ip)
session = db_api.get_session()
return (session.query(self.endpoint_model).
filter_by(ip_address=ip).first())
def delete_endpoint(self, ip):
LOG.debug("delete_endpoint() called for ip %s", ip)
session = db_api.get_session()
with session.begin(subtransactions=True):
(session.query(self.endpoint_model).
filter_by(ip_address=ip).delete())
def _get_endpoints(self):
LOG.debug("_get_endpoints() called")
session = db_api.get_session()
return session.query(self.endpoint_model)
def _add_endpoint(self, ip, host, **kwargs):
LOG.debug("_add_endpoint() called for ip %s", ip)
session = db_api.get_session()
try:
endpoint = self.endpoint_model(ip_address=ip, host=host, **kwargs)
endpoint.save(session)
except db_exc.DBDuplicateEntry:
endpoint = (session.query(self.endpoint_model).
filter_by(ip_address=ip).one())
LOG.warning(_LW("Endpoint with ip %s already exists"), ip)
return endpoint
class TunnelRpcCallbackMixin(object):
def setup_tunnel_callback_mixin(self, notifier, type_manager):
self._notifier = notifier
self._type_manager = type_manager
def tunnel_sync(self, rpc_context, **kwargs):
"""Update new tunnel.
Updates the database with the tunnel IP. All listening agents will also
be notified about the new tunnel IP.
"""
tunnel_ip = kwargs.get('tunnel_ip')
if not tunnel_ip:
msg = _("Tunnel IP value needed by the ML2 plugin")
raise exc.InvalidInput(error_message=msg)
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
msg = _("Network type value needed by the ML2 plugin")
raise exc.InvalidInput(error_message=msg)
host = kwargs.get('host')
driver = self._type_manager.drivers.get(tunnel_type)
if driver:
# The given conditional statements will verify the following
# things:
# 1. If host is not passed from an agent, it is a legacy mode.
# 2. If passed host and tunnel_ip are not found in the DB,
# it is a new endpoint.
# 3. If host is passed from an agent and it is not found in DB
# but the passed tunnel_ip is found, delete the endpoint
# from DB and add the endpoint with (tunnel_ip, host),
# it is an upgrade case.
# 4. If passed host is found in DB and passed tunnel ip is not
# found, delete the endpoint belonging to that host and
# add endpoint with latest (tunnel_ip, host), it is a case
# where local_ip of an agent got changed.
if host:
host_endpoint = driver.obj.get_endpoint_by_host(host)
ip_endpoint = driver.obj.get_endpoint_by_ip(tunnel_ip)
if (ip_endpoint and ip_endpoint.host is None
and host_endpoint is None):
driver.obj.delete_endpoint(ip_endpoint.ip_address)
elif (ip_endpoint and ip_endpoint.host != host):
msg = (_("Tunnel IP %(ip)s in use with host %(host)s"),
{'ip': ip_endpoint.ip_address,
'host': ip_endpoint.host})
raise exc.InvalidInput(error_message=msg)
elif (host_endpoint and host_endpoint.ip_address != tunnel_ip):
# Notify all other listening agents to delete stale tunnels
self._notifier.tunnel_delete(rpc_context,
host_endpoint.ip_address, tunnel_type)
driver.obj.delete_endpoint(host_endpoint.ip_address)
tunnel = driver.obj.add_endpoint(tunnel_ip, host)
tunnels = driver.obj.get_endpoints()
entry = {'tunnels': tunnels}
# Notify all other listening agents
self._notifier.tunnel_update(rpc_context, tunnel.ip_address,
tunnel_type)
# Return the list of tunnels IP's to the agent
return entry
else:
msg = _("Network type value '%s' not supported") % tunnel_type
raise exc.InvalidInput(error_message=msg)
class TunnelAgentRpcApiMixin(object):
def _get_tunnel_update_topic(self):
return topics.get_topic_name(self.topic,
TUNNEL,
topics.UPDATE)
def tunnel_update(self, context, tunnel_ip, tunnel_type):
cctxt = self.client.prepare(topic=self._get_tunnel_update_topic(),
fanout=True)
cctxt.cast(context, 'tunnel_update', tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type)
def _get_tunnel_delete_topic(self):
return topics.get_topic_name(self.topic,
TUNNEL,
topics.DELETE)
def tunnel_delete(self, context, tunnel_ip, tunnel_type):
cctxt = self.client.prepare(topic=self._get_tunnel_delete_topic(),
fanout=True)
cctxt.cast(context, 'tunnel_delete', tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type)
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction* RPCs."""
from test_framework.address import check_script, script_to_p2sh
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, find_vout_for_address, hex_str_to_bytes
from test_framework.messages import sha256
from test_framework.script import CScript, OP_0, OP_CHECKSIG
from decimal import Decimal
class SignRawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']
inputs = [
# Valid pay-to-pubkey scripts
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
{'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransactionwithkey(rawTx, privKeys, inputs)
# 1) The transaction has a complete set of signatures
assert rawTxSigned['complete']
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def test_with_lock_outputs(self):
"""Test correct error reporting when trying to sign a locked output"""
self.nodes[0].encryptwallet("password")
rawTx = '020000000156b958f78e3f24e0b2f4e4db1255426b0902027cb37e3ddadb52e37c3557dddb0000000000ffffffff01c0a6b929010000001600149a2ee8c77140a053f36018ac8124a6ececc1668a00000000'
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].signrawtransactionwithwallet, rawTx)
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
# Make sure decoderawtransaction is at least marginally sane
decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
for i, inp in enumerate(inputs):
assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
# Make sure decoderawtransaction throws if there is extra data
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00")
rawTxSigned = self.nodes[0].signrawtransactionwithkey(rawTx, privKeys, scripts)
# 3) The transaction has no complete set of signatures
assert not rawTxSigned['complete']
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
assert not rawTxSigned['errors'][0]['witness']
# Now test signing failure for transaction with input witnesses
p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
rawTxSigned = self.nodes[0].signrawtransactionwithwallet(p2wpkh_raw_tx)
# 7) The transaction has no complete set of signatures
assert not rawTxSigned['complete']
# 8) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 9) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# Non-empty witness checked here
assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
assert not rawTxSigned['errors'][0]['witness']
def witness_script_test(self):
# Now test signing transaction to P2SH-P2WSH addresses without wallet
# Create a new P2SH-P2WSH 1-of-1 multisig address:
embedded_address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())
embedded_privkey = self.nodes[1].dumpprivkey(embedded_address["address"])
p2sh_p2wsh_address = self.nodes[1].addmultisigaddress(1, [embedded_address["pubkey"]], "", "p2sh-segwit")
# send transaction to P2SH-P2WSH 1-of-1 multisig address
self.nodes[0].generate(101)
self.nodes[0].sendtoaddress(p2sh_p2wsh_address["address"], 49.999)
self.nodes[0].generate(1)
self.sync_all()
# Find the UTXO for the transaction node[1] should have received, check witnessScript matches
unspent_output = self.nodes[1].listunspent(0, 999999, [p2sh_p2wsh_address["address"]])[0]
assert_equal(unspent_output["witnessScript"], p2sh_p2wsh_address["redeemScript"])
p2sh_redeemScript = CScript([OP_0, sha256(hex_str_to_bytes(p2sh_p2wsh_address["redeemScript"]))])
assert_equal(unspent_output["redeemScript"], p2sh_redeemScript.hex())
# Now create and sign a transaction spending that output on node[0], which doesn't know the scripts or keys
spending_tx = self.nodes[0].createrawtransaction([unspent_output], {self.nodes[1].getnewaddress(): Decimal("49.998")})
spending_tx_signed = self.nodes[0].signrawtransactionwithkey(spending_tx, [embedded_privkey], [unspent_output])
# Check the signing completed successfully
assert 'complete' in spending_tx_signed
assert_equal(spending_tx_signed['complete'], True)
self.log.info('Try with a P2PKH script as the witnessScript')
embedded_addr_info = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress('', 'legacy'))
embedded_privkey = self.nodes[1].dumpprivkey(embedded_addr_info['address'])
witness_script = embedded_addr_info['scriptPubKey']
redeem_script = CScript([OP_0, sha256(check_script(witness_script))]).hex()
addr = script_to_p2sh(redeem_script)
script_pub_key = self.nodes[1].validateaddress(addr)['scriptPubKey']
# Fund that address
txid = self.nodes[0].sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
self.nodes[0].generate(1)
# Now create and sign a transaction spending that output on node[0], which doesn't know the scripts or keys
spending_tx = self.nodes[0].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): Decimal("9.999")})
spending_tx_signed = self.nodes[0].signrawtransactionwithkey(spending_tx, [embedded_privkey], [{'txid': txid, 'vout': vout, 'scriptPubKey': script_pub_key, 'redeemScript': redeem_script, 'witnessScript': witness_script, 'amount': 10}])
# Check the signing completed successfully
assert 'complete' in spending_tx_signed
assert_equal(spending_tx_signed['complete'], True)
self.nodes[0].sendrawtransaction(spending_tx_signed['hex'])
self.log.info('Try with a P2PK script as the witnessScript')
embedded_addr_info = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress('', 'legacy'))
embedded_privkey = self.nodes[1].dumpprivkey(embedded_addr_info['address'])
witness_script = CScript([hex_str_to_bytes(embedded_addr_info['pubkey']), OP_CHECKSIG]).hex()
redeem_script = CScript([OP_0, sha256(check_script(witness_script))]).hex()
addr = script_to_p2sh(redeem_script)
script_pub_key = self.nodes[1].validateaddress(addr)['scriptPubKey']
# Fund that address
txid = self.nodes[0].sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
self.nodes[0].generate(1)
# Now create and sign a transaction spending that output on node[0], which doesn't know the scripts or keys
spending_tx = self.nodes[0].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): Decimal("9.999")})
spending_tx_signed = self.nodes[0].signrawtransactionwithkey(spending_tx, [embedded_privkey], [{'txid': txid, 'vout': vout, 'scriptPubKey': script_pub_key, 'redeemScript': redeem_script, 'witnessScript': witness_script, 'amount': 10}])
# Check the signing completed successfully
assert 'complete' in spending_tx_signed
assert_equal(spending_tx_signed['complete'], True)
self.nodes[0].sendrawtransaction(spending_tx_signed['hex'])
script_pub_key = self.nodes[1].validateaddress(addr)['scriptPubKey']
# Fund that address
txid = self.nodes[0].sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
self.nodes[0].generate(1)
# Now create and sign a transaction spending that output on node[0], which doesn't know the scripts or keys
spending_tx = self.nodes[0].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): Decimal("9.999")})
spending_tx_signed = self.nodes[0].signrawtransactionwithkey(spending_tx, [embedded_privkey], [{'txid': txid, 'vout': vout, 'scriptPubKey': script_pub_key, 'redeemScript': redeem_script, 'witnessScript': witness_script, 'amount': 10}])
# Check the signing completed successfully
assert 'complete' in spending_tx_signed
assert_equal(spending_tx_signed['complete'], True)
self.nodes[0].sendrawtransaction(spending_tx_signed['hex'])
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
self.witness_script_test()
self.test_with_lock_outputs()
self.test_fully_signed_tx()
if __name__ == '__main__':
SignRawTransactionsTest().main()
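# For reference: the prevtxs entries passed to signrawtransactionwithkey in the
# tests above have this shape (redeemScript/witnessScript/amount are only needed
# for P2SH/segwit inputs); values here are placeholders, not real data:
#   {'txid': '<hex>', 'vout': 0, 'scriptPubKey': '<hex>',
#    'redeemScript': '<hex>', 'witnessScript': '<hex>', 'amount': 10}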
|
|
#!/usr/bin/env python3
################################################################################
#
# Copyright 2017 Proyectos y Sistemas de Mantenimiento SL (eProsima).
# Copyright (c) 2018-2019 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
# This script can generate the client and agent code based on a set of topics
# to send and a set to receive. It uses fastrtpsgen to generate the code from
# the IDL for the topic messages. The PX4 msg definitions are used to create
# the IDL used by fastrtpsgen, using templates.
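# Example invocation (illustrative only; the script file name is an assumption,
# the flags and default paths are the ones defined by the argument parser below):
#
#   python3 generate_microRTPS_bridge.py -a -c -i \
#       -t msg -y tools/uorb_rtps_message_ids.yaml \
#       -u src/modules/micrortps_bridge/micrortps_client \
#       -o src/modules/micrortps_bridge/micrortps_agent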
import sys
import os
import argparse
import shutil
import px_generate_uorb_topic_files
import px_generate_uorb_topic_helper
from uorb_rtps_classifier import Classifier
import subprocess
import glob
import errno
import re
try:
from six.moves import input
except ImportError as e:
print("Failed to import six: " + e)
print("")
print("You may need to install it using:")
print(" pip3 install --user six")
print("")
sys.exit(1)
try:
from packaging import version
except ImportError as e:
print("Failed to import packaging: " + str(e))
print("")
print("You may need to install it using:")
print(" pip3 install --user packaging")
print("")
sys.exit(1)
def check_rtps_id_uniqueness(classifier):
"""
    Checks that no RTPS ID is assigned to more than one message in the map.
"""
repeated_ids = dict()
full_send_list = dict(list(msg for msg in list(classifier.msgs_to_send.items(
))) + list(list(msg[0].items())[0] for msg in classifier.alias_msgs_to_send))
full_receive_list = dict(list(msg for msg in list(classifier.msgs_to_receive.items(
))) + list(list(msg[0].items())[0] for msg in classifier.alias_msgs_to_receive))
full_ignore_list = dict(list(msg for msg in list(classifier.msgs_to_ignore.items(
))) + list(list(msg[0].items())[0] for msg in classifier.alias_msgs_to_ignore))
# check if there are repeated ID's on the messages to send
for key, value in list(full_send_list.items()):
if list(full_send_list.values()).count(value) > 1:
repeated_ids.update({key: value})
# check if there are repeated ID's on the messages to receive
for key, value in list(full_receive_list.items()):
if list(full_receive_list.values()).count(value) > 1:
repeated_ids.update({key: value})
# check if there are repeated ID's on the messages to ignore
for key, value in list(full_ignore_list.items()):
if list(full_ignore_list.values()).count(value) > 1:
repeated_ids.update({key: value})
# check if there are repeated IDs between classified and unclassified msgs
# check send and ignore lists
send_ignore_common_ids = list(set(full_ignore_list.values(
)).intersection(list(full_send_list.values())))
for item in list(full_send_list.items()):
for repeated in send_ignore_common_ids:
if item[1] == repeated:
repeated_ids.update({item[0]: item[1]})
for item in list(full_ignore_list.items()):
for repeated in send_ignore_common_ids:
if item[1] == repeated:
repeated_ids.update({item[0]: item[1]})
# check receive and ignore lists
receive_ignore_common_ids = list(set(full_ignore_list.values(
)).intersection(list(full_receive_list.values())))
for item in list(full_receive_list.items()):
for repeated in receive_ignore_common_ids:
if item[1] == repeated:
repeated_ids.update({item[0]: item[1]})
for item in list(full_ignore_list.items()):
for repeated in receive_ignore_common_ids:
if item[1] == repeated:
repeated_ids.update({item[0]: item[1]})
all_msgs = {}
all_msgs.update(full_send_list)
all_msgs.update(full_receive_list)
all_msgs.update(full_ignore_list)
all_ids = list()
all_ids = list(all_msgs.values())
all_ids.sort()
if not repeated_ids:
print("All good. RTPS ID's are unique")
else:
raise AssertionError(", ".join('%s' % msgs for msgs in list(repeated_ids.keys())) +
" have their ID's repeated. Please choose from the following pool:\n" +
", ".join('%d' % id for id in px_generate_uorb_topic_helper.check_available_ids(all_ids)))
default_client_out = "src/modules/micrortps_bridge/micrortps_client"
default_agent_out = "src/modules/micrortps_bridge/micrortps_agent"
default_uorb_templates_dir = "templates/uorb_microcdr"
default_urtps_templates_dir = "templates/urtps"
default_rtps_id_file = "tools/uorb_rtps_message_ids.yaml"
default_package_name = px_generate_uorb_topic_files.PACKAGE
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--agent", dest='agent', action="store_true",
help="Flag for generate the agent, by default is true if -c is not specified")
parser.add_argument("-c", "--client", dest='client', action="store_true",
help="Flag for generate the client, by default is true if -a is not specified")
parser.add_argument("-i", "--generate-idl", dest='gen_idl',
action="store_true", help="Flag for generate idl files for each msg")
parser.add_argument("-j", "--idl-dir", dest='idl_dir',
type=str, help="IDL files dir", default='')
parser.add_argument("-m", "--mkdir-build", dest='mkdir_build',
action="store_true", help="Flag to create 'build' dir")
parser.add_argument("-l", "--generate-cmakelists", dest='cmakelists',
action="store_true", help="Flag to generate a CMakeLists.txt file for the micro-RTPS agent")
parser.add_argument("-t", "--topic-msg-dir", dest='msgdir', type=str,
help="Topics message, by default using relative path 'msg/'", default="msg")
parser.add_argument("-b", "--uorb-templates-dir", dest='uorb_templates', type=str,
help="uORB templates, by default using relative path to msgdir 'templates/uorb_microcdr'", default=default_uorb_templates_dir)
parser.add_argument("-q", "--urtps-templates-dir", dest='urtps_templates', type=str,
help="uRTPS templates, by default using relative path to msgdir 'templates/urtps'", default=default_urtps_templates_dir)
parser.add_argument("-y", "--rtps-ids-file", dest='yaml_file', type=str,
help="RTPS msg IDs definition path, by default using relative path to msgdir 'tools/uorb_rtps_message_ids.yaml'", default=default_rtps_id_file)
parser.add_argument("-p", "--package", dest='package', type=str,
help="Msg package naming, by default px4", default=default_package_name)
parser.add_argument("-o", "--agent-outdir", dest='agentdir', type=str,
help="Agent output dir, by default using relative path 'src/modules/micrortps_bridge/micrortps_agent'", default=default_agent_out)
parser.add_argument("-u", "--client-outdir", dest='clientdir', type=str,
help="Client output dir, by default using relative path 'src/modules/micrortps_bridge/micrortps_client'", default=default_client_out)
parser.add_argument("-f", "--fastrtpsgen-dir", dest='fastrtpsgen', type=str, nargs='?',
help="fastrtpsgen installation dir, only needed if fastrtpsgen is not in PATH, by default empty", default="")
parser.add_argument("-g", "--fastrtpsgen-include", dest='fastrtpsgen_include', type=str,
help="directory(ies) to add to preprocessor include paths of fastrtpsgen, by default empty", default="")
parser.add_argument("-r", "--ros2-distro", dest='ros2_distro', type=str, nargs='?',
help="ROS2 distro, only required if generating the agent for usage with ROS2 nodes, by default empty", default="")
parser.add_argument("--delete-tree", dest='del_tree',
action="store_true", help="Delete dir tree output dir(s)")
if len(sys.argv) <= 1:
parser.print_usage()
exit(-1)
# Parse arguments
args = parser.parse_args()
agent = args.agent
client = args.client
cmakelists = args.cmakelists
del_tree = args.del_tree
gen_idl = args.gen_idl
mkdir_build = args.mkdir_build
package = args.package
# Msg files path
msg_dir = os.path.abspath(args.msgdir)
px_generate_uorb_topic_files.append_to_include_path(
{msg_dir}, px_generate_uorb_topic_files.INCL_DEFAULT, package)
# Agent files output path
agent_out_dir = os.path.abspath(args.agentdir)
# Client files output path
client_out_dir = os.path.abspath(args.clientdir)
# IDL files path
idl_dir = args.idl_dir
if idl_dir != '':
idl_dir = os.path.abspath(args.idl_dir)
else:
idl_dir = os.path.join(agent_out_dir, "idl")
if args.fastrtpsgen is None or args.fastrtpsgen == '':
# Assume fastrtpsgen is in PATH
fastrtpsgen_path = 'fastrtpsgen'
    for dirname in os.environ['PATH'].split(':'):
        candidate = os.path.join(dirname, 'fastrtpsgen')
        if os.path.isfile(candidate):
            fastrtpsgen_path = candidate
            break
else:
# Path to fastrtpsgen is explicitly specified
if os.path.isdir(args.fastrtpsgen):
fastrtpsgen_path = os.path.join(
os.path.abspath(args.fastrtpsgen), 'fastrtpsgen')
else:
fastrtpsgen_path = args.fastrtpsgen
fastrtpsgen_include = args.fastrtpsgen_include
if fastrtpsgen_include is not None and fastrtpsgen_include != '':
fastrtpsgen_include = "-I " + \
os.path.abspath(
args.fastrtpsgen_include) + " "
# get FastRTPSGen version
# .. note:: since Fast-RTPS 1.8.0 release, FastRTPSGen is a separated repository
# and not included in the Fast-RTPS project.
# The starting version since this separation is 1.0.0, which follows its own
# versioning
fastrtpsgen_version = version.Version("1.0.0")
if os.path.exists(fastrtpsgen_path):
try:
fastrtpsgen_version_out = subprocess.check_output(
[fastrtpsgen_path, "-version"]).decode("utf-8").strip()[-5:]
except OSError:
raise
try:
fastrtpsgen_version = version.parse(fastrtpsgen_version_out)
except version.InvalidVersion:
raise Exception(
"'fastrtpsgen -version' returned None or an invalid version")
else:
raise Exception(
"FastRTPSGen not found. Specify the location of fastrtpsgen with the -f flag")
# get ROS 2 version, if exists
ros2_distro = ''
ros_version = os.environ.get('ROS_VERSION')
if ros_version == '2':
if args.ros2_distro != '':
ros2_distro = args.ros2_distro
else:
ros2_distro = os.environ.get('ROS_DISTRO')
# get FastRTPS version
fastrtps_version = ''
if not ros2_distro:
# grab the version installed system wise
fastrtps_version = subprocess.check_output(
"ldconfig -v 2>/dev/null | grep libfastrtps", shell=True).decode("utf-8").strip().split('so.')[-1]
else:
# grab the version of the ros-<ros_distro>-fastrtps package
    fastrtps_version = re.search(r'Version:\s*([\d.]+)', subprocess.check_output(
"dpkg -s ros-" + ros2_distro + "-fastrtps 2>/dev/null | grep -i version", shell=True).decode("utf-8").strip()).group(1)
# If neither the agent nor the client is specified, generate both
if not agent and not client:
agent = True
client = True
if del_tree:
if agent:
_continue = str(input("\nFiles in " + agent_out_dir +
" will be erased, continue?[Y/n]\n"))
if _continue == "N" or _continue == "n":
print("Aborting execution...")
exit(-1)
else:
if agent and os.path.isdir(agent_out_dir):
shutil.rmtree(agent_out_dir)
if client:
_continue = str(input(
"\nFiles in " + client_out_dir + " will be erased, continue?[Y/n]\n"))
if _continue.strip() in ("N", "n"):
print("Aborting execution...")
exit(-1)
else:
if client and os.path.isdir(client_out_dir):
shutil.rmtree(client_out_dir)
if agent and os.path.isdir(os.path.join(agent_out_dir, "idl")):
shutil.rmtree(os.path.join(agent_out_dir, "idl"))
# uORB templates path
uorb_templates_dir = (args.uorb_templates if os.path.isabs(args.uorb_templates)
else os.path.join(msg_dir, args.uorb_templates))
# uRTPS templates path
urtps_templates_dir = (args.urtps_templates if os.path.isabs(args.urtps_templates)
else os.path.join(msg_dir, args.urtps_templates))
# parse yaml file into a map of ids
classifier = (Classifier(os.path.abspath(args.yaml_file), msg_dir) if os.path.isabs(args.yaml_file)
else Classifier(os.path.join(msg_dir, args.yaml_file), msg_dir))
# check if there are no ID's repeated
check_rtps_id_uniqueness(classifier)
uRTPS_CLIENT_TEMPL_FILE = 'microRTPS_client.cpp.em'
uRTPS_AGENT_TOPICS_H_TEMPL_FILE = 'RtpsTopics.h.em'
uRTPS_AGENT_TOPICS_SRC_TEMPL_FILE = 'RtpsTopics.cpp.em'
uRTPS_AGENT_TEMPL_FILE = 'microRTPS_agent.cpp.em'
uRTPS_TIMESYNC_CPP_TEMPL_FILE = 'microRTPS_timesync.cpp.em'
uRTPS_TIMESYNC_H_TEMPL_FILE = 'microRTPS_timesync.h.em'
uRTPS_AGENT_CMAKELISTS_TEMPL_FILE = 'microRTPS_agent_CMakeLists.txt.em'
uRTPS_PUBLISHER_SRC_TEMPL_FILE = 'Publisher.cpp.em'
uRTPS_PUBLISHER_H_TEMPL_FILE = 'Publisher.h.em'
uRTPS_SUBSCRIBER_SRC_TEMPL_FILE = 'Subscriber.cpp.em'
uRTPS_SUBSCRIBER_H_TEMPL_FILE = 'Subscriber.h.em'
def generate_agent(out_dir):
global fastrtps_version
if classifier.msgs_to_send:
for msg_file in classifier.msgs_to_send:
if gen_idl:
if out_dir != agent_out_dir:
                    px_generate_uorb_topic_files.generate_idl_file(msg_file, msg_dir, "", os.path.join(out_dir, "idl"), urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, fastrtps_version, ros2_distro, classifier.msg_id_map)
else:
px_generate_uorb_topic_files.generate_idl_file(msg_file, msg_dir, "", idl_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, fastrtps_version, ros2_distro, classifier.msg_id_map)
px_generate_uorb_topic_files.generate_topic_file(msg_file, msg_dir, "", out_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_PUBLISHER_SRC_TEMPL_FILE)
px_generate_uorb_topic_files.generate_topic_file(msg_file, msg_dir, "", out_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_PUBLISHER_H_TEMPL_FILE)
if classifier.alias_msgs_to_send:
for msg_file in classifier.alias_msgs_to_send:
msg_alias = list(msg_file[0].keys())[0]
msg_name = msg_file[1]
if gen_idl:
if out_dir != agent_out_dir:
                    px_generate_uorb_topic_files.generate_idl_file(msg_name, msg_dir, msg_alias, os.path.join(out_dir, "idl"), urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, fastrtps_version, ros2_distro, classifier.msg_id_map)
else:
px_generate_uorb_topic_files.generate_idl_file(msg_name, msg_dir, msg_alias, idl_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, fastrtps_version, ros2_distro, classifier.msg_id_map)
px_generate_uorb_topic_files.generate_topic_file(msg_name, msg_dir, msg_alias, out_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_PUBLISHER_SRC_TEMPL_FILE)
px_generate_uorb_topic_files.generate_topic_file(msg_name, msg_dir, msg_alias, out_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_PUBLISHER_H_TEMPL_FILE)
if classifier.msgs_to_receive:
for msg_file in classifier.msgs_to_receive:
if gen_idl:
if out_dir != agent_out_dir:
                    px_generate_uorb_topic_files.generate_idl_file(msg_file, msg_dir, "", os.path.join(out_dir, "idl"), urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, fastrtps_version, ros2_distro, classifier.msg_id_map)
else:
px_generate_uorb_topic_files.generate_idl_file(msg_file, msg_dir, "", idl_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, fastrtps_version, ros2_distro, classifier.msg_id_map)
px_generate_uorb_topic_files.generate_topic_file(msg_file, msg_dir, "", out_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_SUBSCRIBER_SRC_TEMPL_FILE)
px_generate_uorb_topic_files.generate_topic_file(msg_file, msg_dir, "", out_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_SUBSCRIBER_H_TEMPL_FILE)
if classifier.alias_msgs_to_receive:
for msg_file in classifier.alias_msgs_to_receive:
msg_alias = list(msg_file[0].keys())[0]
msg_name = msg_file[1]
if gen_idl:
if out_dir != agent_out_dir:
                    px_generate_uorb_topic_files.generate_idl_file(msg_name, msg_dir, msg_alias, os.path.join(out_dir, "idl"), urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, fastrtps_version, ros2_distro, classifier.msg_id_map)
else:
px_generate_uorb_topic_files.generate_idl_file(msg_name, msg_dir, msg_alias, idl_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, fastrtps_version, ros2_distro, classifier.msg_id_map)
px_generate_uorb_topic_files.generate_topic_file(msg_name, msg_dir, msg_alias, out_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_SUBSCRIBER_SRC_TEMPL_FILE)
px_generate_uorb_topic_files.generate_topic_file(msg_name, msg_dir, msg_alias, out_dir, urtps_templates_dir,
package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_SUBSCRIBER_H_TEMPL_FILE)
px_generate_uorb_topic_files.generate_uRTPS_general(classifier.msgs_to_send, classifier.alias_msgs_to_send, classifier.msgs_to_receive, classifier.alias_msgs_to_receive, msg_dir, out_dir,
urtps_templates_dir, package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_AGENT_TEMPL_FILE)
px_generate_uorb_topic_files.generate_uRTPS_general(classifier.msgs_to_send, classifier.alias_msgs_to_send, classifier.msgs_to_receive, classifier.alias_msgs_to_receive, msg_dir, out_dir,
urtps_templates_dir, package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_TIMESYNC_CPP_TEMPL_FILE)
px_generate_uorb_topic_files.generate_uRTPS_general(classifier.msgs_to_send, classifier.alias_msgs_to_send, classifier.msgs_to_receive, classifier.alias_msgs_to_receive, msg_dir, out_dir,
urtps_templates_dir, package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_TIMESYNC_H_TEMPL_FILE)
px_generate_uorb_topic_files.generate_uRTPS_general(classifier.msgs_to_send, classifier.alias_msgs_to_send, classifier.msgs_to_receive, classifier.alias_msgs_to_receive, msg_dir, out_dir,
urtps_templates_dir, package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_AGENT_TOPICS_H_TEMPL_FILE)
px_generate_uorb_topic_files.generate_uRTPS_general(classifier.msgs_to_send, classifier.alias_msgs_to_send, classifier.msgs_to_receive, classifier.alias_msgs_to_receive, msg_dir, out_dir,
urtps_templates_dir, package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_AGENT_TOPICS_SRC_TEMPL_FILE)
if cmakelists:
px_generate_uorb_topic_files.generate_uRTPS_general(classifier.msgs_to_send, classifier.alias_msgs_to_send, classifier.msgs_to_receive, classifier.alias_msgs_to_receive, msg_dir, os.path.dirname(out_dir),
urtps_templates_dir, package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_AGENT_CMAKELISTS_TEMPL_FILE)
# Final steps to install agent
mkdir_p(os.path.join(out_dir, "fastrtpsgen"))
prev_cwd_path = os.getcwd()
os.chdir(os.path.join(out_dir, "fastrtpsgen"))
if not glob.glob(os.path.join(idl_dir, "*.idl")):
raise Exception("No IDL files found in %s" % idl_dir)
# If it is generating the bridge code for interfacing with ROS2, then set
# the '-typeros2' option in fastrtpsgen.
# .. note:: This is only available in FastRTPSGen 1.0.4 and above
gen_ros2_typename = ""
if ros2_distro and ros2_distro in ['dashing', 'eloquent', 'foxy'] and fastrtpsgen_version >= version.Version("1.0.4"):
gen_ros2_typename = "-typeros2 "
for idl_file in glob.glob(os.path.join(idl_dir, "*.idl")):
try:
ret = subprocess.check_call(fastrtpsgen_path + " -d " + out_dir +
"/fastrtpsgen -example x64Linux2.6gcc " + gen_ros2_typename + fastrtpsgen_include + idl_file, shell=True)
except OSError:
raise
rm_wildcard(os.path.join(out_dir, "fastrtpsgen/*PubSubMain*"))
rm_wildcard(os.path.join(out_dir, "fastrtpsgen/makefile*"))
rm_wildcard(os.path.join(out_dir, "fastrtpsgen/*Publisher*"))
rm_wildcard(os.path.join(out_dir, "fastrtpsgen/*Subscriber*"))
for f in glob.glob(os.path.join(out_dir, "fastrtpsgen/*.cxx")):
os.rename(f, f.replace(".cxx", ".cpp"))
cp_wildcard(os.path.join(out_dir, "fastrtpsgen/*"), out_dir)
if os.path.isdir(os.path.join(out_dir, "fastrtpsgen")):
shutil.rmtree(os.path.join(out_dir, "fastrtpsgen"))
cp_wildcard(os.path.join(urtps_templates_dir,
"microRTPS_transport.*"), agent_out_dir)
if cmakelists:
os.rename(os.path.join(os.path.dirname(out_dir), "microRTPS_agent_CMakeLists.txt"),
os.path.join(os.path.dirname(out_dir), "CMakeLists.txt"))
    if mkdir_build:
mkdir_p(os.path.join(os.path.dirname(out_dir), "build"))
os.chdir(prev_cwd_path)
return 0
def rm_wildcard(pattern):
for f in glob.glob(pattern):
os.remove(f)
def cp_wildcard(pattern, destdir):
for f in glob.glob(pattern):
shutil.copy(f, destdir)
def mkdir_p(dirpath):
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(dirpath):
pass
else:
raise
def generate_client(out_dir):
global fastrtps_version
    # If generating into a non-default path, set aside the files in the default
    # client path (rename with a trailing underscore) so they are not used.
if default_client_out != out_dir:
def_file = os.path.join(default_client_out, "microRTPS_client.cpp")
if os.path.isfile(def_file):
os.rename(def_file, def_file.replace(".cpp", ".cpp_"))
def_file = os.path.join(default_client_out, "microRTPS_transport.cpp")
if os.path.isfile(def_file):
os.rename(def_file, def_file.replace(".cpp", ".cpp_"))
def_file = os.path.join(default_client_out, "microRTPS_transport.h")
if os.path.isfile(def_file):
os.rename(def_file, def_file.replace(".h", ".h_"))
px_generate_uorb_topic_files.generate_uRTPS_general(classifier.msgs_to_send, classifier.alias_msgs_to_send, classifier.msgs_to_receive, classifier.alias_msgs_to_receive, msg_dir,
out_dir, uorb_templates_dir, package, px_generate_uorb_topic_files.INCL_DEFAULT, classifier.msg_id_map, fastrtps_version, ros2_distro, uRTPS_CLIENT_TEMPL_FILE)
# Final steps to install client
cp_wildcard(os.path.join(urtps_templates_dir,
"microRTPS_transport.*"), out_dir)
return 0
if agent:
generate_agent(agent_out_dir)
print(("\nAgent created in: " + agent_out_dir))
if client:
generate_client(client_out_dir)
print(("\nClient created in: " + client_out_dir))
|
|
import os
import sys
import shutil
import tensorflow as tf
import numpy as np
import png
from e2eflow.core.flow_util import flow_to_color, flow_error_avg, outlier_pct
from e2eflow.core.flow_util import flow_error_image
from e2eflow.util import config_dict
from e2eflow.core.image_warp import image_warp
from e2eflow.kitti.input import KITTIInput
from e2eflow.kitti.data import KITTIData
from e2eflow.chairs.data import ChairsData
from e2eflow.chairs.input import ChairsInput
from e2eflow.sintel.data import SintelData
from e2eflow.sintel.input import SintelInput
from e2eflow.middlebury.input import MiddleburyInput
from e2eflow.middlebury.data import MiddleburyData
from e2eflow.core.unsupervised import unsupervised_loss
from e2eflow.core.input import resize_input, resize_output_crop, resize_output, resize_output_flow
from e2eflow.core.train import restore_networks
from e2eflow.ops import forward_warp
from e2eflow.gui import display
from e2eflow.core.losses import DISOCC_THRESH, occlusion, create_outgoing_mask
from e2eflow.util import convert_input_strings
tf.app.flags.DEFINE_string('dataset', 'kitti',
'Name of dataset to evaluate on. One of {kitti, sintel, chairs, mdb}.')
tf.app.flags.DEFINE_string('variant', 'train_2012',
'Name of variant to evaluate on.'
'If dataset = kitti, one of {train_2012, train_2015, test_2012, test_2015}.'
'If dataset = sintel, one of {train_clean, train_final}.'
'If dataset = mdb, one of {train, test}.')
tf.app.flags.DEFINE_string('ex', '',
'Experiment name(s) (can be comma separated list).')
tf.app.flags.DEFINE_integer('num', 10,
'Number of examples to evaluate. Set to -1 to evaluate all.')
tf.app.flags.DEFINE_integer('num_vis', 100,
                            'Number of evaluations to visualize. Set to -1 to visualize all.')
tf.app.flags.DEFINE_string('gpu', '0',
'GPU device to evaluate on.')
tf.app.flags.DEFINE_boolean('output_benchmark', False,
'Output raw flow files.')
tf.app.flags.DEFINE_boolean('output_visual', False,
'Output flow visualization files.')
tf.app.flags.DEFINE_boolean('output_backward', False,
'Output backward flow files.')
tf.app.flags.DEFINE_boolean('output_png', True, # TODO finish .flo output
'Raw output format to use with output_benchmark.'
'Outputs .png flow files if true, output .flo otherwise.')
FLAGS = tf.app.flags.FLAGS
NUM_EXAMPLES_PER_PAGE = 4
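# Example invocation (illustrative; the script file name is an assumption, the
# flags are the ones defined above):
#
#   python eval.py --dataset kitti --variant train_2012 --ex my_experiment --num 20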
def write_rgb_png(z, path, bitdepth=8):
z = z[0, :, :, :]
with open(path, 'wb') as f:
writer = png.Writer(width=z.shape[1], height=z.shape[0], bitdepth=bitdepth)
z2list = z.reshape(-1, z.shape[1]*z.shape[2]).tolist()
writer.write(f, z2list)
def flow_to_int16(flow):
_, h, w, _ = tf.unstack(tf.shape(flow))
u, v = tf.unstack(flow, num=2, axis=3)
r = tf.cast(tf.maximum(0.0, tf.minimum(u * 64.0 + 32768.0, 65535.0)), tf.uint16)
g = tf.cast(tf.maximum(0.0, tf.minimum(v * 64.0 + 32768.0, 65535.0)), tf.uint16)
b = tf.ones([1, h, w], tf.uint16)
return tf.stack([r, g, b], axis=3)
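# Note: the encoding above follows the KITTI flow PNG convention: each flow
# component is stored as uint16 via value*64 + 2**15, with the third channel
# used as a validity mask, so a reader recovers the flow as (png - 2**15) / 64.0.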
def write_flo(flow, filename):
"""
write optical flow in Middlebury .flo format
:param flow: optical flow map
:param filename: optical flow file path to be saved
:return: None
"""
flow = flow[0, :, :, :]
f = open(filename, 'wb')
magic = np.array([202021.25], dtype=np.float32)
height, width = flow.shape[:2]
magic.tofile(f)
np.int32(width).tofile(f)
np.int32(height).tofile(f)
data = np.float32(flow).flatten()
data.tofile(f)
f.close()
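# Minimal sketch of the inverse reader for the .flo layout written above
# (magic float32, int32 width, int32 height, interleaved float32 u/v values).
# It is not used by the evaluation code below; provided for reference only.
def read_flo(filename):
    with open(filename, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)[0]
        assert magic == np.float32(202021.25), 'Invalid .flo magic number'
        width = int(np.fromfile(f, np.int32, count=1)[0])
        height = int(np.fromfile(f, np.int32, count=1)[0])
        data = np.fromfile(f, np.float32, count=2 * width * height)
    return data.reshape(height, width, 2)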
def _evaluate_experiment(name, input_fn, data_input):
normalize_fn = data_input._normalize_image
resized_h = data_input.dims[0]
resized_w = data_input.dims[1]
current_config = config_dict('../config.ini')
exp_dir = os.path.join(current_config['dirs']['log'], 'ex', name)
config_path = os.path.join(exp_dir, 'config.ini')
if not os.path.isfile(config_path):
config_path = '../config.ini'
if not os.path.isdir(exp_dir) or not tf.train.get_checkpoint_state(exp_dir):
exp_dir = os.path.join(current_config['dirs']['checkpoints'], name)
config = config_dict(config_path)
params = config['train']
convert_input_strings(params, config_dict('../config.ini')['dirs'])
dataset_params_name = 'train_' + FLAGS.dataset
if dataset_params_name in config:
params.update(config[dataset_params_name])
ckpt = tf.train.get_checkpoint_state(exp_dir)
if not ckpt:
raise RuntimeError("Error: experiment must contain a checkpoint")
ckpt_path = exp_dir + "/" + os.path.basename(ckpt.model_checkpoint_path)
with tf.Graph().as_default(): #, tf.device('gpu:' + FLAGS.gpu):
inputs = input_fn()
im1, im2, input_shape = inputs[:3]
truth = inputs[3:]
height, width, _ = tf.unstack(tf.squeeze(input_shape), num=3, axis=0)
im1 = resize_input(im1, height, width, resized_h, resized_w)
im2 = resize_input(im2, height, width, resized_h, resized_w) # TODO adapt train.py
_, flow, flow_bw = unsupervised_loss(
(im1, im2),
normalization=data_input.get_normalization(),
params=params, augment=False, return_flow=True)
im1 = resize_output(im1, height, width, 3)
im2 = resize_output(im2, height, width, 3)
flow = resize_output_flow(flow, height, width, 2)
flow_bw = resize_output_flow(flow_bw, height, width, 2)
flow_fw_int16 = flow_to_int16(flow)
flow_bw_int16 = flow_to_int16(flow_bw)
im1_pred = image_warp(im2, flow)
im1_diff = tf.abs(im1 - im1_pred)
#im2_diff = tf.abs(im1 - im2)
#flow_bw_warped = image_warp(flow_bw, flow)
if len(truth) == 4:
flow_occ, mask_occ, flow_noc, mask_noc = truth
flow_occ = resize_output_crop(flow_occ, height, width, 2)
flow_noc = resize_output_crop(flow_noc, height, width, 2)
mask_occ = resize_output_crop(mask_occ, height, width, 1)
mask_noc = resize_output_crop(mask_noc, height, width, 1)
#div = divergence(flow_occ)
#div_bw = divergence(flow_bw)
occ_pred = 1 - (1 - occlusion(flow, flow_bw)[0])
def_pred = 1 - (1 - occlusion(flow, flow_bw)[1])
disocc_pred = forward_warp(flow_bw) < DISOCC_THRESH
disocc_fw_pred = forward_warp(flow) < DISOCC_THRESH
image_slots = [((im1 * 0.5 + im2 * 0.5) / 255, 'overlay'),
(im1_diff / 255, 'brightness error'),
#(im1 / 255, 'first image', 1, 0),
#(im2 / 255, 'second image', 1, 0),
#(im2_diff / 255, '|first - second|', 1, 2),
(flow_to_color(flow), 'flow'),
#(flow_to_color(flow_bw), 'flow bw prediction'),
#(tf.image.rgb_to_grayscale(im1_diff) > 20, 'diff'),
#(occ_pred, 'occ'),
#(def_pred, 'disocc'),
#(disocc_pred, 'reverse disocc'),
#(disocc_fw_pred, 'forward disocc prediction'),
#(div, 'div'),
#(div < -2, 'neg div'),
#(div > 5, 'pos div'),
#(flow_to_color(flow_occ, mask_occ), 'flow truth'),
(flow_error_image(flow, flow_occ, mask_occ, mask_noc),
'flow error') # (blue: correct, red: wrong, dark: occluded)
]
# list of (scalar_op, title)
scalar_slots = [(flow_error_avg(flow_noc, flow, mask_noc), 'EPE_noc'),
(flow_error_avg(flow_occ, flow, mask_occ), 'EPE_all'),
(outlier_pct(flow_noc, flow, mask_noc), 'outliers_noc'),
(outlier_pct(flow_occ, flow, mask_occ), 'outliers_all')]
elif len(truth) == 2:
flow_gt, mask = truth
flow_gt = resize_output_crop(flow_gt, height, width, 2)
mask = resize_output_crop(mask, height, width, 1)
image_slots = [((im1 * 0.5 + im2 * 0.5) / 255, 'overlay'),
(im1_diff / 255, 'brightness error'),
(flow_to_color(flow), 'flow'),
(flow_to_color(flow_gt, mask), 'gt'),
]
# list of (scalar_op, title)
scalar_slots = [(flow_error_avg(flow_gt, flow, mask), 'EPE_all')]
else:
image_slots = [(im1 / 255, 'first image'),
#(im1_pred / 255, 'warped second image', 0, 1),
(im1_diff / 255, 'warp error'),
#(im2 / 255, 'second image', 1, 0),
#(im2_diff / 255, '|first - second|', 1, 2),
(flow_to_color(flow), 'flow prediction')]
scalar_slots = []
num_ims = len(image_slots)
image_ops = [t[0] for t in image_slots]
scalar_ops = [t[0] for t in scalar_slots]
image_names = [t[1] for t in image_slots]
scalar_names = [t[1] for t in scalar_slots]
all_ops = image_ops + scalar_ops
image_lists = []
averages = np.zeros(len(scalar_ops))
sess_config = tf.ConfigProto(allow_soft_placement=True)
exp_out_dir = os.path.join('../out', name)
if FLAGS.output_visual or FLAGS.output_benchmark:
if os.path.isdir(exp_out_dir):
shutil.rmtree(exp_out_dir)
os.makedirs(exp_out_dir)
shutil.copyfile(config_path, os.path.join(exp_out_dir, 'config.ini'))
with tf.Session(config=sess_config) as sess:
saver = tf.train.Saver(tf.global_variables())
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
restore_networks(sess, params, ckpt, ckpt_path)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,
coord=coord)
# TODO adjust for batch_size > 1 (also need to change image_lists appending)
max_iter = FLAGS.num if FLAGS.num > 0 else None
try:
num_iters = 0
while not coord.should_stop() and (max_iter is None or num_iters != max_iter):
all_results = sess.run([flow, flow_bw, flow_fw_int16, flow_bw_int16] + all_ops)
flow_fw_res, flow_bw_res, flow_fw_int16_res, flow_bw_int16_res = all_results[:4]
all_results = all_results[4:]
image_results = all_results[:num_ims]
scalar_results = all_results[num_ims:]
iterstr = str(num_iters).zfill(6)
if FLAGS.output_visual:
path_col = os.path.join(exp_out_dir, iterstr + '_flow.png')
path_overlay = os.path.join(exp_out_dir, iterstr + '_img.png')
path_error = os.path.join(exp_out_dir, iterstr + '_err.png')
write_rgb_png(image_results[0] * 255, path_overlay)
write_rgb_png(image_results[1] * 255, path_col)
write_rgb_png(image_results[2] * 255, path_error)
if FLAGS.output_benchmark:
path_fw = os.path.join(exp_out_dir, iterstr)
if FLAGS.output_png:
write_rgb_png(flow_fw_int16_res, path_fw + '_10.png', bitdepth=16)
else:
write_flo(flow_fw_res, path_fw + '_10.flo')
if FLAGS.output_backward:
                            path_bw = os.path.join(exp_out_dir, iterstr + '_01.png')
                            write_rgb_png(flow_bw_int16_res, path_bw, bitdepth=16)
if num_iters < FLAGS.num_vis:
image_lists.append(image_results)
averages += scalar_results
if num_iters > 0:
sys.stdout.write('\r')
num_iters += 1
sys.stdout.write("-- evaluating '{}': {}/{}"
.format(name, num_iters, max_iter))
sys.stdout.flush()
print()
except tf.errors.OutOfRangeError:
pass
averages /= num_iters
coord.request_stop()
coord.join(threads)
for t, avg in zip(scalar_slots, averages):
_, scalar_name = t
print("({}) {} = {}".format(name, scalar_name, avg))
return image_lists, image_names
def main(argv=None):
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
print("-- evaluating: on {} pairs from {}/{}"
.format(FLAGS.num, FLAGS.dataset, FLAGS.variant))
default_config = config_dict()
dirs = default_config['dirs']
if FLAGS.dataset == 'kitti':
data = KITTIData(dirs['data'], development=True)
data_input = KITTIInput(data, batch_size=1, normalize=False,
dims=(384,1280))
inputs = getattr(data_input, 'input_' + FLAGS.variant)()
elif FLAGS.dataset == 'chairs':
data = ChairsData(dirs['data'], development=True)
data_input = ChairsInput(data, batch_size=1, normalize=False,
dims=(384,512))
if FLAGS.variant == 'test_2015' and FLAGS.num == -1:
FLAGS.num = 200
elif FLAGS.variant == 'test_2012' and FLAGS.num == -1:
FLAGS.num = 195
elif FLAGS.dataset == 'sintel':
data = SintelData(dirs['data'], development=True)
data_input = SintelInput(data, batch_size=1, normalize=False,
dims=(512,1024))
if FLAGS.variant in ['test_clean', 'test_final'] and FLAGS.num == -1:
FLAGS.num = 552
elif FLAGS.dataset == 'mdb':
data = MiddleburyData(dirs['data'], development=True)
data_input = MiddleburyInput(data, batch_size=1, normalize=False,
dims=(512,640))
if FLAGS.variant == 'test' and FLAGS.num == -1:
FLAGS.num = 12
input_fn = getattr(data_input, 'input_' + FLAGS.variant)
results = []
for name in FLAGS.ex.split(','):
result, image_names = _evaluate_experiment(name, input_fn, data_input)
results.append(result)
display(results, image_names)
if __name__ == '__main__':
tf.app.run()
|
|
# _*_ coding: utf-8 _*_
"""
Implements a base class for all Synchrony test suites to quickly create peer nodes.
"""
import time
import pprint
import random
import hashlib
import unittest
import binascii
import urlparse
from copy import deepcopy
from synchrony import app
from synchrony.models import Revision
from synchrony.controllers import dht
from synchrony.controllers import utils
# The protocol handlers below also rely on these names; the exact import paths
# for the Crypto, SQLAlchemy and model objects are assumptions based on how
# they are used in this module.
import json
import base64
from hashlib import sha1
from sqlalchemy import and_
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from synchrony import db
from synchrony.models import User, Friend, Network, Peer
class BaseSuite(unittest.TestCase):
dfp = 0.0 # 0% of peers are malicious by default
alpha = 0.0
beta = 0.85 # Normalisation factor
ksize = 20
iterations = 100 # calculate_trust iterates 100 times by default
peer_amount = 25
storage_method = "rpc_append"
def setUp(self):
print "\nCreating %i peers configured to use %s." % \
(self.peer_amount, self.storage_method)
self.peers = create_peers(self.peer_amount, self.ksize, self.storage_method)
count = int(len(self.peers) * self.dfp)
# dht.log("%i peers will be malicious for this test." % count)
# for i in range(count):
# self.peers[i] ...
# All remaining peers considered honest are automatically
# added to one anothers' bucket of pre-trusted peers.
#
# Your test suite(s) will want to adjust this manually after the fact.
self.honest_peers = {}
honest_count = len(self.peers) - count
dht.log("Creating %i pre-trusted peers." % honest_count)
for j in range(honest_count):
self.honest_peers[self.peers[count+j].node.long_id] = self.peers[count+j]
for router in self.honest_peers.values():
for r in self.honest_peers.values():
if router.node.threeple == r.node.threeple: continue
node = router.get_existing_node(r.node.threeple)
if not node: continue
node.trust += router.protocol.epsilon
router.tbucket[node.long_id] = node
# dht.log("Introduced %s to %s as a pre-trusted peer." % (node, router))
# We add these RoutingTable objects as an attributes of mocked methods
# so they can find other nodes and work on their protocol instances.
#
# Even though we're emulating dht.SynchronyProtocol to avoid network
# and database calls, we still stick our mock functions on those instances
# for the time being.
mock_transmit.peers = self.peers
dht.transmit = mock_transmit
mock_get.peers = self.peers
dht.get = mock_get
#mock_fetch_revision.peers = self.peers
#for key in self.peers:
# self.peers[key].protocol.fetch_revision = mock_fetch_revision
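# Illustrative subclass (kept as a comment so the test runner does not pick it
# up; the names are made up). A concrete suite only needs to override the class
# attributes above and use the peers created in setUp:
#
#   class TrustPropagationSuite(BaseSuite):
#       dfp = 0.2          # 20% of peers malicious
#       peer_amount = 10
#
#       def test_peers_created(self):
#           self.assertEqual(len(self.peers), self.peer_amount)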
def create_peers(peer_amount, ksize, storage_method):
peers = {}
for i in range(peer_amount):
peers[i] = dht.RoutingTable(
"127.0.0.1",
random.randint(0, 999999),
app.key.publickey().exportKey(),
None,
)
peers[i].buckets = [dht.KBucket(0, 2**160, 20)]
rpcmethod = getattr(peers[i].protocol, storage_method, None)
if not rpcmethod:
raise Exception("Unknown storage method: %s" % storage_method)
peers[i].storage_method = rpcmethod
# Our TestProtocol is a subclass that calls methods on peer routing
# tables directly instead of making network calls and database commits.
peers[i].protocol = TestProtocol(
peers[i],
dht.Storage(),
peers[i].ksize,
peers
)
# Attempted pings in add_contact would cause some previously
# added peers to be promptly removed. We manually swap the method
# for a mockup and reintroduce the original once we have our set of peers.
peers[i].protocol.original_ping = peers[i].protocol.rpc_ping
peers[i].protocol.rpc_ping = mock_ping
log = dht.log
# We check for unique port numbers because addr is /usually/ an (ip, port)
# tuple when calling dht.transmit.
ports = [p.node.port for p in peers.values()]
unique_ports = len(set(ports)) == len(peers.keys())
dht.log("Unique port numbers: %s" % str("Yes." if unique_ports else "No. Recreating."))
if not unique_ports:
        return create_peers(peer_amount, ksize, storage_method)
dht.log("Introducing peers to one another.")
dht.log = lambda x, y=None: x
for peer in peers.values():
[peer.add_contact(router.node) for router in peers.values()]
dht.log = log
print pprint.pformat(peers)
# Please god, forgive this fourth loop?
    for p in peers.values():
        p.protocol.rpc_ping = p.protocol.original_ping
return peers
def mock_ping(addr):
return
def mock_transmit(routes, addr, data):
"""
Put dht.RoutingTable instances through to one another without calling out
to the network.
"""
# Test case setup method should set a peers attr on this function beforehand
if not hasattr(mock_transmit, "peers"):
dht.log("Can't find test peers.")
dht.log("synchrony.test.utils.mock_transmit is missing a peers dictionary.")
return
if isinstance(addr, dht.Node):
addr = (addr.ip, addr.port)
# Filter for everyone who isn't the intended recipient
peer_routes = filter(
lambda r: r if r.node.port == addr[1] else None,
[r for r in mock_transmit.peers.values()]
)
if not peer_routes:
dht.log("Unknown peer %s:%i" % addr)
return
peer_routes = peer_routes[0]
data = dht.envelope(routes, data)
for field in data.keys():
if field.startswith('rpc_'):
rpc_name = 'handle_%s' % field.replace('rpc_', '')
rpc_method = getattr(peer_routes.protocol, rpc_name, None)
if not rpc_method:
dht.log("%s tried to call unknown procedure %s." % \
(routes.node, rpc_name), "warning")
return
return rpc_method(data)
def mock_get(addr, path, field="data", all=True):
if isinstance(addr, dht.Node):
f = lambda f: f.node.threeple == addr.threeple
addr = filter(f, mock_get.peers.values())
if addr:
if '/' in path:
node_id = long(path.split('/')[1])
for peer in addr[0]:
if peer.long_id == node_id:
return peer.jsonify()
return {}
return [p.jsonify() for p in addr[0]]
return []
def mock_fetch_revision(url, content_hash, nodes):
first_node = mock_fetch_revision.peers.values()[0].node
f = lambda x: x.node.threeple == first_node.threeple
c = filter(f, nodes)
if not c: return
c = c[0]
return Revision.query.filter(Revision.hash == content_hash).first()
class TestProtocol(dht.SynchronyProtocol):
def __init__(self, router, storage, ksize, peers):
self.peers = peers
self.ksize = ksize
self.router = router
self.epsilon = 0.0001
self.storage = storage
self.source_node = router.node
self.downloads = dht.ForgetfulStorage() # content_hash -> (n.ip, n.port)
self.received_keys = dht.ForgetfulStorage(bound=2) # node -> [republish_messages,..]
super(dht.SynchronyProtocol, self).__init__()
def get_refresh_ids(self):
"""
Get ids to search for to keep old buckets up to date.
"""
ids = []
for bucket in self.router.get_lonely_buckets():
ids.append(random.randint(*bucket.range))
return ids
def rpc_ping(self, addr):
# "addr" may be an (addr, port) tuple
data = dht.transmit(self.router, addr, {"rpc_ping":True})
# Remove peer
if not data:
            if isinstance(addr, dht.Node):
self.router.remove_node(addr)
return
node = dht.Node(*data['node'], pubkey=data['pubkey'], router=self.router)
self.router.add_contact(node)
# FIXME: Ping nodes in the 'peers' part of the response.
# Don't let malicious nodes fill the routing table with
# information for peers who won't respond.
if 'peers' in data:
for peer in data['peers']:
if peer['node'][0] == self.source_node.long_id:
continue
peer = dht.Node(*peer['node'],
pubkey=peer['pubkey'],
router=self.router)
self.router.add_contact(peer)
# self.rpc_ping(node)
return node
def rpc_report_trust(self, node_to_rate, node_to_tell):
"""
The equivalent is a GET request to /v1/peers/node_id
"""
pass
def rpc_add_friend(self, local_uid, addr):
"""
addr is of the form "network_name/node_id/remote_user_id"
Implements ADD_FRIEND where we find the node in addr and
tell them a local user wants to add the remote UID as a friend.
"""
if addr.count("/") != 2:
return False, None
network, node_id, remote_uid = addr.split("/")
if network != self.router.network:
return False, None
node = dht.Node(long(node_id))
nearest = self.router.find_neighbours(node)
if len(nearest) == 0:
dht.log("There are no neighbours to help us add users on %s as friends." % node_id)
return False, None
        spider = dht.NodeSpider(self, node, nearest, self.ksize, self.router.alpha)
nodes = spider.find()
if len(nodes) != 1:
return False, None
node = nodes[0]
# Sometimes spidering doesn't get us all the way there.
# Check who we already know:
if node.long_id != long(node_id):
nodes = [n for n in self.router if n.long_id == long(node_id)]
if len(nodes) != 1:
return False, None
node = nodes[0]
dht.log(node_id, "debug")
dht.log(node.long_id, "debug")
dht.log("Found remote instance %s." % node)
message = {"rpc_add_friend": {"from": local_uid, "to": remote_uid}}
response = dht.transmit(self.router, node, message)
if not isinstance(response, dict) or not "response" in response:
return False, None
return response['response'], node
def rpc_chat(self, nodeple, data):
"""
Implements CHAT where we encrypt a message destined for the user with
UID on the receiving node.
Message data should be of the form
{
'to': 'uid',
'from': ['uid', 'username'],
'type': Can be any of "message", "init", "close"
'body': {'m':'content'}
}
"""
# Worth retaining this ping call for the routing information we get.
node = self.rpc_ping(nodeple)
if node == None:
return
data = base64.b64encode(json.dumps(data))
key = RSA.importKey(node.pubkey)
data = key.encrypt(data, 32)
data = base64.b64encode(data[0])
response = dht.transmit(self.router, node, {'rpc_chat': data})
dht.log(response, "debug")
return response
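    # Illustrative call, following the payload format documented in the
    # docstring above (UIDs and usernames are made up):
    #   router.protocol.rpc_chat(node.threeple, {
    #       'to': 'remote-uid',
    #       'from': ['local-uid', 'alice'],
    #       'type': 'message',
    #       'body': {'m': 'hello'},
    #   })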
def rpc_edit(self, node, data):
"""
Implements inter-instance EDIT.
Message data should be of the form
{
'stream': 'stream_id',
'from': ['uid', 'username'],
'edit': '<span>Some DOM nodes to match and replace</span>'
}
"""
data = base64.b64encode(json.dumps(data))
key = RSA.importKey(node.pubkey)
data = key.encrypt(data, 32)
        dht.transmit(self.router, self.get_address(node), {'rpc_edit': data})
def rpc_leaving(self, node):
addr = self.get_address(node)
return dht.transmit(self.router, addr, {"rpc_leaving":True})
def rpc_append(self, node, url_hash, content_hash):
"""
Allows senders to tell peers they have the data for the hash of a path.
Hash here is the hash made from the content the peer has stored.
They're letting you know they have data that corresponds to the hash.
{
peers: [],
pubkey: "",
signature: "",
time: 1440064069.0,
rpc_append: {url_hash: content_hash},
}
"""
# Append this peer to the list of nodes storing data for this path
# urls[data['url']].append(sender)
addr = self.get_address(node)
data = {"rpc_append": {url_hash: content_hash}}
# if addr.threeple == self.source_node.threeple:
# self.storage[url_hash] = (content_hash, addr)
return dht.transmit(self.router, addr, data)
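    # Illustrative usage (the url/content hashing scheme here is an assumption):
    # after caching a revision locally, a node could announce it to a peer with
    #   router.protocol.rpc_append(peer_node,
    #                              hashlib.sha1(url).hexdigest(),
    #                              hashlib.sha1(content).hexdigest())
    # and the receiving side records the mapping in handle_append below.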
def rpc_find_node(self, node_to_ask, node_to_find):
address = (node_to_ask.ip, node_to_ask.port)
message = {'key': node_to_find.id}
message = dht.envelope(self.router, message)
return self.handle_find_node(message)
def rpc_find_value(self, node_to_ask, node_to_find):
address = (node_to_ask.ip, node_to_ask.port)
message = {'rpc_find_value': binascii.hexlify(node_to_find.id)}
return dht.transmit(self.router, address, message)
def rpc_transfer_routing_table(self, sender, nodeid):
pass
def rpc_republish(self, node, data):
"""
Please refer to SynchronyProtocol.republish_keys to see what's really going
on here.
The data argument here is a list that looks like this:
[{'node': [[nodeple], 'pubkey'],'keys': {signature: key_data}}, ...]
Where "key_data" is a b64encoded JSON dump of the return value for
self.storage.get_entries_for(self.source_node).
Peers save this message, as we remember when they send rpc_republish
messages to us. We forward previous rpc_republish messages for peers
we still have as a contact.
"""
addr = self.get_address(node)
data = {'rpc_republish': data}
return dht.transmit(self.router, addr, data)
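    # Illustrative shape of a single republish entry relayed above and verified
    # in handle_republish below (all values are placeholders):
    #   {'node': [(long_id, '127.0.0.1', 9000), '<PEM public key>'],
    #    'keys': {'<signature>': '<base64-encoded JSON of storage entries>'}}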
def rpc_transfer_storage_table(self, node):
"""
Given a new node, send it all the keys/values it should be storing.
node here is a new node that's just joined or that we've just found
out about.
For each key in storage, get k closest nodes. If newnode is closer
than the furthest in that list, and the node for this server
is closer than the closest in that list, then store the key/value
on the new node (per section 2.5 of the paper)
"""
ds = []
for key, value in self.storage.iteritems():
keynode = dht.Node(sha1(key).digest())
neighbours = self.router.find_neighbours(keynode)
if len(neighbours) > 0:
new_node_close = node.distance_to(keynode) < neighbours[-1].distance_to(keynode)
this_node_closest = self.source_node.distance_to(keynode) < neighbours[0].distance_to(keynode)
if len(neighbours) == 0 or (new_node_close and this_node_closest):
ds.append(self.call_store(node, key, value))
return ds
# handle_* methods (generally) indicate a request initiated by a peer node.
def handle_ping(self, data):
node = self.read_envelope(data)
dht.log("Received rpc_ping from %s." % node)
return dht.envelope(self.router, {'ping':"pong"})
def handle_add_friend(self, data):
"""
Match to UID and return a new Friend instance representing our side.
"""
assert "rpc_add_friend" in data
dht.log(data, "debug")
request = data['rpc_add_friend']
if not "from" in request or not "to" in request:
return False
node = self.read_envelope(data)
user = User.query.filter(User.uid == request['to']).first()
if not user: return None
from_addr = "/".join([self.router.network, str(node.long_id), request['from']])
friend = Friend.query.filter(
and_(Friend.address == from_addr, Friend.user == user)
).first()
if friend:
# This permits the remote side to see if they're added or blocked.
return dht.envelope(self.router, {"response": friend.jsonify()})
node = dht.Node(*data['node'])
network = Network.query.filter(Network.name == self.router.network).first()
        if network is None:
network = Network(name = self.router.network)
peer = Peer.query.filter(
and_(Peer.network == network,
Peer.ip == node.ip,
Peer.port == node.port)
).first()
if peer == None:
peer = Peer()
peer.load_node(node)
peer.network = network
friend = Friend(address=from_addr)
friend.state = 1
friend.received = True
# TODO: Make this correspond to the existing rpc_friend method.
user.friends.append(friend)
peer.friends.append(friend)
db.session.add(user)
db.session.add(peer)
db.session.add(friend)
db.session.add(network)
db.session.commit()
return dht.envelope(self.router, {"response": friend.jsonify()})
def handle_chat(self, data):
"""
Move a message from a remote node up to the UI if the recipient
UID has an active connection to the chat stream.
"""
node = self.read_envelope(data)
# With the ciphertext being a binary string we also b64encode it
message_content = base64.b64decode(data['rpc_chat'])
message_content = app.key.decrypt((message_content,))
data = json.loads(base64.b64decode(message_content))
dht.log(message_content, "debug")
dht.log(data, "debug")
user = User.query.filter(User.uid == data['to']).first()
if user == None:
return {"error": "No such user."}
friend = Friend.query.filter(and_(Friend.user == user,
Friend.network == self.router.network,
Friend.node_id == str(node.long_id),
Friend.uid == data['from'][0])
).first()
if friend:
available = utils.check_availability(self.router.httpd, "chat", user)
if not available:
return {"error": "The intended recipient isn't connected to chat."}
if data['type'] == "init":
# Enable the recipient to reply by forcing them into the channel
dht.log("Changing chat channel of %s to %s." % \
(user.username, friend.address), "debug")
utils.change_channel(self.router.httpd,
"chat",
user,
friend.address)
utils.broadcast(self.router.httpd,
"chat",
"rpc_chat_init",
data['from'],
user=user)
if data['type'] == "message":
utils.broadcast(self.router.httpd,
"chat",
"rpc_chat",
data,
user=user)
return {"state": "delivered"}
def handle_edit(self, data):
self.read_envelope(data)
data = app.key.decrypt(data['rpc_edit'])
pass
def handle_leaving(self, data):
        conscientious_objector = self.read_envelope(data)
        self.router.remove_node(conscientious_objector)
def handle_find_node(self, data):
"""
Used for finding existing nodes near to a target ID.
"""
source = self.read_envelope(data)
if not 'key' in data:
return "No target key specified.", 400
node = dht.Node(data['key'])
dht.log("Finding neighbours of %s." % node.long_id)
nodes = {'nodes': [p.jsonify() for p in \
self.router.find_neighbours(node, exclude=source)]}
return dht.envelope(self.router, nodes)
def handle_find_value(self, data):
source = self.read_envelope(data)
if not source: return
if not 'rpc_find_value' in data: return
# usually comes in as unicode
if not isinstance(data['rpc_find_value'], (unicode, str)):
return
key = data['rpc_find_value']
dht.log("Finding value for %s" % key)
value = self.storage.get(key, None)
        if value is None:
            dht.log("No value found for %s" % key)
            # No local value; fall back to returning the closest known nodes.
            data['key'] = key
            return self.handle_find_node(data)
dht.log("Found %s" % value)
return dht.envelope(self.router,{'value':value})
def handle_append(self, data):
"""
Handle messages of the form {'rpc_append': {'url_hash': 'content_hash'}}
We do this by inserting the data into a structure that looks like
{ 'url_hash': {'content_hash': [(ts,nodeple)]}}
"""
node = self.read_envelope(data)
if max(node.trust, 0) == 0:
dht.log("%s with negative trust rating tried to append." % node, "warning")
return False
url_hash, content_hash = data['rpc_append'].items()[0]
dht.log("Received rpc_append request from %s." % node)
dht.log("Adjusting known peers for %s." % url_hash)
self.storage[url_hash] = (content_hash, node)
# if self.router.options and self.router.options.autoreplicate:
# if not Revision.query.filter(Revision.hash == content_hash).first():
# revision = self.fetch_revision(content_hash, [source])
# if revision:
# db.session.add(revision)
# db.session.commit()
return True
def handle_republish(self, data):
"""
Retain signed messages here so they can be relayed.
"""
node = self.read_envelope(data)
if max(node.trust, 0) == 0:
dht.log("%s with negative trust rating tried to republish." % node, "warning")
return False
dht.log("Received rpc_republish from %s." % node)
republished_keys = data['rpc_republish']
for message in republished_keys:
if max(node.trust, 0) == 0:
return False
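            # Each republished message maps an RSA signature (a single long, as
            # produced by key.sign) to a base64-encoded JSON payload of keys.
            # Verify the SHA256 digest of the payload against the originating
            # node's public key before merging anything.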
signature = (long(message['keys'].keys()[0]),)
data = message['keys'].values()[0]
hash = SHA256.new(data).digest()
key = RSA.importKey(message['node'][1])
if not key.verify(hash, signature):
dht.log("Invalid signatures for keys provided by %s." % node, "warning")
node.trust -= self.epsilon
continue
try:
keys = json.loads(base64.b64decode(data))
except Exception, e:
dht.log("Error unserialising republished keys: %s" % e.message, "error")
continue
referee = dht.Node(*message['node'][0], pubkey=message['node'][1])
if self.router.is_new_node(referee):
self.rpc_ping(referee)
# Get the trust rating of this referee
referee = self.router.get_existing_node(referee)
if not referee or referee.trust < 0:
dht.log("%s is currently republishing for %s." % (node, referee), "warning")
continue
self.storage.merge(keys)
self.received_keys[referee.id] = message
return True
def fetch_revision(self, url, content_hash, nodeples):
"""
        Access the most trustworthy peers' routing tables and see if they have
        any references to themselves for the desired url/content_hash pair.
        Update local download references and then return the revision.
"""
urls = []
nodes = []
routers = []
hashed_url = hashlib.sha1(url).digest()
revision = Revision.query.filter(Revision.hash == content_hash).first()
if revision == None:
dht.log("No match for %s could be found in the current database.", "error")
for n in nodeples:
if n[1] == self.source_node.ip and n[2] == self.source_node.port:
continue
# Get local references to peer nodes
node = self.router.get_existing_node(n)
if node:
nodes.append(node)
continue
node = self.rpc_ping(n)
if node == None:
continue
# Get remote peer by its own routing table
# TestProtocol.peers is test suite shorthand for all peers
for node in dht.sort_nodes_by_trust(nodes):
for router in self.peers.values():
if node.threeple == router.node.threeple:
routers.append(router)
if not any(routers):
return None
for router in routers:
node = self.router.get_existing_node(router.node.threeple)
node.trust += self.epsilon
references = router.protocol.storage\
.get(binascii.hexlify(hashed_url), None)
if not references: continue
# downloads is a list of (timestamp, node_triple) pairs.
downloads = references.get(content_hash)
if not any(downloads): continue
for download in downloads:
if download[1] == router.node.threeple:
self.downloads[url] = {revision.hash: (node.ip, node.port)}
return revision
return None
def republish_keys(self):
"""
        Retransmit the URL and content hashes we are serving and sign the
        message. Signing permits peers to relay the message and permits us
        to republish on behalf of our peers.
"""
data = self.storage.get_entries_for(self.source_node)
messages = []
threads = []
# Organise our keys for republishing
if data:
data = base64.b64encode(json.dumps(data))
hash = SHA256.new(data).digest()
signature = app.key.sign(hash, '')[0]
messages.append(
{'node': [self.source_node.threeple, self.source_node.pubkey],
'keys': {signature: data}}
)
# Grab keys we've seen our peers republish
for key in self.received_keys:
messages.append(self.received_keys[key][-1])
# Tell everyone
if messages:
dht.log("Republishing keys.")
for node in self.router:
threads.append(gevent.spawn(self.rpc_republish, node, messages))
gevent.joinall(threads)
def read_envelope(self, data):
"""
Take an incoming message and either update the last_seen time for the
sender or add the sender as a new contact.
peers.py should also call this method once it's determined the network
a message is for. That way we can inspect the 'peers' attribute and
use read_envelope to also learn of new peers.
"""
# Validate the senders' node ID
seed = "%s:%i:%s" % (data['node'][1],data['node'][2],data['pubkey'])
if data['node'][0] != long(utils.generate_node_id(seed).encode('hex'), 16):
dht.log("\"%s\" is using an incorrect node ID." % data['node'][1], "warning")
return
# Learn of peers
# TODO: Spawn green threads to ping these nodes.
# NOTE: Don't permit a spammer to abuse the routing topology.
# This can include decrementing the senders trust rating for
# referring us to dead nodes.
if 'peers' in data:
for peer in data['peers']:
node = dht.Node(*peer['node'], pubkey=peer['pubkey'], router=self.router)
if node != self.source_node and self.router.is_new_node(node):
self.router.add_contact(node)
# Update last_seen times for contacts or add if new
node = dht.Node(*data['node'], pubkey=data['pubkey'], router=self.router)
existing_node = self.router.get_existing_node(node)
if existing_node:
existing_node.last_seen = time.time()
return existing_node
elif node != self.source_node:
self.router.add_contact(node)
return node
def decrement_trust(self, addr, severity=1):
"""
Implements the feedback mechanism for our trust metric.
"addr" is an (ip, port) tuple to match to a known peer.
"severity" is a floating point severity level indicating how bad the
content in question was perceived to be.
        Notes on how this all works as a distributed system are here:
http://nlp.stanford.edu/pubs/eigentrust.pdf
http://www.cc.gatech.edu/~lingliu/papers/2012/XinxinFan-EigenTrust++.pdf
http://dimacs.rutgers.edu/Workshops/InformationSecurity/slides/gamesandreputation.pdf
The second PDF is recommended.
"""
for node in self.router:
if node.ip == addr[0] and node.port == addr[1]:
amount = severity / 100.0
dht.log("Decrementing trust rating for %s by %f." % (node, amount), "warning")
node.trust -= 2 * self.epsilon
# peer = Peer.query.filter(
# and_(Peer.network == self.network,
# Peer.ip == addr[0],
# Peer.port == addr[1])
# ).first()
# if peer:
# peer.trust -= amount
# db.session.add(peer)
# db.session.commit()
return True
return False
def get_address(self, node):
if node.ip == self.source_node.ip:
address = ('127.0.0.1', node.port)
else:
address = (node.ip, node.port)
return address
|
|
#
# Collective Knowledge: CK-powered TensorFlow crowdbenchmarking (very early prototyping)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, [email protected], http://fursin.net
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
line='================================================================'
ffstat='ck-stat-flat-characteristics.json'
form_name='wa_web_form'
onchange='document.'+form_name+'.submit();'
hextra='<i><center>\n'
hextra+='This is an on-going long-term project. Please check our vision [ '
hextra+='<a href="http://doi.acm.org/10.1145/2909437.2909449">IWOCL\'16</a>, \n'
hextra+='<a href="http://arxiv.org/abs/1506.06256">CPC\'15</a>, \n'
hextra+='<a href="https://www.youtube.com/watch?v=Q94yWxXUMP0">YouTube</a>, \n'
hextra+='<a href="http://ctuning.org/cm/wiki/index.php?title=CM:data:45741e3fbcf4024b:1db78910464c9d05">wiki</a> ] '
hextra+=' and <a href="https://github.com/ctuning/ck-tensorflow">CK-TensorFlow GitHub repo</a> for more details!'
hextra+='</center></i>\n'
hextra+='<br>\n'
selector=[{'name':'Type', 'key':'tensorflow_type'},
{'name':'Network', 'key':'nn_type'},
{'name':'Platform', 'key':'plat_name'},
{'name':'CPU', 'key':'cpu_name', 'new_line':'yes'},
{'name':'OS', 'key':'os_name'},
{'name':'GPGPU', 'key':'gpgpu_name'}]
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# crowdsource these experiments
def crowdsource(i):
"""
Input: {
(local) - if 'yes', local crowd-benchmarking, instead of public
(user) - force different user ID/email for demos
(choices) - force different choices to program pipeline
              (repetitions)        - statistical repetitions (default=3), for now statistical analysis is not used (TBD)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import copy
import os
# Setting output
o=i.get('out','')
oo=''
if o=='con': oo='con'
quiet=i.get('quiet','')
er=i.get('exchange_repo','')
if er=='': er=ck.cfg['default_exchange_repo_uoa']
esr=i.get('exchange_subrepo','')
if esr=='': esr=ck.cfg['default_exchange_subrepo_uoa']
if i.get('local','')=='yes':
er='local'
esr=''
la=i.get('local_autotuning','')
repetitions=i.get('repetitions','')
if repetitions=='': repetitions=3
repetitions=int(repetitions)
record='no'
# Check if any input has . and convert to dict
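    # (e.g. a key "env.BATCH_SIZE" is re-inserted as the CK flat key
    #  "##env#BATCH_SIZE" via ck.set_by_flat_key)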
for k in list(i.keys()):
if k.find('.')>0:
v=i[k]
kk='##'+k.replace('.','#')
del(i[k])
r=ck.set_by_flat_key({'dict':i, 'key':kk, 'value':v})
if r['return']>0: return r
choices=i.get('choices',{})
xchoices=copy.deepcopy(choices)
# Get user
user=''
mcfg={}
ii={'action':'load',
'module_uoa':'module',
'data_uoa':cfg['module_deps']['program.optimization']}
r=ck.access(ii)
if r['return']==0:
mcfg=r['dict']
dcfg={}
ii={'action':'load',
'module_uoa':mcfg['module_deps']['cfg'],
'data_uoa':mcfg['cfg_uoa']}
r=ck.access(ii)
if r['return']>0 and r['return']!=16: return r
if r['return']!=16:
dcfg=r['dict']
user=dcfg.get('user_email','')
# Initialize local environment for program optimization ***********************************************************
pi=i.get('platform_info',{})
if len(pi)==0:
ii=copy.deepcopy(i)
ii['action']='initialize'
ii['module_uoa']=cfg['module_deps']['program.optimization']
ii['data_uoa']='tensorflow'
ii['exchange_repo']=er
ii['exchange_subrepo']=esr
ii['skip_welcome']='yes'
ii['skip_log_wait']='yes'
ii['crowdtuning_type']='tensorflow-crowd-benchmarking'
r=ck.access(ii)
if r['return']>0: return r
pi=r['platform_info']
user=r.get('user','')
hos=pi['host_os_uoa']
hosd=pi['host_os_dict']
tos=pi['os_uoa']
tosd=pi['os_dict']
tbits=tosd.get('bits','')
remote=tosd.get('remote','')
tdid=pi['device_id']
features=pi.get('features',{})
fplat=features.get('platform',{})
fos=features.get('os',{})
fcpu=features.get('cpu',{})
fgpu=features.get('gpu',{})
plat_name=fplat.get('name','')
plat_uid=features.get('platform_uid','')
os_name=fos.get('name','')
os_uid=features.get('os_uid','')
cpu_name=fcpu.get('name','')
if cpu_name=='': cpu_name='unknown-'+fcpu.get('cpu_abi','')
cpu_uid=features.get('cpu_uid','')
gpu_name=fgpu.get('name','')
gpgpu_name=''
sn=fos.get('serial_number','')
# Ask for cmd
tp=['cpu', 'cuda', 'opencl']
ck.out(line)
ck.out('Select TensorFlow library type:')
ck.out('')
r=ck.access({'action':'select_list',
'module_uoa':cfg['module_deps']['choice'],
'choices':tp})
if r['return']>0: return r
xtp=r['choice']
# Get extra platform features if "cuda" or "opencl"
run_cmd='default'
tags='lib,tensorflow,tensorflow-'+xtp
gpgpu_uid=''
if xtp=='cuda' or xtp=='opencl':
r=ck.access({'action':'detect',
'module_uoa':cfg['module_deps']['platform.gpgpu'],
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'type':xtp,
'share':'yes',
'exchange_repo':er,
'exchange_subrepo':esr})
if r['return']>0: return r
gfeat=r.get('features',{})
gpgpus=gfeat.get('gpgpu',[])
if len(gpgpus)>0:
gpgpu_name=gpgpus[0].get('gpgpu',{}).get('name','')
gpgpu_uid=gpgpus[0].get('gpgpu_uoa','')
# Get deps from TensorFlow program
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['program'],
'data_uoa':'tensorflow'})
if r['return']>0: return r
dd=r['dict']
deps=dd['compile_deps']
pp=r['path']
lib_dep=deps['lib-tensorflow']
lib_dep['tags']=tags
# Get explicit choices (batch size, num batches)
env=i.get('env',{})
echoices=dd['run_vars']
for k in echoices:
if env.get(k,'')!='':
echoices[k]=env[k]
# Check environment for selected type
r=ck.access({'action':'resolve',
'module_uoa':cfg['module_deps']['env'],
'deps':deps,
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'out':o})
if r['return']>0: return r
deps=r['deps']
# Prepare CK pipeline for a given workload
ii={'action':'pipeline',
'module_uoa':cfg['module_deps']['program'],
'data_uoa':'tensorflow',
'prepare':'yes',
'env':env,
'choices':choices,
'dependencies':deps,
'cmd_key':run_cmd,
'no_state_check':'yes',
'no_compiler_description':'yes',
'skip_info_collection':'yes',
'skip_calibration':'yes',
'cpu_freq':'max',
'gpu_freq':'max',
'env_speed':'yes',
'energy':'no',
'skip_print_timers':'yes',
'generate_rnd_tmp_dir':'no',
'out':oo}
rr=ck.access(ii)
if rr['return']>0: return rr
# ck.save_json_to_file({'json_file':'/tmp/xyz3.json','dict':rr, 'sort_keys':'yes'})
# exit(1)
fail=rr.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+rr.get('fail_reason','')+')'}
ready=rr.get('ready','')
if ready!='yes':
return {'return':11, 'error':'couldn\'t prepare universal CK program workflow'}
state=rr['state']
tmp_dir=state['tmp_dir']
# Clean pipeline
if 'ready' in rr: del(rr['ready'])
if 'fail' in rr: del(rr['fail'])
if 'return' in rr: del(rr['return'])
    # Check if aggregated stats
aggregated_stats={} # Pre-load statistics ...
# Prepare high-level experiment meta
meta={'cpu_name':cpu_name,
'os_name':os_name,
'plat_name':plat_name,
'gpu_name':gpu_name,
'tensorflow_type':xtp,
'gpgpu_name':gpgpu_name,
'cmd_key':run_cmd,
'echoices':echoices}
# Process deps
xdeps={}
xnn=''
xblas=''
for k in deps:
dp=deps[k]
xdeps[k]={'name':dp.get('name',''),
'data_name':dp.get('dict',{}).get('data_name',''),
'ver':dp.get('ver','')}
meta['xdeps']=xdeps
meta['nn_type']='alexnet'
mmeta=copy.deepcopy(meta)
# Extra meta which is not used to search similar case ...
mmeta['platform_uid']=plat_uid
mmeta['os_uid']=os_uid
mmeta['cpu_uid']=cpu_uid
mmeta['gpgpu_uid']=gpgpu_uid
mmeta['user']=user
# Check if already exists
# tbd
# Run CK pipeline *****************************************************
pipeline=copy.deepcopy(rr)
if len(choices)>0:
r=ck.merge_dicts({'dict1':pipeline['choices'], 'dict2':xchoices})
if r['return']>0: return r
ii={'action':'autotune',
'module_uoa':cfg['module_deps']['pipeline'],
'iterations':1,
'repetitions':repetitions,
'collect_all':'yes',
'process_multi_keys':['##characteristics#*'],
'tmp_dir':tmp_dir,
'pipeline':pipeline,
'stat_flat_dict':aggregated_stats,
"features_keys_to_process":["##choices#*"],
"record_params": {
"search_point_by_features":"yes"
},
'out':oo}
rrr=ck.access(ii)
if rrr['return']>0: return rrr
ls=rrr.get('last_iteration_output',{})
state=ls.get('state',{})
xchoices=copy.deepcopy(ls.get('choices',{}))
lsa=rrr.get('last_stat_analysis',{})
lsad=lsa.get('dict_flat',{})
ddd={'meta':mmeta}
ddd['choices']=xchoices
features=ls.get('features',{})
deps=ls.get('dependencies',{})
fail=ls.get('fail','')
fail_reason=ls.get('fail_reason','')
ch=ls.get('characteristics',{})
# Save pipeline
ddd['state']={'fail':fail, 'fail_reason':fail_reason}
ddd['characteristics']=ch
ddd['user']=user
if o=='con':
ck.out('')
ck.out('Saving results to the remote public repo ...')
ck.out('')
# Find remote entry
rduid=''
ii={'action':'search',
'module_uoa':work['self_module_uid'],
'repo_uoa':er,
'remote_repo_uoa':esr,
'search_dict':{'meta':meta}}
rx=ck.access(ii)
if rx['return']>0: return rx
lst=rx['lst']
if len(lst)==1:
rduid=lst[0]['data_uid']
else:
rx=ck.gen_uid({})
if rx['return']>0: return rx
rduid=rx['data_uid']
# Update meta
rx=ck.access({'action':'update',
'module_uoa':work['self_module_uid'],
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'dict':ddd,
'substitute':'yes',
'sort_keys':'yes'})
if rx['return']>0: return rx
# Push statistical characteristics
fstat=os.path.join(pp,tmp_dir,ffstat)
r=ck.save_json_to_file({'json_file':fstat, 'dict':lsad})
if r['return']>0: return r
rx=ck.access({'action':'push',
'module_uoa':work['self_module_uid'],
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'filename':fstat,
'overwrite':'yes'})
if rx['return']>0: return rx
os.remove(fstat)
# Info
if o=='con':
       ck.out('Successfully recorded results in remote repo (Entry UID='+rduid+')')
# Check host URL prefix and default module/action
url='http://cknowledge.org/repo/web.php?template=cknowledge&action=index&module_uoa=wfe&native_action=show&native_module_uoa=program.optimization&scenario=155b6fa5a4012a93&highlight_uid='+rduid
ck.out('')
ck.out('You can see your results at the following URL:')
ck.out('')
ck.out(url)
return {'return':0}
##############################################################################
# show results
def show(i):
"""
Input: {
(crowd_module_uoa) - if rendered from experiment crowdsourcing
(crowd_key) - add extra name to Web keys to avoid overlapping with original crowdsourcing HTML
(crowd_on_change) - reuse onchange doc from original crowdsourcing HTML
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
st=''
cmuoa=i.get('crowd_module_uoa','')
ckey=i.get('crowd_key','')
conc=i.get('crowd_on_change','')
if conc=='':
conc=onchange
hi_uid=i.get('highlight_uid','')
h='<hr>\n'
h+='<center>\n'
h+='\n\n<script language="JavaScript">function copyToClipboard (text) {window.prompt ("Copy to clipboard: Ctrl+C, Enter", text);}</script>\n\n'
h+='<h2>Aggregated results from TensorFlow crowd-benchmarking (time, accuracy, energy, cost, ...)</h2>\n'
h+=hextra
# Check host URL prefix and default module/action
rx=ck.access({'action':'form_url_prefix',
'module_uoa':'wfe',
'host':i.get('host',''),
'port':i.get('port',''),
'template':i.get('template','')})
if rx['return']>0: return rx
url0=rx['url']
template=rx['template']
url=url0
action=i.get('action','')
muoa=i.get('module_uoa','')
st=''
url+='action=index&module_uoa=wfe&native_action='+action+'&'+'native_module_uoa='+muoa
url1=url
# List entries
ii={'action':'search',
'module_uoa':work['self_module_uid'],
'add_meta':'yes'}
if cmuoa!='':
ii['module_uoa']=cmuoa
r=ck.access(ii)
if r['return']>0: return r
lst=r['lst']
# Check unique entries
choices={}
wchoices={}
for q in lst:
d=q['meta']
meta=d.get('meta',{})
for kk in selector:
kx=kk['key']
k=ckey+kx
if k not in choices:
choices[k]=[]
wchoices[k]=[{'name':'','value':''}]
v=meta.get(kx,'')
if v!='':
if v not in choices[k]:
choices[k].append(v)
wchoices[k].append({'name':v, 'value':v})
# Prepare query div ***************************************************************
if cmuoa=='':
# Start form + URL (even when viewing entry)
r=ck.access({'action':'start_form',
'module_uoa':cfg['module_deps']['wfe'],
'url':url1,
'name':form_name})
if r['return']>0: return r
h+=r['html']
for kk in selector:
k=ckey+kk['key']
n=kk['name']
nl=kk.get('new_line','')
if nl=='yes':
h+='<br>\n<div id="ck_entries_space8"></div>\n'
v=''
if i.get(k,'')!='':
v=i[k]
kk['value']=v
# Show hardware
ii={'action':'create_selector',
'module_uoa':cfg['module_deps']['wfe'],
'data':wchoices.get(k,[]),
'name':k,
'onchange':conc,
'skip_sort':'no',
'selected_value':v}
r=ck.access(ii)
if r['return']>0: return r
h+='<b>'+n+':</b> '+r['html'].strip()+'\n'
# Check hidden
if hi_uid!='':
h+='<input type="hidden" name="highlight_uid" value="'+hi_uid+'">\n'
h+='<br><br>'
# Prune list
plst=[]
for q in lst:
d=q['meta']
meta=d.get('meta',{})
# Check selector
skip=False
for kk in selector:
k=kk['key']
n=kk['name']
v=kk.get('value','')
if v!='' and meta.get(k,'')!=v:
skip=True
if not skip:
plst.append(q)
# Check if too many
lplst=len(plst)
if lplst==0:
h+='<b>No results found!</b>'
return {'return':0, 'html':h, 'style':st}
elif lplst>50:
h+='<b>Too many entries to show ('+str(lplst)+') - please, prune list further!</b>'
return {'return':0, 'html':h, 'style':st}
# Prepare table
h+='<table border="1" cellpadding="7" cellspacing="0">\n'
ha='align="center" valign="top"'
hb='align="left" valign="top"'
h+=' <tr style="background-color:#dddddd">\n'
h+=' <td '+ha+'><b>All raw files</b></td>\n'
h+=' <td '+ha+'><b>Type</b></td>\n'
h+=' <td '+ha+'><b>Network</b></td>\n'
h+=' <td '+ha+'><b>Batch size</b></td>\n'
h+=' <td '+ha+'><b>Num batches</b></td>\n'
h+=' <td '+ha+'><b>Total time (sec.)</b></td>\n'
h+=' <td '+ha+'><b>Chars</b></td>\n'
h+=' <td '+ha+'><b>Platform</b></td>\n'
h+=' <td '+ha+'><b>CPU</b></td>\n'
h+=' <td '+ha+'><b>GPGPU</b></td>\n'
h+=' <td '+ha+'><b>OS</b></td>\n'
h+=' <td '+ha+'><b>Fail?</b></td>\n'
h+=' <td '+ha+'><b>User</b></td>\n'
h+=' <td '+ha+'><b>Replay</b></td>\n'
    h+=' </tr>\n'
# Dictionary to hold target meta
tm={}
ix=0
bgraph={'0':[]} # Just for graph demo
if hi_uid!='':
bgraph['1']=[]
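       # Series '0' holds the minimal total execution time of every entry;
       # series '1' mirrors it with a value only for the highlighted entry
       # so the bar chart below can colour it separately.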
# Sort
splst=sorted(plst, key=lambda x: x.get('meta',{}).get('characteristics',{}).get('run',{}).get('total_execution_time',0))
for q in splst:
ix+=1
duid=q['data_uid']
path=q['path']
d=q['meta']
meta=d.get('meta',{})
params=d.get('choices',{}).get('params',{}).get('params',{})
tp=meta.get('tensorflow_type','')
nn=meta.get('nn_type','')
plat_name=meta.get('plat_name','')
cpu_name=meta.get('cpu_name','')
os_name=meta.get('os_name','')
gpgpu_name=meta.get('gpgpu_name','')
plat_uid=meta.get('platform_uid','')
cpu_uid=meta.get('cpu_uid','')
os_uid=meta.get('os_uid','')
gpu_uid=meta.get('gpu_uid','')
gpgpu_uid=meta.get('gpgpu_uid','')
echoices=meta.get('echoices',{})
bs=echoices.get('BATCH_SIZE','')
nb=echoices.get('NUM_BATCHES','')
user=meta.get('user','')
te=d.get('characteristics',{}).get('run',{})
# bgc='afffaf'
bgc='dfffdf'
fail=d.get('state',{}).get('fail','')
fail_reason=d.get('state',{}).get('fail_reason','')
if fail=='yes':
if fail_reason=='': fail_reason='yes'
bgc='ffafaf'
elif hi_uid!='' and duid==hi_uid:
bgc='9fff9f'
bgraph['0'].append([ix,None])
bgraph['1'].append([ix,x0])
bg=' style="background-color:#'+bgc+';"'
h+=' <tr'+bg+'>\n'
x=work['self_module_uid']
if cmuoa!='': x=cmuoa
h+=' <td '+ha+'>'+str(ix)+') <a href="'+url0+'&wcid='+x+':'+duid+'">'+duid+'</a></td>\n'
h+=' <td '+ha+'>'+tp+'</a></td>\n'
h+=' <td '+ha+'>'+nn+'</a></td>\n'
# Characteristics
# Check if has statistics
dstat={}
fstat=os.path.join(path,'ck-stat-flat-characteristics.json')
if os.path.isfile(fstat):
r=ck.load_json_file({'json_file':fstat, 'dict':dstat})
if r['return']>0: return r
dstat=r['dict']
h+=' <td '+ha+'>'+str(bs)+'</td>\n'
h+=' <td '+ha+'>'+str(nb)+'</td>\n'
x=''
# Check if has stats
x0=dstat.get("##characteristics#run#total_execution_time#min",None)
x0e=dstat.get("##characteristics#run#total_execution_time#exp",None)
x1=dstat.get("##characteristics#run#total_execution_time#center",None)
x2=dstat.get("##characteristics#run#total_execution_time#halfrange",None)
if x1!=None and x2!=None:
x=('%.2f'%x1)+' ± '+('%.2f'%x2)
h+=' <td '+ha+'>'+x+'</td>\n'
if fail!='yes' and x0!=None and duid!=hi_uid:
bgraph['0'].append([ix,x0])
if hi_uid!='': bgraph['1'].append([ix,None])
# Check all characteristics
x=''
x5=''
for k in sorted(te):
v=te[k]
kx="##characteristics#run#"+k
kx1=dstat.get(kx+'#center',None)
kx2=dstat.get(kx+'#halfrange',None)
x6=''
if type(v)==int:
if kx1!=None and kx2!=None:
x6=str(kx1)+' +- '+str(kx2)
else:
x6=str(v)
elif type(v)==float:
if kx1!=None and kx2!=None:
x6=('%.1f'%kx1)+' +- '+('%.1f'%kx2)
else:
x6=('%.1f'%v)
if x6!='':
x5+=str(k)+'='+x6+'\n'
# x5=x5.replace("'","\'").replace('"',"\\'").replace('\n','\\n')
x5=x5.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
if x5!='':
x+='<input type="button" class="ck_small_button" onClick="alert(\''+x5+'\');" value="All">'
h+=' <td '+ha+'>'+x+'</td>\n'
# Platform, etc ...
x=plat_name
if plat_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform']+':'+plat_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=cpu_name
if cpu_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform.cpu']+':'+cpu_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=gpgpu_name
if gpgpu_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform.gpgpu']+':'+gpgpu_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=os_name
if os_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform']+':'+os_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=fail_reason
if x=='':
x='No'
else:
fail_reason=fail_reason.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
x='Yes <input type="button" class="ck_small_button" onClick="alert(\''+fail_reason+'\');" value="Log">'
h+=' <td '+ha+'>'+x+'</td>\n'
# Params
# x='<table border="0" cellpadding="0" cellspacing="2">\n'
x=''
for k in sorted(params):
v=params[k]
x+=str(k)+'='+str(v)+'\n'
# x+='<tr><td>'+str(k)+'=</td><td>'+str(v)+'</td></tr>\n'
# x+='</table>\n'
# x=x.replace("'","\'").replace('"',"\\'").replace('\n','\\n')
x=x.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
x1=''
if x!='':
x1='<input type="button" class="ck_small_button" onClick="alert(\''+x+'\');" value="See">'
# h+=' <td '+ha+'>'+x1+'</td>\n'
h+=' <td '+ha+'><a href="'+url0+'&action=index&module_uoa=wfe&native_action=show&native_module_uoa=experiment.user">'+user+'</a></td>\n'
h+=' <td '+ha+'><input type="button" class="ck_small_button" onClick="copyToClipboard(\'ck replay tensorflow\');" value="Replay"></td>\n'
       h+=' </tr>\n'
h+='</table>\n'
h+='</center>\n'
if cmuoa=='':
h+='</form>\n'
if len(bgraph['0'])>0:
ii={'action':'plot',
'module_uoa':cfg['module_deps']['graph'],
"table":bgraph,
"h_lines":[1.0],
"ymin":0,
"ignore_point_if_none":"yes",
"plot_type":"d3_2d_bars",
"display_y_error_bar":"no",
"title":"Powered by Collective Knowledge",
"axis_x_desc":"Experiment",
"axis_y_desc":"Neural network total time (sec.)",
"plot_grid":"yes",
"d3_div":"ck_interactive",
"image_width":"900",
"image_height":"400",
"wfe_url":url0}
r=ck.access(ii)
if r['return']==0:
x=r.get('html','')
if x!='':
st+=r.get('style','')
h+='<br>\n'
h+='<center>\n'
h+='<div id="ck_box_with_shadow" style="width:920px;">\n'
h+=' <div id="ck_interactive" style="text-align:center">\n'
h+=x+'\n'
h+=' </div>\n'
h+='</div>\n'
h+='</center>\n'
return {'return':0, 'html':h, 'style':st}
##############################################################################
# replay experiment (TBD)
def replay(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
# TBD - take params from remote/local experiment and pre-set ...
# Run locally, i.e. do not share stats unless requested ...
i['action']='crowdsource'
i['module_uoa']=cfg['module_deps']['experiment.bench.tensorflow']
return ck.access(i)
|
|
#
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from abc import ABCMeta
import copyreg
import functools
import io
import os
import pickle
import socket
import sys
from . import context
__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']
HAVE_SEND_HANDLE = (sys.platform == 'win32' or
(hasattr(socket, 'CMSG_LEN') and
hasattr(socket, 'SCM_RIGHTS') and
hasattr(socket.socket, 'sendmsg')))
#
# Pickler subclass
#
class ForkingPickler(pickle.Pickler):
'''Pickler subclass used by multiprocessing.'''
_extra_reducers = {}
_copyreg_dispatch_table = copyreg.dispatch_table
def __init__(self, *args):
super().__init__(*args)
self.dispatch_table = self._copyreg_dispatch_table.copy()
self.dispatch_table.update(self._extra_reducers)
@classmethod
def register(cls, type, reduce):
'''Register a reduce function for a type.'''
cls._extra_reducers[type] = reduce
@classmethod
def dumps(cls, obj, protocol=None):
buf = io.BytesIO()
cls(buf, protocol).dump(obj)
return buf.getbuffer()
loads = pickle.loads
register = ForkingPickler.register
def dump(obj, file, protocol=None):
'''Replacement for pickle.dump() using ForkingPickler.'''
ForkingPickler(file, protocol).dump(obj)
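# Usage sketch: ForkingPickler round-trips objects the same way pickle does,
# but honours the extra reducers registered below, e.g.
#   buf = ForkingPickler.dumps({'answer': 42})
#   obj = pickle.loads(buf)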
#
# Platform specific definitions
#
if sys.platform == 'win32':
# Windows
__all__ += ['DupHandle', 'duplicate', 'steal_handle']
import _winapi
def duplicate(handle, target_process=None, inheritable=False,
*, source_process=None):
'''Duplicate a handle. (target_process is a handle not a pid!)'''
current_process = _winapi.GetCurrentProcess()
if source_process is None:
source_process = current_process
if target_process is None:
target_process = current_process
return _winapi.DuplicateHandle(
source_process, handle, target_process,
0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)
def steal_handle(source_pid, handle):
'''Steal a handle from process identified by source_pid.'''
source_process_handle = _winapi.OpenProcess(
_winapi.PROCESS_DUP_HANDLE, False, source_pid)
try:
return _winapi.DuplicateHandle(
source_process_handle, handle,
_winapi.GetCurrentProcess(), 0, False,
_winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(source_process_handle)
def send_handle(conn, handle, destination_pid):
'''Send a handle over a local connection.'''
dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
conn.send(dh)
def recv_handle(conn):
'''Receive a handle over a local connection.'''
return conn.recv().detach()
class DupHandle(object):
'''Picklable wrapper for a handle.'''
def __init__(self, handle, access, pid=None):
if pid is None:
# We just duplicate the handle in the current process and
# let the receiving process steal the handle.
pid = os.getpid()
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
try:
self._handle = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(),
handle, proc, access, False, 0)
finally:
_winapi.CloseHandle(proc)
self._access = access
self._pid = pid
def detach(self):
'''Get the handle. This should only be called once.'''
# retrieve handle from process which currently owns it
if self._pid == os.getpid():
# The handle has already been duplicated for this process.
return self._handle
# We must steal the handle from the process whose pid is self._pid.
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
self._pid)
try:
return _winapi.DuplicateHandle(
proc, self._handle, _winapi.GetCurrentProcess(),
self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(proc)
else:
# Unix
__all__ += ['DupFd', 'sendfds', 'recvfds']
import array
# On MacOSX we should acknowledge receipt of fds -- see Issue14669
ACKNOWLEDGE = sys.platform == 'darwin'
def sendfds(sock, fds):
'''Send an array of fds over an AF_UNIX socket.'''
fds = array.array('i', fds)
msg = bytes([len(fds) % 256])
sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
if ACKNOWLEDGE and sock.recv(1) != b'A':
raise RuntimeError('did not receive acknowledgement of fd')
def recvfds(sock, size):
'''Receive an array of fds over an AF_UNIX socket.'''
a = array.array('i')
bytes_size = a.itemsize * size
msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size))
if not msg and not ancdata:
raise EOFError
try:
if ACKNOWLEDGE:
sock.send(b'A')
if len(ancdata) != 1:
raise RuntimeError('received %d items of ancdata' %
len(ancdata))
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
if len(cmsg_data) % a.itemsize != 0:
raise ValueError
a.frombytes(cmsg_data)
if len(a) % 256 != msg[0]:
raise AssertionError(
"Len is {0:n} but msg[0] is {1!r}".format(
len(a), msg[0]))
return list(a)
except (ValueError, IndexError):
pass
raise RuntimeError('Invalid data received')
def send_handle(conn, handle, destination_pid):
'''Send a handle over a local connection.'''
with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
sendfds(s, [handle])
def recv_handle(conn):
'''Receive a handle over a local connection.'''
with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
return recvfds(s, 1)[0]
def DupFd(fd):
'''Return a wrapper for an fd.'''
popen_obj = context.get_spawning_popen()
if popen_obj is not None:
return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
elif HAVE_SEND_HANDLE:
from . import resource_sharer
return resource_sharer.DupFd(fd)
else:
raise ValueError('SCM_RIGHTS appears not to be available')
#
# Try making some callable types picklable
#
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
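# A bound method is reduced to (getattr, (instance, name)) so the receiving
# process simply looks the method up again on the unpickled instance instead
# of trying to pickle the method object itself.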
class _C:
def f(self):
pass
register(type(_C().f), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)
#
# Make sockets picklable
#
if sys.platform == 'win32':
def _reduce_socket(s):
from .resource_sharer import DupSocket
return _rebuild_socket, (DupSocket(s),)
def _rebuild_socket(ds):
return ds.detach()
register(socket.socket, _reduce_socket)
else:
def _reduce_socket(s):
df = DupFd(s.fileno())
return _rebuild_socket, (df, s.family, s.type, s.proto)
def _rebuild_socket(df, family, type, proto):
fd = df.detach()
return socket.socket(family, type, proto, fileno=fd)
register(socket.socket, _reduce_socket)
class AbstractReducer(metaclass=ABCMeta):
'''Abstract base class for use in implementing a Reduction class
suitable for use in replacing the standard reduction mechanism
used in multiprocessing.'''
ForkingPickler = ForkingPickler
register = register
dump = dump
send_handle = send_handle
recv_handle = recv_handle
if sys.platform == 'win32':
steal_handle = steal_handle
duplicate = duplicate
DupHandle = DupHandle
else:
sendfds = sendfds
recvfds = recvfds
DupFd = DupFd
_reduce_method = _reduce_method
_reduce_method_descriptor = _reduce_method_descriptor
_rebuild_partial = _rebuild_partial
_reduce_socket = _reduce_socket
_rebuild_socket = _rebuild_socket
def __init__(self, *args):
register(type(_C().f), _reduce_method)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
register(functools.partial, _reduce_partial)
register(socket.socket, _reduce_socket)
|
|
# -*- coding: utf-8 -*-
"""
PropertySet: Definition of properties for ``Mark`` objects and labels of
``Axis`` objects
"""
from .core import _assert_is_type, grammar, GrammarClass
from .values import ValueRef
from ._compat import str_types
class PropertySet(GrammarClass):
"""Definition of properties for ``Mark`` objects and labels of ``Axis``
objects
These define the appearance details for marks and axes.
All properties are defined by ``ValueRef`` classes. As a warning,
validation of the values is only performed on the ``value`` field of the
class, which is ignored by Vega if the ``field`` property is set.
"""
@grammar(ValueRef)
def x(value):
"""ValueRef : number, left-most x-coordinate
For most marks, this will be equal to the field of the independent
variable. For example,
``{"scale": "x", "field": "data.x"}``
will place a mark with its left-most coordinate at the x-values of
the data. Something like
``{"scale": "x", "value": 10}``
will place a single mark at given x-coordinate.
"""
@grammar(ValueRef)
def x2(value):
"""ValueRef : number, right-most x-coordinate
Generally, for marks where the width is significant, it's better to
use the ``width`` property.
"""
@grammar(ValueRef)
def width(value):
"""ValueRef : number, width of the mark
Set the ``band`` property of the ``ValueRef`` to True to use the
full width.
"""
@grammar(ValueRef)
def y(value):
"""ValueRef : number, top-most y-coordinate
The same remarks for the ``x`` property apply here.
"""
@grammar(ValueRef)
def y2(value):
"""ValueRef : number, bottom-most y-coordinate
The same remarks for the ``x2`` property apply here.
"""
@grammar(ValueRef)
def height(value):
"""ValueRef : number, height of the mark
"""
@grammar(ValueRef)
def opacity(value):
"""ValueRef : number, overall opacity (0 to 1)
"""
@grammar(ValueRef)
def fill(value):
"""ValueRef : string, fill color for the mark
Colors can be specified in standard HTML hex notation or as CSS3
compatible strings. The color string is not validated due to its
large number of valid values.
"""
if value.value:
_assert_is_type('fill.value', value.value, str_types)
@grammar(grammar_type=ValueRef, grammar_name='fillOpacity')
def fill_opacity(value):
"""ValueRef : int or float, opacity of the fill (0 to 1)
"""
if value.value:
_assert_is_type('fill_opacity.value', value.value,
(float, int))
if value.value < 0 or value.value > 1:
raise ValueError(
'fill_opacity must be between 0 and 1')
@grammar(ValueRef)
def stroke(value):
"""ValueRef : color, stroke color for the mark
Colors can be specified in standard HTML hex notation or as CSS3
compatible strings. The color string is not validated due to its
large number of valid values.
"""
if value.value:
            _assert_is_type('stroke.value', value.value, str_types)
@grammar(grammar_type=ValueRef, grammar_name='strokeWidth')
def stroke_width(value):
"""ValueRef : int, width of the stroke in pixels
"""
if value.value:
_assert_is_type('stroke_width.value', value.value, int)
if value.value < 0:
raise ValueError('stroke width cannot be negative')
@grammar(grammar_type=ValueRef, grammar_name='strokeOpacity')
def stroke_opacity(value):
"""ValueRef : number, opacity of the stroke (0 to 1)
"""
if value.value:
_assert_is_type('stroke_opacity.value', value.value,
(float, int))
if value.value < 0 or value.value > 1:
raise ValueError(
'stroke_opacity must be between 0 and 1')
@grammar(ValueRef)
def size(value):
"""ValueRef : number, area of the mark in pixels
This is the total area of a symbol. For example, a value of 500 and
a ``shape`` of ``'circle'`` would result in circles with an area of
500 square pixels. Only used if ``type`` is ``'symbol'``.
"""
if value.value:
_assert_is_type('size.value', value.value, int)
if value.value < 0:
raise ValueError('size cannot be negative')
_valid_shapes = frozenset([
"circle", "square", "cross", "diamond", "triangle-up", "triangle-down"
])
@grammar(ValueRef)
def shape(value):
"""ValueRef : string, type of symbol to use
Possible values are ``'circle'`` (default), ``'square'``,
``'cross'``, ``'diamond'``, ``'triangle-up'``, and
``'triangle-down'``. Only used if ``type`` is ``'symbol'``.
"""
if value.value:
_assert_is_type('shape.value', value.value, str_types)
if value.value not in PropertySet._valid_shapes:
raise ValueError(value.value + ' is not a valid shape')
@grammar(ValueRef)
def path(value):
"""ValueRef : string, SVG path string
This would typically be used for maps and other things where the
path is taken from the data.
"""
if value.value:
_assert_is_type('path.value', value.value, str_types)
@grammar(grammar_type=ValueRef, grammar_name='innerRadius')
def inner_radius(value):
"""ValueRef : number, inner radius of arc in pixels
Only used if ``type`` is ``'arc'``."""
@grammar(grammar_type=ValueRef, grammar_name='outerRadius')
def outer_radius(value):
"""ValueRef : number, outer radius of the arc in pixels
Only used if ``type`` is ``'arc'``."""
@grammar(grammar_type=ValueRef, grammar_name='startAngle')
def start_angle(value):
"""ValueRef : number, start angle of the arc in radians
Only used if ``type`` is ``'arc'``."""
@grammar(grammar_type=ValueRef, grammar_name='endAngle')
def end_angle(value):
"""ValueRef : number, end angle of the arc in radians
Only used if ``type`` is ``'arc'``."""
_area_methods = [
"linear", "step-before", "step-after", "basis", "basis-open",
"cardinal", "cardinal-open", "monotone"
]
_line_methods = [
"linear", "step-before", "step-after", "basis", "basis-open",
"basis-closed", "bundle", "cardinal", "cardinal-open",
"cardinal-closed", "monotone"
]
_valid_methods = frozenset(_area_methods + _line_methods)
@grammar(ValueRef)
def interpolate(value):
"""ValueRef : string, line interpolation method to use
        Possible values for ``area`` types are ``'linear'``,
``'step-before'``, ``'step-after'``, ``'basis'``, ``'basis-open'``,
``'cardinal'``, ``'cardinal-open'``, ``'monotone'``. ``line`` types
have all values for ``area`` as well as ``'basis-closed'``,
``'bundle'``, and ``'cardinal-closed'``.
Only used if ``type`` is ``'area'`` or ``'line'``.
"""
if value.value:
            _assert_is_type('interpolate.value', value.value, str_types)
if value.value not in PropertySet._valid_methods:
raise ValueError(value.value + ' is not a valid method')
@grammar(ValueRef)
def tension(value):
"""ValueRef : number, tension used for interpolation
Only used if ``type`` is ``'area'`` or ``'line'``.
"""
@grammar(ValueRef)
def url(value):
"""ValueRef : string, url of image
Only used if ``type`` is ``'image'``.
"""
_valid_align = frozenset(["left", "right", "center"])
@grammar(ValueRef)
def align(value):
"""ValueRef : string, horizontal alignment of mark
Possible values are ``'left'``, ``'right'``, and ``'center'``. Only
used if ``type`` is ``'image'`` or ``'text'``.
"""
if value.value:
            _assert_is_type('align.value', value.value, str_types)
if value.value not in PropertySet._valid_align:
raise ValueError(value.value + ' is not a valid alignment')
_valid_baseline = frozenset(["top", "middle", "bottom"])
@grammar(ValueRef)
def baseline(value):
"""ValueRef : string, vertical alignment of mark
Possible values are ``'top'``, ``'middle'``, and ``'bottom'``. Only
used if ``type`` is ``'image'`` or ``'text'``.
"""
if value.value:
            _assert_is_type('baseline.value', value.value, str_types)
if value.value not in PropertySet._valid_baseline:
raise ValueError(value.value + ' is not a valid baseline')
@grammar(ValueRef)
def text(value):
"""ValueRef : string, text to display
Only used if ``type`` is ``'text'``."""
@grammar(ValueRef)
def dx(value):
"""ValueRef : number, horizontal margin between text and anchor
point in pixels
Ignored if ``align`` is ``'center'``. Only used if ``type`` is
``'text'``.
"""
@grammar(ValueRef)
def dy(value):
"""ValueRef : number, vertical margin between text and anchor
point in pixels
Ignored if ``baseline`` is ``'middle'``. Only used if ``type`` is
``'text'``.
"""
@grammar(ValueRef)
def angle(value):
"""ValueRef : number, rotation of text in degrees
Only used if ``type`` is ``'text'``.
"""
@grammar(ValueRef)
def font(value):
"""ValueRef : string, typeface for text
Only used if ``type`` is ``'text'``.
"""
@grammar(grammar_type=ValueRef, grammar_name='fontSize')
def font_size(value):
"""ValueRef : number, font size in pixels
Only used if ``type`` is ``'text'``.
"""
@grammar(grammar_type=ValueRef, grammar_name='fontWeight')
def font_weight(value):
"""ValueRef : string, font weight
Should be a valid SVG font weight. Only used if ``type`` is
``'text'``.
"""
@grammar(grammar_type=ValueRef, grammar_name='fontStyle')
def font_style(value):
"""ValueRef : string, font style
Should be a valid SVG font style. Only used if ``type`` is
``'text'``.
"""
|