# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""Tests for :module:`flocker.node.gear`."""
from zope.interface.verify import verifyObject
from twisted.trial.unittest import TestCase
from ...testtools import random_name, make_with_init_tests
from ..gear import (
IGearClient, FakeGearClient, AlreadyExists, PortMap, Unit, GearEnvironment)
def make_igearclient_tests(fixture):
"""
Create a TestCase for IGearClient.
:param fixture: A fixture that returns a :class:`IGearClient`
provider.
"""
class IGearClientTests(TestCase):
"""
Tests for :class:`IGearClient` providers.
These are functional tests if run against a real geard.
"""
def test_interface(self):
"""The tested object provides :class:`IGearClient`."""
client = fixture(self)
self.assertTrue(verifyObject(IGearClient, client))
def test_add_and_remove(self):
"""An added unit can be removed without an error."""
client = fixture(self)
name = random_name()
d = client.add(name, u"busybox")
d.addCallback(lambda _: client.remove(name))
return d
def test_no_double_add(self):
"""Adding a unit with name that already exists results in error."""
client = fixture(self)
name = random_name()
d = client.add(name, u"busybox")
def added(_):
self.addCleanup(client.remove, name)
return client.add(name, u"busybox")
d.addCallback(added)
d = self.assertFailure(d, AlreadyExists)
d.addCallback(lambda exc: self.assertEqual(exc.args[0], name))
return d
def test_remove_nonexistent_is_ok(self):
"""Removing a non-existent unit does not result in a error."""
client = fixture(self)
name = random_name()
return client.remove(name)
def test_double_remove_is_ok(self):
"""Removing a unit twice in a row does not result in error."""
client = fixture(self)
name = random_name()
d = client.add(name, u"busybox")
d.addCallback(lambda _: client.remove(name))
d.addCallback(lambda _: client.remove(name))
return d
def test_unknown_does_not_exist(self):
"""A unit that was never added does not exist."""
client = fixture(self)
name = random_name()
d = client.exists(name)
d.addCallback(self.assertFalse)
return d
def test_added_exists(self):
"""An added unit exists."""
client = fixture(self)
name = random_name()
d = client.add(name, u"busybox")
def added(_):
self.addCleanup(client.remove, name)
return client.exists(name)
d.addCallback(added)
d.addCallback(self.assertTrue)
return d
def test_removed_does_not_exist(self):
"""A removed unit does not exist."""
client = fixture(self)
name = random_name()
d = client.add(name, u"openshift/busybox-http-app")
d.addCallback(lambda _: client.remove(name))
d.addCallback(lambda _: client.exists(name))
d.addCallback(self.assertFalse)
return d
def test_added_is_listed(self):
"""An added unit is included in the output of ``list()``."""
client = fixture(self)
name = random_name()
self.addCleanup(client.remove, name)
d = client.add(name, u"openshift/busybox-http-app")
d.addCallback(lambda _: client.list())
def got_list(units):
# XXX: GearClient.list should also return container_image
# information
# See https://github.com/ClusterHQ/flocker/issues/207
activating = Unit(name=name, activation_state=u"activating",
sub_state=u"start-pre")
active = Unit(name=name, activation_state=u"active")
self.assertTrue((activating in units) or
(active in units),
"Added unit not in %r: %r, %r" % (
units, active, activating))
d.addCallback(got_list)
return d
def test_removed_is_not_listed(self):
"""A removed unit is not included in the output of ``list()``."""
client = fixture(self)
name = random_name()
d = client.add(name, u"openshift/busybox-http-app")
d.addCallback(lambda _: client.remove(name))
d.addCallback(lambda _: client.list())
def got_list(units):
self.assertNotIn(name, [unit.name for unit in units])
d.addCallback(got_list)
return d
return IGearClientTests
class FakeIGearClientTests(make_igearclient_tests(lambda t: FakeGearClient())):
"""
``IGearClient`` tests for ``FakeGearClient``.
"""
class FakeGearClientImplementationTests(TestCase):
"""
Tests for implementation details of ``FakeGearClient``.
"""
def test_units_default(self):
"""
``FakeGearClient._units`` is an empty dict by default.
"""
self.assertEqual({}, FakeGearClient()._units)
def test_units_override(self):
"""
``FakeGearClient._units`` can be supplied in the constructor.
"""
units = {u'foo': Unit(name=u'foo', activation_state=u'active',
container_image=u'flocker/flocker:v1.0.0')}
self.assertEqual(units, FakeGearClient(units=units)._units)
class PortMapInitTests(
make_with_init_tests(
record_type=PortMap,
kwargs=dict(
internal_port=5678,
external_port=910,
)
)
):
"""
Tests for ``PortMap.__init__``.
"""
class PortMapTests(TestCase):
"""
Tests for ``PortMap``.
XXX: The equality tests in this case are incomplete. See
https://github.com/hynek/characteristic/issues/4 for a proposed solution to
this.
"""
def test_repr(self):
"""
``PortMap.__repr__`` shows the internal and external ports.
"""
self.assertEqual(
"<PortMap(internal_port=5678, external_port=910)>",
repr(PortMap(internal_port=5678, external_port=910))
)
def test_equal(self):
"""
``PortMap`` instances with the same internal and external ports compare
equal.
"""
self.assertEqual(
PortMap(internal_port=5678, external_port=910),
PortMap(internal_port=5678, external_port=910),
)
def test_not_equal(self):
"""
``PortMap`` instances with different internal and external ports do not
compare equal.
"""
self.assertNotEqual(
PortMap(internal_port=5678, external_port=910),
PortMap(internal_port=1516, external_port=1718)
)
class UnitInitTests(
make_with_init_tests(
record_type=Unit,
kwargs=dict(
name=u'site-example.com',
activation_state=u'active',
container_image=u'flocker/flocker:v1.0.0',
ports=(PortMap(internal_port=80, external_port=8080),),
links=(PortMap(internal_port=3306, external_port=103306),),
environment=GearEnvironment(
id=u'site-example.com', variables={u'foo': u'bar'})
),
expected_defaults=dict(
ports=(), links=(), container_image=None, environment=None)
)
):
"""
Tests for ``Unit.__init__``.
"""
class UnitTests(TestCase):
"""
Tests for ``Unit``.
XXX: The equality tests in this case are incomplete. See
https://github.com/hynek/characteristic/issues/4 for a proposed solution to
this.
"""
def test_repr(self):
"""
``Unit.__repr__`` shows the name, activation_state, container_image,
ports and links.
"""
self.assertEqual(
"<Unit(name=u'site-example.com', "
"activation_state=u'active', sub_state=u'running', "
"container_image=u'flocker/flocker:v1.0.0', ports=[], links=[], "
"environment=None)>",
repr(Unit(name=u'site-example.com',
activation_state=u'active', sub_state=u'running',
container_image=u'flocker/flocker:v1.0.0',
ports=[], links=[], environment=None))
)
class GearEnvironmentInitTests(
make_with_init_tests(
record_type=GearEnvironment,
kwargs=dict(
id=u'site-example.com',
variables=dict(foo="bar"),
),
)
):
"""
Tests for ``GearEnvironment.__init__``.
"""
class GearEnvironmentTests(TestCase):
"""
Tests for ``GearEnvironment``.
"""
def test_to_dict(self):
"""
``GearEnvironment.to_dict`` returns a dictionary containing the
environment ID and the variables in name, value pairs.
"""
expected_id = u'site-example.com'
expected_dict = {
'id': expected_id,
'variables': [
{'name': 'foo', 'value': 'bar'},
{'name': 'baz', 'value': 'qux'},
]
}
self.assertEqual(
expected_dict,
GearEnvironment(
id=expected_id, variables=dict(foo='bar', baz='qux')).to_dict()
)
def test_repr(self):
"""
``GearEnvironment.__repr__`` shows the id and variables.
"""
self.assertEqual(
"<GearEnvironment("
"id=u'site-example.com', "
"variables={'foo': 'bar'})>",
repr(
GearEnvironment(
id=u'site-example.com', variables=dict(foo="bar")
)
)
)
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import logging
import os
import time
from copy import deepcopy
import docker
import requests
from fuel_upgrade.clients import SupervisorClient
from fuel_upgrade.engines.base import UpgradeEngine
from fuel_upgrade.health_checker import FuelUpgradeVerify
from fuel_upgrade.version_file import VersionFile
from fuel_upgrade import errors
from fuel_upgrade import utils
logger = logging.getLogger(__name__)
class DockerUpgrader(UpgradeEngine):
"""Docker management system for upgrades
"""
def __init__(self, *args, **kwargs):
super(DockerUpgrader, self).__init__(*args, **kwargs)
self.working_directory = self.config.working_directory
utils.create_dir_if_not_exists(self.working_directory)
self.docker_client = docker.Client(
base_url=self.config.docker['url'],
version=self.config.docker['api_version'],
timeout=self.config.docker['http_timeout'])
self.new_release_images = self.make_new_release_images_list()
self.new_release_containers = self.make_new_release_containers_list()
self.cobbler_config_path = self.config.cobbler_config_path.format(
working_directory=self.working_directory)
self.upgrade_verifier = FuelUpgradeVerify(self.config)
self.from_version = self.config.from_version
self.supervisor = SupervisorClient(self.config, self.from_version)
self.version_file = VersionFile(self.config)
self.version_file.save_current()
def upgrade(self):
"""Method with upgarde logic
"""
# Prepare env for upgrade
self.save_db()
self.save_cobbler_configs()
self.save_astute_keys()
# NOTE(akislitsky): fix for bug
# https://bugs.launchpad.net/fuel/+bug/1354465
# supervisord can restart an old container even if it has already
# been stopped, in which case starting the new container will fail.
# We switch configs before the upgrade, so that if supervisord tries
# to start a container it will be the new one.
self.switch_to_new_configs()
# Stop all of the services
self.supervisor.stop_all_services()
self.stop_fuel_containers()
# Upload docker images
self.upload_images()
# Generate configs with autostart=False so that new services
# are not started on supervisor restart
self.generate_configs(autostart=False)
self.version_file.switch_to_new()
# Restart supervisor to read new configs
self.supervisor.restart_and_wait()
# Create container and start it under supervisor
self.create_and_start_new_containers()
# Update configs in order to start all
# services on supervisor restart
self.generate_configs(autostart=True)
# Verify that all services are up and running
self.upgrade_verifier.verify()
def rollback(self):
"""Method which contains rollback logic
"""
self.version_file.switch_to_previous()
self.supervisor.switch_to_previous_configs()
self.supervisor.stop_all_services()
self.stop_fuel_containers()
self.supervisor.restart_and_wait()
def on_success(self):
"""Remove saved version files for all upgrades
NOTE(eli): It solves several problems:
1. user runs upgrade 5.0 -> 5.1 which fails
upgrade system saves version which we upgrade
from in file working_dir/5.1/version.yaml.
Then user runs upgrade 5.0 -> 5.0.1 which
successfully upgraded. Then user runs again
upgrade 5.0.1 -> 5.1, but there is saved file
working_dir/5.1/version.yaml which contains
5.0 version, and upgrade system thinks that
it's upgrading from 5.0 version, as result
it tries to make database dump from wrong
version of container.
2. without this hack user can run upgrade
second time and loose his data, this hack
prevents this case because before upgrade
checker will use current version instead
of saved version to determine version which
we run upgrade from.
"""
for version_file in glob.glob(self.config.version_files_mask):
utils.remove(version_file)
@property
def required_free_space(self):
"""Required free space to run upgrade
* space for docker
* several megabytes for configs
* reserve several megabytes for working directory
where we keep postgresql dump and cobbler configs
:returns: dict where key is path to directory
and value is required free space
"""
return {
self.config.docker['dir']: self._calculate_images_size(),
self.config.supervisor['configs_prefix']: 10,
self.config.fuel_config_path: 10,
self.working_directory: 150}
def _calculate_images_size(self):
images_list = [i['docker_image'] for i in self.new_release_images]
return utils.files_size(images_list)
def save_db(self):
"""Saves postgresql database into the file
"""
logger.debug(u'Backup database')
pg_dump_path = os.path.join(self.working_directory, 'pg_dump_all.sql')
pg_dump_files = utils.VersionedFile(pg_dump_path)
pg_dump_tmp_path = pg_dump_files.next_file_name()
utils.wait_for_true(
lambda: self.make_pg_dump(pg_dump_tmp_path, pg_dump_path),
timeout=self.config.db_backup_timeout,
interval=self.config.db_backup_interval)
valid_dumps = filter(utils.verify_postgres_dump,
pg_dump_files.sorted_files())
if valid_dumps:
utils.hardlink(valid_dumps[0], pg_dump_path, overwrite=True)
map(utils.remove_if_exists,
valid_dumps[self.config.keep_db_backups_count:])
else:
raise errors.DatabaseDumpError(
u'Failed to make database dump, there '
'are no valid database backup '
'files, {0}'.format(pg_dump_path))
def make_pg_dump(self, pg_dump_tmp_path, pg_dump_path):
"""Run postgresql dump in container
:param str pg_dump_tmp_path: path to temporary dump file
:param str pg_dump_path: path to dump which will be restored
in the new container, if this file is
exists, it means the user already
ran upgrade and for some reasons it
failed
:returns: True if db was successfully dumped or if dump exists
False if container isn't running or dump isn't succeed
"""
try:
container_name = self.make_container_name(
'postgres', self.from_version)
self.exec_cmd_in_container(
container_name,
u"su postgres -c 'pg_dumpall --clean' > {0}".format(
pg_dump_tmp_path))
except (errors.ExecutedErrorNonZeroExitCode,
errors.CannotFindContainerError) as exc:
utils.remove_if_exists(pg_dump_tmp_path)
if not utils.file_exists(pg_dump_path):
logger.debug('Failed to make database dump %s', exc)
return False
logger.debug(
u'Failed to make database dump, '
'the dump from a previous run will be used: %s', exc)
return True
def save_astute_keys(self):
"""Copy any astute generated keys."""
container_name = self.make_container_name('astute', self.from_version)
utils.remove(self.config.astute_keys_path)
try:
utils.exec_cmd('docker cp {0}:{1} {2}'.format(
container_name,
self.config.astute_container_keys_path,
self.config.working_directory))
except errors.ExecutedErrorNonZeroExitCode as exc:
# If there was an error, it is most likely:
#
# Error: Could not find the file /var/lib/astute
# in container fuel-core-5.0-astute
#
# It means that the user didn't run a deployment on this
# env, because the directory is created by the orchestrator
# during the first deployment.
# It can also fail if there was no running container.
# In both cases we should create an empty directory so it
# can be copied in after the container creation section.
logger.debug(
'Cannot copy astute keys, creating empty directory '
'%s: %s', self.config.astute_keys_path, exc)
if not utils.file_exists(self.config.astute_keys_path):
os.mkdir(self.config.astute_keys_path)
def save_cobbler_configs(self):
"""Copy config files from container
"""
container_name = self.make_container_name(
'cobbler', self.from_version)
try:
utils.exec_cmd('docker cp {0}:{1} {2}'.format(
container_name,
self.config.cobbler_container_config_path,
self.cobbler_config_path))
except errors.ExecutedErrorNonZeroExitCode:
utils.rmtree(self.cobbler_config_path)
raise
self.verify_cobbler_configs()
def verify_cobbler_configs(self):
"""Verify that cobbler config directory
contains valid data
"""
configs = glob.glob(
self.config.cobbler_config_files_for_verifier.format(
cobbler_config_path=self.cobbler_config_path))
# NOTE(eli): cobbler config directory should
# contain at least one file (default.json)
if len(configs) < 1:
raise errors.WrongCobblerConfigsError(
u'Cannot find json files in directory {0}'.format(
self.cobbler_config_path))
for config in configs:
if not utils.check_file_is_valid_json(config):
raise errors.WrongCobblerConfigsError(
u'Invalid json config {0}'.format(config))
def upload_images(self):
"""Uploads images to docker
"""
logger.info(u'Start image uploading')
for image in self.new_release_images:
logger.debug(u'Try to upload docker image %s', image)
docker_image = image['docker_image']
if not os.path.exists(docker_image):
logger.warn(u'Cannot find docker image "%s"', docker_image)
continue
# NOTE(eli): the docker-py binding
# doesn't have a call equivalent
# to `docker load` for importing
# images
utils.exec_cmd(u'docker load < "{0}"'.format(docker_image))
def create_and_start_new_containers(self):
"""Create containers in the right order
"""
logger.info(u'Started containers creation')
graph = self.build_dependencies_graph(self.new_release_containers)
logger.debug(u'Built dependencies graph %s', graph)
containers_to_creation = utils.topological_sorting(graph)
logger.debug(u'Resolved creation order %s', containers_to_creation)
for container_id in containers_to_creation:
container = self.container_by_id(container_id)
logger.debug(u'Start container %s', container)
links = self.get_container_links(container)
created_container = self.create_container(
container['image_name'],
name=container.get('container_name'),
volumes=container.get('volumes'),
ports=container.get('ports'),
detach=False)
volumes_from = []
for volumes_from_id in container.get('volumes_from', []):
volume_container = self.container_by_id(volumes_from_id)
volumes_from.append(volume_container['container_name'])
self.start_container(
created_container,
port_bindings=container.get('port_bindings'),
links=links,
volumes_from=volumes_from,
binds=container.get('binds'),
privileged=container.get('privileged', False))
if container.get('after_container_creation_command'):
self.run_after_container_creation_command(container)
if container.get('supervisor_config'):
self.start_service_under_supervisor(
self.make_service_name(container['id']))
self.clean_iptables_rules(container)
def run_after_container_creation_command(self, container):
"""Runs command in container with retries in
case of error
:param container: dict with container information
"""
command = container['after_container_creation_command']
def execute():
self.exec_cmd_in_container(container['container_name'], command)
self.exec_with_retries(
execute, errors.ExecutedErrorNonZeroExitCode,
'', retries=30, interval=4)
def exec_cmd_in_container(self, container_name, cmd):
"""Execute command in running container
:param name: name of the container, like fuel-core-5.1-nailgun
"""
db_container_id = self.container_docker_id(container_name)
# NOTE(eli): we use lxc-attach here
# instead of dockerctl shell because
# release 5.0 has a bug which doesn't
# allow us to use quotes in the command
# https://bugs.launchpad.net/fuel/+bug/1324200
utils.exec_cmd(
"lxc-attach --name {0} -- {1}".format(
db_container_id, cmd))
def get_ports(self, container):
"""Docker binding accepts ports as tuple,
here we convert from list to tuple.
FIXME(eli): https://github.com/dotcloud/docker-py/blob/
73434476b32136b136e1cdb0913fd123126f2a52/
docker/client.py#L111-L114
"""
ports = container.get('ports')
if ports is None:
return
return [port if not isinstance(port, list) else tuple(port)
for port in ports]
def start_service_under_supervisor(self, service_name):
"""Start service under supervisor
:param str service_name: name of the service
"""
self.supervisor.start(service_name)
def exec_with_retries(
self, func, exceptions, message, retries=0, interval=0):
# TODO(eli): refactor it and make retries
# as a decorator
intervals = retries * [interval]
for interval in intervals:
try:
return func()
except exceptions as exc:
if str(exc).endswith(message):
time.sleep(interval)
continue
raise
return func()
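# Illustrative sketch for the TODO above (an assumption, not an existing
# helper in this project): the same retry loop expressed as a decorator.
#
# def with_retries(exceptions, message='', retries=0, interval=0):
#     def decorator(func):
#         def wrapper(*args, **kwargs):
#             for _ in range(retries):
#                 try:
#                     return func(*args, **kwargs)
#                 except exceptions as exc:
#                     if str(exc).endswith(message):
#                         time.sleep(interval)
#                         continue
#                     raise
#             return func(*args, **kwargs)
#         return wrapper
#     return decorator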
def get_container_links(self, container):
links = []
if container.get('links'):
for container_link in container.get('links'):
link_container = self.container_by_id(
container_link['id'])
links.append((
link_container['container_name'],
container_link['alias']))
return links
@classmethod
def build_dependencies_graph(cls, containers):
"""Builds graph which based on
`volumes_from` and `link` parameters
of container.
:returns: dict where keys are nodes and
values are lists of dependencies
"""
graph = {}
for container in containers:
graph[container['id']] = sorted(set(
container.get('volumes_from', []) +
[link['id'] for link in container.get('links', [])]))
return graph
def generate_configs(self, autostart=True):
"""Generates supervisor configs
and saves them to configs directory
"""
configs = []
for container in self.new_release_containers:
params = {
'config_name': container['id'],
'service_name': self.make_service_name(container['id']),
'command': u'docker start -a {0}'.format(
container['container_name']),
'autostart': autostart
}
if container['supervisor_config']:
configs.append(params)
self.supervisor.generate_configs(configs)
cobbler_container = self.container_by_id('cobbler')
self.supervisor.generate_cobbler_config(
cobbler_container['id'],
self.make_service_name(cobbler_container['id']),
cobbler_container['container_name'],
autostart=autostart)
def make_service_name(self, container_name):
return 'docker-{0}'.format(container_name)
def switch_to_new_configs(self):
"""Switches supervisor to new configs
"""
self.supervisor.switch_to_new_configs()
def build_images(self):
"""Use docker API to build new containers
"""
self.remove_new_release_images()
for image in self.new_release_images:
logger.info(u'Start image building: %s', image)
self.docker_client.build(
path=image['docker_file'],
tag=image['name'],
nocache=True)
# NOTE(eli): 0.10 and earlier versions of
# the Docker API don't return a correct http
# response in case of a failed build; here
# we check if the build succeeded and raise an
# error if it failed, i.e. the image was not created
if not self.docker_client.images(name=image):
raise errors.DockerFailedToBuildImageError(
u'Failed to build image {0}'.format(image))
def volumes_dependencies(self, container):
"""Get list of `volumes` dependencies
:param container: dict with information about container
"""
return self.dependencies_names(container, 'volumes_from')
def link_dependencies(self, container):
"""Get list of `link` dependencies
:param container: dict with information about container
"""
return self.dependencies_names(container, 'link')
def dependencies_names(self, container, key):
"""Returns list of dependencies for specified key
:param container: dict with information about container
:param key: key which will be used for dependencies retrieving
:returns: list of container names
"""
names = []
if container.get(key):
for container_id in container.get(key):
container = self.container_by_id(container_id)
names.append(container['container_name'])
return names
def stop_fuel_containers(self):
"""Use docker API to shutdown containers
"""
containers = self.docker_client.containers(limit=-1)
containers_to_stop = filter(
lambda c: c['Image'].startswith(self.config.image_prefix),
containers)
for container in containers_to_stop:
logger.debug(u'Stop container: %s', container)
self.stop_container(container['Id'])
def _get_docker_container_public_ports(self, containers):
"""Returns public ports
:param containers: list of dicts with information about
containers; each dict has a `Ports` list
whose items contain a `PublicPort` field
:returns: list of public ports
"""
container_ports = []
for container in containers:
container_ports.extend(container['Ports'])
return [container_port['PublicPort']
for container_port in container_ports]
def clean_iptables_rules(self, container):
"""Sometimes when we run docker stop
(version dc9c28f/0.10.0) it doesn't clean
iptables rules, as result when we run new
container on the same port we have two rules
with the same port but with different IPs,
we have to clean this rules to prevent services
unavailability.
Example of the problem:
$ iptables -t nat -S
...
-A DOCKER -p tcp -m tcp --dport 443 -j DNAT \
--to-destination 172.17.0.7:443
-A DOCKER -p tcp -m tcp --dport 443 -j DNAT \
--to-destination 172.17.0.3:443
-A DOCKER -d 10.108.0.2/32 -p tcp -m tcp --dport \
8777 -j DNAT --to-destination 172.17.0.10:8777
-A DOCKER -d 127.0.0.1/32 -p tcp -m tcp --dport \
8777 -j DNAT --to-destination 172.17.0.11:8777
-A DOCKER -d 10.108.0.2/32 -p tcp -m tcp --dport \
8777 -j DNAT --to-destination 172.17.0.11:8777
"""
if not container.get('port_bindings'):
return
self._log_iptables()
utils.safe_exec_cmd('dockerctl post_start_hooks {0}'.format(
container['id']))
utils.safe_exec_cmd('service iptables save')
self._log_iptables()
def _log_iptables(self):
"""Method for additional logging of iptables rules
NOTE(eli): Sometimes there are problems with
iptables rules like this
https://bugs.launchpad.net/fuel/+bug/1349287
"""
utils.safe_exec_cmd('iptables -t nat -S')
utils.safe_exec_cmd('iptables -S')
utils.safe_exec_cmd('cat /etc/sysconfig/iptables.save')
def stop_container(self, container_id):
"""Stop docker container
:param container_id: container id
"""
logger.debug(u'Stop container: %s', container_id)
try:
self.docker_client.stop(
container_id, self.config.docker['stop_container_timeout'])
except requests.exceptions.Timeout:
# NOTE(eli): docker uses the SIGTERM signal
# to stop the container; if the timeout
# expires, docker uses SIGKILL instead.
# Here we just want to make sure that
# the container was stopped.
logger.warn(
u'Couldn\'t stop container, trying '
'to stop it again: %s', container_id)
self.docker_client.stop(
container_id, self.config.docker['stop_container_timeout'])
def start_container(self, container, **params):
"""Start containers
:param container: container name
:param params: dict of arguments for container starting
"""
logger.debug(u'Start container "%s": %s', container['Id'], params)
self.docker_client.start(container['Id'], **params)
def create_container(self, image_name, **params):
"""Create container
:param image_name: name of the image
:param params: parameters in the same format as the
docker client's create_container call
"""
# We have to delete container because we cannot
# have several containers with the same name
container_name = params.get('name')
if container_name is not None:
self._delete_container_if_exist(container_name)
new_params = deepcopy(params)
new_params['ports'] = self.get_ports(new_params)
logger.debug(u'Create container from image %s: %s',
image_name, new_params)
def func_create():
return self.docker_client.create_container(
image_name,
**new_params)
return self.exec_with_retries(
func_create,
docker.errors.APIError,
"Can't set cookie",
retries=3,
interval=2)
def make_new_release_containers_list(self):
"""Returns list of dicts with information
for new containers.
"""
new_containers = []
for container in self.config.containers:
new_container = deepcopy(container)
new_container['image_name'] = self.make_image_name(
container['from_image'])
new_container['container_name'] = self.make_container_name(
container['id'])
new_containers.append(new_container)
return new_containers
def make_container_name(self, container_id, version=None):
"""Returns container name
:param container_id: container's id
:returns: name of the container
"""
if version is None:
version = self.config.new_version
return u'{0}{1}-{2}'.format(
self.config.container_prefix, version, container_id)
def make_new_release_images_list(self):
"""Returns list of dicts with information
for new images.
"""
new_images = []
for image in self.config.images:
new_image = deepcopy(image)
new_image['name'] = self.make_image_name(image['id'])
new_image['type'] = image['type']
new_image['docker_image'] = image['docker_image']
new_image['docker_file'] = image['docker_file']
new_images.append(new_image)
return new_images
def make_image_name(self, image_id):
"""Makes full image name
:param image_id: image id from config file
:returns: full name
"""
images = filter(
lambda i: i['id'] == image_id,
self.config.images)
if not images:
raise errors.CannotFindImageError(
'Cannot find image with id: {0}'.format(image_id))
image = images[0]
self._check_image_type(image['type'])
if image['type'] == 'base':
return image['id']
return u'{0}{1}_{2}'.format(
self.config.image_prefix,
image['id'],
self.config.new_version)
def _check_image_type(self, image_type):
"""Check if image type is valid
:param image_type: string, type of image
:raises UnsupportedImageTypeError:
"""
if image_type not in ('base', 'fuel'):
raise errors.UnsupportedImageTypeError(
'Unsupported image type: {0}'.format(image_type))
def container_by_id(self, container_id):
"""Get container from new release by id
:param container_id: id of container
"""
filtered_containers = filter(
lambda c: c['id'] == container_id,
self.new_release_containers)
if not filtered_containers:
raise errors.CannotFindContainerError(
'Cannot find container with id {0}'.format(container_id))
return filtered_containers[0]
def container_docker_id(self, name):
"""Returns running container with specified name
:param name: name of the container
:returns: id of the container or None if not found
:raises CannotFindContainerError:
"""
containers_with_name = self._get_containers_by_name(name)
running_containers = filter(
lambda c: c['Status'].startswith('Up'),
containers_with_name)
if not running_containers:
raise errors.CannotFindContainerError(
'Cannot find running container with name "{0}"'.format(name))
return running_containers[0]['Id']
def remove_new_release_images(self):
"""We need to remove images for current release
because this script can be run several times
and we have to delete images before images
building
"""
# Don't remove base images because we cannot
# determine what version they belong to
images = filter(
lambda i: i.get('type') == 'fuel',
self.new_release_images)
image_names = [c['name'] for c in images]
for image in image_names:
self._delete_containers_for_image(image)
if self.docker_client.images(name=image):
logger.info(u'Remove image for new version %s', image)
self.docker_client.remove_image(image)
def _delete_container_if_exist(self, container_name):
"""Deletes docker container if it exists
:param container_name: name of container
"""
found_containers = self._get_containers_by_name(container_name)
for container in found_containers:
self.stop_container(container['Id'])
logger.debug(u'Delete container %s', container)
# TODO(eli): refactor it and make retries
# as a decorator
def func_remove():
self.docker_client.remove_container(container['Id'])
self.exec_with_retries(
func_remove,
docker.errors.APIError,
'Error running removeDevice',
retries=3,
interval=2)
def _get_containers_by_name(self, container_name):
return filter(
lambda c: u'/{0}'.format(container_name) in c['Names'],
self.docker_client.containers(all=True))
def _delete_containers_for_image(self, image):
"""Deletes docker containers for specified image
:param image: name of image
"""
all_containers = self.docker_client.containers(all=True)
containers = filter(
# NOTE(eli): We must convert to str
# because in some cases Image is an integer
lambda c: str(c.get('Image')).startswith(image),
all_containers)
for container in containers:
logger.debug(u'Try to stop container %s which '
'depends on image %s', container['Id'], image)
self.docker_client.stop(container['Id'])
logger.debug(u'Delete container %s which '
'depends on image %s', container['Id'], image)
self.docker_client.remove_container(container['Id'])
class DockerInitializer(DockerUpgrader):
"""Initial implementation of docker initializer
will be used for master node initialization
"""
def upgrade(self):
self.upload_images()
self.stop_fuel_containers()
self.create_containers()
self.stop_fuel_containers()
self.generate_configs()
self.switch_to_new_configs()
# Reload configs and run new services
self.supervisor.restart_and_wait()
def rollback(self):
logger.warn(u"DockerInitializer doesn't support rollback")
"""Automation using nox.
"""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import glob
import os
import shutil
import sys
import nox
sys.path.append(".")
from tools.automation import release # isort:skip # noqa
sys.path.pop()
nox.options.reuse_existing_virtualenvs = True
nox.options.sessions = ["lint"]
LOCATIONS = {
"common-wheels": "tests/data/common_wheels",
"protected-pip": "tools/tox_pip.py",
}
REQUIREMENTS = {
"docs": "tools/requirements/docs.txt",  # used by the docs() session below
"tests": "tools/requirements/tests.txt",
"common-wheels": "tools/requirements/tests-common_wheels.txt",
}
AUTHORS_FILE = "AUTHORS.txt"
VERSION_FILE = "src/pip/__init__.py"
def run_with_protected_pip(session, *arguments):
"""Do a session.run("pip", *arguments), using a "protected" pip.
This invokes a wrapper script that forwards calls to the original virtualenv's
(stable) pip rather than the code being tested. This ensures that the pip
being used is not the code being tested.
"""
env = {"VIRTUAL_ENV": session.virtualenv.location}
command = ("python", LOCATIONS["protected-pip"]) + arguments
kwargs = {"env": env, "silent": True}
session.run(*command, **kwargs)
def should_update_common_wheels():
# If the cache hasn't been created, create it.
if not os.path.exists(LOCATIONS["common-wheels"]):
return True
# If the requirements file was updated after the cache, we'll repopulate it.
cache_last_populated_at = os.path.getmtime(LOCATIONS["common-wheels"])
requirements_updated_at = os.path.getmtime(REQUIREMENTS["common-wheels"])
need_to_repopulate = requirements_updated_at > cache_last_populated_at
# Clear the stale cache.
if need_to_repopulate:
shutil.rmtree(LOCATIONS["common-wheels"], ignore_errors=True)
return need_to_repopulate
# -----------------------------------------------------------------------------
# Development Commands
# These are currently prototypes to evaluate whether we want to switch over
# completely to nox for all our automation. Contributors should prefer using
# `tox -e ...` until this note is removed.
# -----------------------------------------------------------------------------
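# Example invocations (illustrative only):
#   nox -s lint
#   nox -s test-3.8 -- -k "some_test_name"
# Arguments after "--" are forwarded to pytest by the test session below.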
@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8", "pypy", "pypy3"])
def test(session):
# Get the common wheels.
if should_update_common_wheels():
run_with_protected_pip(
session,
"wheel",
"-w", LOCATIONS["common-wheels"],
"-r", REQUIREMENTS["common-wheels"],
)
else:
msg = (
"Re-using existing common-wheels at {}."
.format(LOCATIONS["common-wheels"])
)
session.log(msg)
# Build source distribution
sdist_dir = os.path.join(session.virtualenv.location, "sdist")
if os.path.exists(sdist_dir):
shutil.rmtree(sdist_dir, ignore_errors=True)
session.run(
"python", "setup.py", "sdist",
"--formats=zip", "--dist-dir", sdist_dir,
silent=True,
)
generated_files = os.listdir(sdist_dir)
assert len(generated_files) == 1
generated_sdist = os.path.join(sdist_dir, generated_files[0])
# Install source distribution
run_with_protected_pip(session, "install", generated_sdist)
# Install test dependencies
run_with_protected_pip(session, "install", "-r", REQUIREMENTS["tests"])
# Parallelize tests as much as possible, by default.
arguments = session.posargs or ["-n", "auto"]
# Run the tests
# LC_CTYPE is set to get UTF-8 output inside of the subprocesses that our
# tests use.
session.run("pytest", *arguments, env={"LC_CTYPE": "en_US.UTF-8"})
@nox.session
def docs(session):
session.install(".")
session.install("-r", REQUIREMENTS["docs"])
def get_sphinx_build_command(kind):
# Having the conf.py in the docs/html is weird but needed because we
# cannot use a different configuration directory vs source directory
# on RTD currently. So, we'll pass "-c docs/html" here.
# See https://github.com/rtfd/readthedocs.org/issues/1543.
return [
"sphinx-build",
"-W",
"-c", "docs/html", # see note above
"-d", "docs/build/doctrees/" + kind,
"-b", kind,
"docs/" + kind,
"docs/build/" + kind,
]
session.run(*get_sphinx_build_command("html"))
session.run(*get_sphinx_build_command("man"))
@nox.session
def lint(session):
session.install("pre-commit")
if session.posargs:
args = session.posargs + ["--all-files"]
else:
args = ["--all-files", "--show-diff-on-failure"]
session.run("pre-commit", "run", *args)
# -----------------------------------------------------------------------------
# Release Commands
# -----------------------------------------------------------------------------
@nox.session(name="prepare-release")
def prepare_release(session):
version = release.get_version_from_arguments(session.posargs)
if not version:
session.error("Usage: nox -s prepare-release -- YY.N[.P]")
session.log("# Ensure nothing is staged")
if release.modified_files_in_git("--staged"):
session.error("There are files staged in git")
session.log(f"# Updating {AUTHORS_FILE}")
release.generate_authors(AUTHORS_FILE)
if release.modified_files_in_git():
release.commit_file(
session, AUTHORS_FILE, message=f"Update {AUTHORS_FILE}",
)
else:
session.log(f"# No changes to {AUTHORS_FILE}")
session.log("# Generating NEWS")
release.generate_news(session, version)
session.log(f"# Bumping for release {version}")
release.update_version_file(version, VERSION_FILE)
release.commit_file(session, VERSION_FILE, message="Bump for release")
session.log("# Tagging release")
release.create_git_tag(session, version, message=f"Release {version}")
session.log("# Bumping for development")
next_dev_version = release.get_next_development_version(version)
release.update_version_file(next_dev_version, VERSION_FILE)
release.commit_file(session, VERSION_FILE, message="Bump for development")
@nox.session(name="build-release")
def build_release(session):
version = release.get_version_from_arguments(session.posargs)
if not version:
session.error("Usage: nox -s build-release -- YY.N[.P]")
session.log("# Ensure no files in dist/")
if release.have_files_in_folder("dist"):
session.error("There are files in dist/. Remove them and try again")
session.log("# Install dependencies")
session.install("setuptools", "wheel", "twine")
session.log("# Checkout the tag")
session.run("git", "checkout", version, external=True, silent=True)
session.log("# Build distributions")
session.run("python", "setup.py", "sdist", "bdist_wheel", silent=True)
session.log("# Verify distributions")
session.run("twine", "check", *glob.glob("dist/*"), silent=True)
session.log("# Checkout the master branch")
session.run("git", "checkout", "master", external=True, silent=True)
@nox.session(name="upload-release")
def upload_release(session):
version = release.get_version_from_arguments(session.posargs)
if not version:
session.error("Usage: nox -s upload-release -- YY.N[.P]")
session.log("# Install dependencies")
session.install("twine")
distribution_files = glob.glob("dist/*")
session.log(f"# Distribution files: {distribution_files}")
# Sanity check: Make sure there are 2 distribution files.
count = len(distribution_files)
if count != 2:
session.error(
f"Expected 2 distribution files for upload, got {count}. "
f"Remove dist/ and run 'nox -s build-release -- {version}'"
)
# Sanity check: Make sure the files are correctly named.
expected_distribution_files = [
f"pip-{version}-py2.py3-none-any.whl",
f"pip-{version}.tar.gz",
]
if sorted(distribution_files) != sorted(expected_distribution_files):
session.error(
f"Distribution files do not seem to be for {version} release."
)
session.log("# Upload distributions")
session.run("twine", "upload", *distribution_files)
import sys
from django import forms
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import get_resolver
from django.shortcuts import render_to_response, render
from django.template import Context, RequestContext, TemplateDoesNotExist
from django.views.debug import technical_500_response, SafeExceptionReporterFilter
from django.views.decorators.debug import (sensitive_post_parameters,
sensitive_variables)
from django.utils.log import getLogger
from regressiontests.views import BrokenException, except_args
from models import Article
def index_page(request):
"""Dummy index page"""
return HttpResponse('<html><body>Dummy page</body></html>')
def custom_create(request):
"""
Calls create_object generic view with a custom form class.
"""
class SlugChangingArticleForm(forms.ModelForm):
"""Custom form class to overwrite the slug."""
class Meta:
model = Article
def save(self, *args, **kwargs):
self.instance.slug = 'some-other-slug'
return super(SlugChangingArticleForm, self).save(*args, **kwargs)
from django.views.generic.create_update import create_object
return create_object(request,
post_save_redirect='/create_update/view/article/%(slug)s/',
form_class=SlugChangingArticleForm)
def raises(request):
# Make sure that a callable that raises an exception in the stack frame's
# local vars won't hijack the technical 500 response. See:
# http://code.djangoproject.com/ticket/15025
def callable():
raise Exception
try:
raise Exception
except Exception:
return technical_500_response(request, *sys.exc_info())
def raises404(request):
resolver = get_resolver(None)
resolver.resolve('')
def redirect(request):
"""
Forces an HTTP redirect.
"""
return HttpResponseRedirect("target/")
def view_exception(request, n):
raise BrokenException(except_args[int(n)])
def template_exception(request, n):
return render_to_response('debug/template_exception.html',
{'arg': except_args[int(n)]})
# Some views to exercise the shortcuts
def render_to_response_view(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_to_response_view_with_request_context(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, context_instance=RequestContext(request))
def render_to_response_view_with_mimetype(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, mimetype='application/x-rendertest')
def render_view(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_view_with_base_context(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, context_instance=Context())
def render_view_with_content_type(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_view_with_status(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, status=403)
def render_view_with_current_app(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, current_app="foobar_app")
def render_view_with_current_app_conflict(request):
# This should fail because passing both a current_app and a
# context_instance is not allowed:
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, current_app="foobar_app", context_instance=RequestContext(request))
def raises_template_does_not_exist(request):
# We need to inspect the HTML generated by the fancy 500 debug view but
# the test client ignores it, so we send it explicitly.
try:
return render_to_response('i_dont_exist.html')
except TemplateDoesNotExist:
return technical_500_response(request, *sys.exc_info())
def send_log(request, exc_info):
logger = getLogger('django.request')
logger.error('Internal Server Error: %s' % request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request': request
}
)
def non_sensitive_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
@sensitive_post_parameters('bacon-key', 'sausage-key')
def sensitive_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables()
@sensitive_post_parameters()
def paranoid_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
class UnsafeExceptionReporterFilter(SafeExceptionReporterFilter):
"""
Ignores all the filtering done by its parent class.
"""
def get_post_parameters(self, request):
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return tb_frame.f_locals.items()
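# Note (illustration, not part of the original test module): the view below
# attaches this filter per-request via ``request.exception_reporter_filter``.
# Django can also select a reporter filter globally through the
# DEFAULT_EXCEPTION_REPORTER_FILTER setting, e.g. (hypothetical dotted path):
#
# DEFAULT_EXCEPTION_REPORTER_FILTER = \
#     'regressiontests.views.views.UnsafeExceptionReporterFilter'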
@sensitive_variables()
@sensitive_post_parameters()
def custom_exception_reporter_filter_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
request.exception_reporter_filter = UnsafeExceptionReporterFilter()
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
#!/usr/bin/env python3
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# Lists all closed issues since a given date
import argparse
import sys
import os
import re
import time
import threading
import requests
args = None
class Spinner:
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/-\\':
yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def __enter__(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def __exit__(self, exception, value, tb):
self.busy = False
time.sleep(self.delay)
if exception is not None:
return False
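# Usage example: wrap any slow block so a spinning cursor is shown until the
# block exits, e.g.
#
#   with Spinner():
#       time.sleep(5)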
class Issues:
def __init__(self, org, repo, token):
self.repo = repo
self.org = org
self.issues_url = "https://github.com/%s/%s/issues" % (
self.org, self.repo)
self.github_url = 'https://api.github.com/repos/%s/%s' % (
self.org, self.repo)
self.api_token = token
self.headers = {}
self.headers['Authorization'] = 'token %s' % self.api_token
self.headers['Accept'] = 'application/vnd.github.golden-comet-preview+json'
self.items = []
def get_pull(self, pull_nr):
url = ("%s/pulls/%s" % (self.github_url, pull_nr))
response = requests.get("%s" % (url), headers=self.headers)
if response.status_code != 200:
raise RuntimeError(
"Failed to get issue due to unexpected HTTP status code: {}".format(
response.status_code)
)
item = response.json()
return item
def get_issue(self, issue_nr):
url = ("%s/issues/%s" % (self.github_url, issue_nr))
response = requests.get("%s" % (url), headers=self.headers)
if response.status_code != 200:
return None
item = response.json()
return item
def list_issues(self, url):
response = requests.get("%s" % (url), headers=self.headers)
if response.status_code != 200:
raise RuntimeError(
"Failed to get issue due to unexpected HTTP status code: {}".format(
response.status_code)
)
self.items = self.items + response.json()
try:
print("Getting more items...")
next_issues = response.links["next"]
if next_issues:
next_url = next_issues['url']
self.list_issues(next_url)
except KeyError:
pass
def issues_since(self, date, state="closed"):
self.list_issues("%s/issues?state=%s&since=%s" %
(self.github_url, state, date))
def pull_requests(self, base='v1.14-branch', state='closed'):
self.list_issues("%s/pulls?state=%s&base=%s" %
(self.github_url, state, base))
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-o", "--org", default="zephyrproject-rtos",
help="Github organisation")
parser.add_argument("-r", "--repo", default="zephyr",
help="Github repository")
parser.add_argument("-f", "--file", required=True,
help="Name of output file.")
parser.add_argument("-s", "--issues-since",
help="""List issues since date where date
is in the format 2019-09-01.""")
parser.add_argument("-b", "--issues-in-pulls",
help="List issues in pulls for a given branch")
parser.add_argument("-c", "--commits-file",
help="""File with all commits (git log a..b) to
be parsed for fixed bugs.""")
args = parser.parse_args()
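# Example invocations (illustrative; "list_issues.py" is a placeholder for
# this script's filename, and GH_TOKEN must be exported first):
#   GH_TOKEN=<token> python3 list_issues.py -f issues.rst -s 2019-09-01
#   GH_TOKEN=<token> python3 list_issues.py -f bugs.rst -b v1.14-branch -c commits.txt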
def main():
parse_args()
token = os.environ.get('GH_TOKEN', None)
if not token:
sys.exit("""Github token not set in environment,
set the env. variable GH_TOKEN please and retry.""")
i = Issues(args.org, args.repo, token)
if args.issues_since:
i.issues_since(args.issues_since)
count = 0
with open(args.file, "w") as f:
for issue in i.items:
if 'pull_request' not in issue:
# * :github:`8193` - STM32 config BUILD_OUTPUT_HEX fail
f.write("* :github:`{}` - {}\n".format(
issue['number'], issue['title']))
count = count + 1
elif args.issues_in_pulls:
i.pull_requests(base=args.issues_in_pulls)
count = 0
bugs = set()
backports = []
for issue in i.items:
if not isinstance(issue['body'], str):
continue
match = re.findall(r"(Fixes|Closes|Fixed|close):? #([0-9]+)",
issue['body'], re.MULTILINE)
if match:
for mm in match:
bugs.add(mm[1])
else:
match = re.findall(
r"Backport #([0-9]+)", issue['body'], re.MULTILINE)
if match:
backports.append(match[0])
# follow PRs to their origin (backports)
with Spinner():
for p in backports:
item = i.get_pull(p)
match = re.findall(r"(Fixes|Closes|Fixed|close):? #([0-9]+)",
item['body'], re.MULTILINE)
for mm in match:
bugs.add(mm[1])
# now open commits
if args.commits_file:
print("Open commits file and parse for fixed bugs...")
with open(args.commits_file, "r") as commits:
content = commits.read()
match = re.findall(r"(Fixes|Closes|Fixed|close):? #([0-9]+)",
str(content), re.MULTILINE)
for mm in match:
bugs.add(mm[1])
print("Create output file...")
with Spinner():
with open(args.file, "w") as f:
for m in sorted(bugs):
item = i.get_issue(m)
if item:
# * :github:`8193` - STM32 config BUILD_OUTPUT_HEX fail
f.write("* :github:`{}` - {}\n".format(
item['number'], item['title']))
if __name__ == '__main__':
main()
"""
This script was customized to generate graphs from
calcium imaging data in the AT1/MPO experiments.
"""
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
np.set_printoptions(suppress=True) # don't show exponential notation
import os
import subprocess
import sys
import glob
import shutil
import read_roi
import time
class TiffVid:
def __init__(self,folder,clean=False):
print("LOADING TIFF VIDEO",folder)
self.folder=folder
if clean:
self.clean()
self.loadTXT(os.path.join(folder,"experiment.txt"))
self.loadTSV(os.path.join(folder,"results.xls"))
self.calcdFF()
def clean(self):
"""delete all files in this folder except for the experiment files."""
print("cleaning out old data...")
for fname in os.listdir(self.folder):
if fname.startswith("results_") or \
fname.startswith("fig_"):
print(" DELETING",fname)
os.remove(self.folder+"/"+fname)
def loadTXT(self, fname):
"""load a text file and return its content as a dictionary"""
fname=fname[:-4]+".txt"
if not os.path.exists(fname):
print("WARNING: experiment file does not exist, so I'll make one.")
with open(fname,'w') as f:
f.write("")
with open(fname) as f:
raw=f.readlines()
conf={"baseline":[0,1],"period":10}
for line in raw:
line=line.strip()
if line.startswith("#"): continue
if line.count("=")==1:
var,val=line.split("=")
vals=val.split("-")
for i in range(len(vals)):
vals[i]=float(vals[i])
if len(vals)==1:
vals=vals[0]
conf[var]=vals
print("",var,"=",vals)
self.conf=conf
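# An illustrative experiment.txt this parser accepts (the "drug" key is
# hypothetical; any key whose value is a two-number range can later be
# shaded by figure_shade):
#   # lines starting with '#' are ignored
#   baseline=0-10
#   period=10
#   drug=60-90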
def fnameToTime(self,path):
fname=os.path.basename(path)
fname=fname.split(".")
thisTime=fname[0]+"."+fname[1]
return float(thisTime)
def loadTSV(self, fname):
"""
load the tab-separated results file generated by ImageJ, convert it to a
numpy array, and store it transposed (one row per column/ROI) in self.data.
"""
print("loading data from:",fname)
with open(fname) as f:
raw=f.read()
raw=raw.replace("\t",",")
raw=raw.split("\n")
labels=raw[0].strip().split(",")
labels[0]="frame"
raw=raw[1:]
nRows=len(raw)
nCols=len(labels)
data=np.empty((nRows,nCols))
data[:]=np.nan
for row in range(nRows):
if raw[row].count(","):
data[row]=raw[row].split(",")
if np.all(np.isnan(data[-1])):
data=data[:-1]
print("loaded %d lines of data from %d ROIs"%(nRows,nCols-1))
self.data=np.transpose(data)
self.dataYlabel="raw pixel value"
self.dataXlabel="experiment duration (minutes)"
# fixed time units way
#self.dataX=np.arange(len(data))*self.conf['period']/60
# look up time units from filename way
picFnames=sorted(glob.glob(self.folder+"/video/*.tif"))
self.dataX=[]
for fname in picFnames:
self.dataX.append(self.fnameToTime(fname))
while len(self.dataX)<len(picFnames):
lastDx=self.dataX[-1]-self.dataX[-2]
self.dataX.append(self.dataX[-1]+lastDx)
self.dataX=np.array(self.dataX)-self.dataX[0]
while len(self.dataX)>len(picFnames):
self.dataX=self.dataX[:-1]
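# Illustrative layout of results.xls (tab-separated; the exact column names
# depend on the ImageJ measurement settings):
#       Mean1   Mean2
#   1   1023.4  998.1
#   2   1030.2  1001.7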
def calcdFF(self,subtractOutFirstROI=True):
"""
once raw pixel values are in self.data, run this to convert to dF/F.
I'm using the variable names shown on the GitHub:
github.com/swharden/ROI-Analysis-Pipeline/blob/master/doc/theory.jpg
"""
b1=int(self.conf['baseline'][0])
b2=int(self.conf['baseline'][1])
for i,f in enumerate(self.data[1:]):
b=np.average(f[b1:b2])
r=f/b # baseline-adjusted raw fluorescence intensity
d=(r-1)*100 # delta F / F in percent (%)
self.data[i+1]=d # push this ROI dF/F (d) back into the data.
self.dataYlabel=r'$\Delta$'+"F/F (%)"
if subtractOutFirstROI:
self.data[1:]=self.data[1:]-self.data[1]
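# Worked example of the conversion above: if an ROI's baseline average is
# b=200 and a later frame reads f=220, then r=f/b=1.1 and d=(r-1)*100=10,
# i.e. a 10% dF/F for that frame.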
def renderVideo(self,overwrite=False,showToo=False):
if overwrite is False and os.path.exists(self.folder+"/render2.mp4"):
print("VIDEO ALREADY EXISTS")
return
fnames=sorted(glob.glob(self.folder+"/video/*.tif"))
self.maxIntensity=False # optionally define this here
for frame in range(len(fnames)):
print("Processing frame %d of %d (%.02f%%)"%(frame,len(self.dataX),100*frame/len(self.dataX)))
fname=fnames[frame]
fname=os.path.join(self.folder,fname)
self.figure_tiff_and_graph(fnamePic=fname,frame=frame,showToo=showToo)
#cmd=r'C:\Users\swharden\Documents\important\ffmpeg\bin\ffmpeg.exe'
#cmd+=' -framerate 10'
#cmd+=r' -y -i "%s\video\frame_%%07d.png"'%self.folder
#cmd+=r' -c:v libx264 -pix_fmt yuv420p "%s"'%os.path.join(self.folder,"render2.mp4")
pathIn=os.path.abspath(self.folder+"/video/video/frame_%07d.tif")
fileOut=os.path.abspath(self.folder+"/render2.mp4")
cmd = 'ffmpeg.exe -framerate 10 -y -i "%s" '%pathIn
cmd += '-c:v libx264 -pix_fmt yuv420p "%s" '%fileOut
print(cmd)
os.system(cmd)
#shutil.rmtree(os.path.join(self.folder,"video"))
print("CREATED VIDEO:\n",os.path.join(self.folder,"render2.mp4"))
###########################################################################
### FIGURES ###############################################################
###########################################################################
def figure_shade(self):
"""read the conf and shade the regions it contains."""
colors=['r','g','b','o','m','k','y']
for i,key in enumerate(self.conf):
if type(self.conf[key]) is list and len(self.conf[key])==2:
plt.axvspan(10*60,13*60,alpha=.1,lw=0,label=key,color=colors[i])
def fig_traces(self):
plt.figure(figsize=(8,6))
plt.axhline(0,lw=1,color='k',ls='--')
for i in range(2,len(self.data)):
plt.plot(self.dataX,self.data[i],color='k',lw=2,alpha=.2)
plt.plot(self.dataX,np.average(self.data[2:],axis=0),color='k',lw=2,alpha=1,ls='-',label="average")
plt.grid(alpha=.5)
self.figure_shade()
plt.margins(0,.1)
plt.legend(fontsize=11)
plt.ylabel(self.dataYlabel,fontsize=16)
plt.xlabel(self.dataXlabel,fontsize=16)
plt.title("ROI Traces [%s]"%os.path.basename(self.folder),fontsize=20)
#plt.savefig(self.folder+"/fig_zzz_traces.png",dpi=100)
plt.tight_layout()
#plt.show()
plt.close()
def fig_av(self,stdErr=False):
plt.figure(figsize=(8,6))
XS=self.data[0]*self.conf['period']/60
AV=np.average(self.data[2:],axis=0)
ERR=np.std(self.data[2:],axis=0)
errorType="stDev"
if stdErr:
ERR=ERR/np.sqrt(len(self.data[2:]))
errorType="stdErr"
plt.axhline(0,lw=1,color='k',ls='--')
plt.fill_between(XS,AV-ERR,AV+ERR,
alpha=.1,label=errorType,lw=0,color='k')
plt.plot(XS,AV,color='k',label="average",lw=2)
plt.grid(alpha=.5)
self.figure_shade()
plt.margins(0,.1)
plt.legend(fontsize=11)
plt.ylabel(r'$\Delta$'+"F/F (%)",fontsize=16)
plt.xlabel("experiment duration (minutes)",fontsize=16)
plt.title("ROI Traces [%s]"%os.path.basename(self.folder),fontsize=20)
#plt.savefig(self.folder+"/fig_zzz_av.png",dpi=100)
plt.tight_layout()
#plt.show()
plt.close()
###########################################################################
def figure_raw_all_highlight(self,upToFrame=-1):
"""
render an image ready for video overlay showing all ROI traces
and highlighting up to a certain frame.
"""
plt.figure(figsize=(8,6))
if upToFrame<0:
upToFrame=len(self.data[0])
for i in range(1,len(self.data)):
offsetX=i*1
offsetY=i*2
plt.plot(offsetX+self.dataBL[0]*self.conf['period']/60,
offsetY+self.dataBL[i],
color='k',alpha=.2,lw=1)
plt.plot(offsetX+self.dataBL[0][:upToFrame]*self.conf['period']/60,
offsetY+self.dataBL[i][:upToFrame],
color='y',alpha=.5,lw=3)
plt.axis('off')
plt.margins(0,.1)
plt.tight_layout()
#plt.savefig("01-raw.png",dpi=100)
plt.show()
plt.close()
def figure_tiff_and_graph(self,fnamePic='../data/sample.jpg',frame=-1,
showToo=False):
# create the individual PNG files to be used for video creation
from read_roi import read_roi_zip
# PREPARE THE FIGURE
mult=1
plt.figure(figsize=(16*mult,9*mult))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
# LEFT AXES - FIGURE
ax0 = plt.subplot(gs[0])
img=plt.imread(fnamePic)
if self.maxIntensity is False:
self.maxIntensity=np.percentile(img,99)*1.5
print("SETTING MAX INTENSITY:",self.maxIntensity)
ax0.imshow(img, zorder=0, cmap='gray', clim=(0, self.maxIntensity))
roiFile=self.folder+"/RoiSet.zip"
if os.path.exists(roiFile):
rois = read_roi_zip(roiFile)
for p,roi in enumerate(rois):
color='y'
if p==0:
color='c'
X1,Y1=rois[roi]['left'],rois[roi]['top']
X2,Y2=rois[roi]['width']+X1,rois[roi]['height']+Y1
ax0.plot([X1,X2,X2,X1,X1],[Y1,Y1,Y2,Y2,Y1],
color=color,alpha=.5,lw=2)
lbl=str(p)
if p==0:
lbl="baseline"
ax0.text(X1+1,Y1+5,str(lbl),va='top',color=color,
fontsize=14,fontweight='bold')
#miscmsg="OTR Cre / GCaMP6f [PFC]\n"
miscmsg=""
msg="%s\n%sframe:%d\nminutes: %.02f"%(self.folder,miscmsg,frame,self.dataX[frame]/60)
ax0.text(3,3,msg,va='top',color='k',fontsize=14,fontweight='bold')
ax0.text(0,0,msg,va='top',color='w',fontsize=14,fontweight='bold')
plt.margins(0,0)
plt.axis('off')
# RIGHT AXES - GRAPHS
ax1 = plt.subplot(gs[1])
if frame<0:
frame=len(self.data[0])
offsetX=0
offsetY=-30
for i in range(1,len(self.data)):
if i==1:
msg="baseline"
continue
else:
msg="ROI %02d"%(i-1)
ax1.text(0,offsetY*i-offsetY*.2,msg, fontsize=14)
ax1.plot(offsetX*i+self.dataX,
offsetY*i+self.data[i][:len(self.dataX)],
color='k',alpha=.2,lw=2)
ax1.plot(offsetX*i+self.dataX[:frame],
offsetY*i+self.data[i][:frame],
color='b',alpha=.5,lw=2)
self.figure_shade()
plt.axis('off')
plt.margins(0,.05)
# FIX UP THE FIGURE, SAVE, AND SHOW
plt.tight_layout()
bn=os.path.dirname(os.path.abspath(fnamePic))
if not os.path.exists(bn+"/video/"):
os.mkdir(bn+"/video/")
plt.savefig(bn+"/video/frame_%07d.tif"%frame)
if showToo:
plt.show()
plt.close('all')
def scriptRun():
#path=r"X:\Data\SCOTT\2017-05-10 GCaMP6f\2017-05-10 GCaMP6f PFC OXTR cre\2017-05-31 cell1"
#path=r"C:\Users\swharden\Documents\temp\seq"
#for folder in sorted(glob.glob(r"X:\Data\SCOTT\2017-05-10 GCaMP6f\2017-05-10 GCaMP6f PFC OXTR cre\*")):
for folder in sorted(glob.glob(r"X:\Data\AT1-Cre\MPO GCaMP6f\data\*-animal*")):
if not os.path.isdir(folder):
continue
if not "-animal" in folder:
continue
print("\n\n\n","#"*100,"\n"," ANALYZING",folder,"\n","#"*100)
if os.path.exists(folder+"/render2.mp4"):
print("SKIPPING")
continue
else:
print("PROCESSING ...")
time.sleep(1)
TV=TiffVid(folder)
TV.renderVideo(overwrite=True,showToo=True)
if __name__=="__main__":
if len(sys.argv)==1:
print("running from within script")
scriptRun()
else:
print("called from command line with arguments")
TV=TiffVid(sys.argv[1])
TV.fig_traces()
TV.fig_av()
TV.renderVideo()
print("DONE")
|
|
"""Manage release repository (not source repository)."""
__all__ = [
'BuilderImageDir',
'EnvsDir',
'ImageDir',
'PodDir',
'VolumeDir',
'XarDir',
'get_current_image_versions',
'merge_dict_of_sets',
]
import collections
from pathlib import Path
import foreman
from g1 import scripts
from g1.bases import classes
from g1.bases.assertions import ASSERT
import shipyard2
def _remove_file_and_maybe_parents(path, parent_path):
scripts.rm(path, recursive=path.is_dir())
with scripts.using_cwd(parent_path):
scripts.rmdir(
path.parent.relative_to(parent_path),
parents=True,
ignore_fail_on_non_empty=True,
)
class EnvsDir:
@staticmethod
def init(repo_path):
scripts.mkdir(repo_path / shipyard2.RELEASE_ENVS_DIR_NAME)
def __init__(self, repo_path):
self.repo_path = repo_path
self.top_path = self.repo_path / shipyard2.RELEASE_ENVS_DIR_NAME
self.envs = sorted(
p.name for p in self.top_path.iterdir() if p.is_dir()
)
__repr__ = classes.make_repr('repo_path={self.repo_path} envs={self.envs}')
def __hash__(self):
return hash((self.repo_path, tuple(self.envs)))
def __eq__(self, other):
return self.repo_path == other.repo_path and self.envs == other.envs
@property
def _pod_top_path(self):
return self.repo_path / shipyard2.RELEASE_PODS_DIR_NAME
@property
def _xar_top_path(self):
return self.repo_path / shipyard2.RELEASE_XARS_DIR_NAME
def get_current_pod_versions(self):
return self._get_current_versions(self.iter_pod_dirs)
def get_current_xar_versions(self):
return self._get_current_versions(self.iter_xar_dirs)
def _get_current_versions(self, iter_dir_objects):
current_versions = collections.defaultdict(set)
for env in self.envs:
for dir_object in iter_dir_objects(env):
current_versions[dir_object.label].add(dir_object.version)
return dict(current_versions)
def iter_pod_dirs(self, env):
yield from self._iter_dirs(PodDir, self._pod_top_path, env)
def iter_xar_dirs(self, env):
yield from self._iter_dirs(XarDir, self._xar_top_path, env)
def _iter_dirs(self, dir_object_type, target_top_path, env):
ASSERT.in_(env, self.envs)
# NOTE: rglob does NOT traverse into symlink directory (which is
# good in this case).
for link_path in (self.top_path / env).rglob('*'):
if not link_path.is_symlink():
continue
target_path = link_path.resolve()
# XXX Is there a better way to match path prefix?
if str(target_path).startswith(str(target_top_path)):
yield dir_object_type(target_top_path, target_path)
def sort_pod_dirs(self, env):
return _sort_by_path(self.iter_pod_dirs(env))
def sort_xar_dirs(self, env):
return _sort_by_path(self.iter_xar_dirs(env))
def release_pod(self, env, label, version):
return self._release(PodDir, self._pod_top_path, env, label, version)
def release_xar(self, env, label, version):
return self._release(XarDir, self._xar_top_path, env, label, version)
def _release(self, dir_object_type, target_top_path, env, label, version):
relpath = label.path / label.name
link_path = self.top_path / env / relpath
dir_object = dir_object_type(
target_top_path,
target_top_path / relpath / version,
)
scripts.rm(link_path)
scripts.make_relative_symlink(dir_object.path, link_path)
def unrelease(self, env, label):
ASSERT.in_(env, self.envs)
_remove_file_and_maybe_parents(
self.top_path / env / label.path / label.name,
self.top_path / env,
)
class _Base:
_TOP_DIR_NAME = None
_FILENAME = None
@classmethod
def init(cls, repo_path):
scripts.mkdir(repo_path / cls._TOP_DIR_NAME)
@classmethod
def iter_dirs(cls, repo_path):
top_path = repo_path / cls._TOP_DIR_NAME
for path in top_path.rglob(cls._FILENAME):
yield cls(top_path, path.parent)
@classmethod
def sort_dirs(cls, repo_path):
return _sort_by_path(cls.iter_dirs(repo_path))
@classmethod
def group_dirs(cls, repo_path):
groups = collections.defaultdict(list)
for dir_object in cls.sort_dirs(repo_path):
groups[dir_object.label].append(dir_object)
return dict(groups)
@classmethod
def from_relpath(cls, repo_path, relpath):
top_path = repo_path / cls._TOP_DIR_NAME
return cls(top_path, top_path / relpath)
def __init__(self, top_path, path):
ASSERT.predicate(path, Path.is_dir)
ASSERT.predicate(path / self._FILENAME, Path.is_file)
self.top_path = top_path
self.path = path
__repr__ = classes.make_repr('path={self.path}')
def __hash__(self):
return hash((self.top_path, self.path))
def __eq__(self, other):
return self.top_path == other.top_path and self.path == other.path
@property
def label(self):
relpath = self.path.parent.relative_to(self.top_path)
label_path = relpath.parent
label_name = relpath.name
return foreman.Label.parse('//%s:%s' % (label_path, label_name))
@property
def version(self):
return self.path.name
def remove(self):
_remove_file_and_maybe_parents(
self.path,
self.top_path,
)
class PodDir(_Base):
_TOP_DIR_NAME = shipyard2.RELEASE_PODS_DIR_NAME
_FILENAME = shipyard2.POD_DIR_RELEASE_METADATA_FILENAME
@classmethod
def get_current_image_versions(cls, repo_path):
return cls._get_current_versions(repo_path, cls.iter_image_dirs)
@classmethod
def get_current_volume_versions(cls, repo_path):
return cls._get_current_versions(repo_path, cls.iter_volume_dirs)
@classmethod
def _get_current_versions(cls, repo_path, iter_dir_objects):
current_versions = collections.defaultdict(set)
for pod_dir in cls.iter_dirs(repo_path):
for dir_object in iter_dir_objects(pod_dir):
current_versions[dir_object.label].add(dir_object.version)
return dict(current_versions)
def __init__(self, top_path, path):
ASSERT.predicate(path, Path.is_dir)
for name, predicate in (
(shipyard2.POD_DIR_RELEASE_METADATA_FILENAME, Path.is_file),
(shipyard2.POD_DIR_DEPLOY_INSTRUCTION_FILENAME, Path.is_file),
(shipyard2.POD_DIR_IMAGES_DIR_NAME, Path.is_dir),
(shipyard2.POD_DIR_VOLUMES_DIR_NAME, Path.is_dir),
):
ASSERT.predicate(path / name, predicate)
super().__init__(top_path, path)
def iter_image_dirs(self):
yield from self._iter_deps(
ImageDir,
self.top_path.parent / shipyard2.RELEASE_IMAGES_DIR_NAME,
shipyard2.POD_DIR_IMAGES_DIR_NAME,
shipyard2.IMAGE_DIR_IMAGE_FILENAME,
)
def iter_volume_dirs(self):
yield from self._iter_deps(
VolumeDir,
self.top_path.parent / shipyard2.RELEASE_VOLUMES_DIR_NAME,
shipyard2.POD_DIR_VOLUMES_DIR_NAME,
shipyard2.VOLUME_DIR_VOLUME_FILENAME,
)
def _iter_deps(self, dir_object_type, top_path, dir_name, filename):
for dir_path in (self.path / dir_name).iterdir():
link_path = dir_path / filename
if link_path.is_symlink():
yield dir_object_type(
top_path,
link_path.resolve().parent,
)
class XarDir(_Base):
_TOP_DIR_NAME = shipyard2.RELEASE_XARS_DIR_NAME
_FILENAME = shipyard2.XAR_DIR_RELEASE_METADATA_FILENAME
@classmethod
def get_current_image_versions(cls, repo_path):
current_versions = collections.defaultdict(set)
for xar_dir in cls.iter_dirs(repo_path):
image_dir = xar_dir.get_image_dir()
if image_dir is not None:
current_versions[image_dir.label].add(image_dir.version)
return dict(current_versions)
def __init__(self, top_path, path):
ASSERT.predicate(path, Path.is_dir)
for name, predicate in (
(shipyard2.XAR_DIR_RELEASE_METADATA_FILENAME, Path.is_file),
(shipyard2.XAR_DIR_DEPLOY_INSTRUCTION_FILENAME, Path.is_file),
):
ASSERT.predicate(path / name, predicate)
ASSERT.any(
(
path / shipyard2.XAR_DIR_IMAGE_FILENAME,
path / shipyard2.XAR_DIR_ZIPAPP_FILENAME,
),
Path.is_file,
)
super().__init__(top_path, path)
def get_image_dir(self):
link_path = self.path / shipyard2.XAR_DIR_IMAGE_FILENAME
if not link_path.is_symlink():
return None
return ImageDir(
self.top_path.parent / shipyard2.RELEASE_IMAGES_DIR_NAME,
link_path.resolve().parent,
)
class BuilderImageDir(_Base):
_TOP_DIR_NAME = shipyard2.RELEASE_IMAGES_DIR_NAME
_FILENAME = shipyard2.IMAGE_DIR_BUILDER_IMAGE_FILENAME
def remove(self):
_remove_file_and_maybe_parents(
self.path / self._FILENAME,
self.top_path,
)
class ImageDir(_Base):
_TOP_DIR_NAME = shipyard2.RELEASE_IMAGES_DIR_NAME
_FILENAME = shipyard2.IMAGE_DIR_IMAGE_FILENAME
def remove(self):
_remove_file_and_maybe_parents(
self.path / self._FILENAME,
self.top_path,
)
class VolumeDir(_Base):
_TOP_DIR_NAME = shipyard2.RELEASE_VOLUMES_DIR_NAME
_FILENAME = shipyard2.VOLUME_DIR_VOLUME_FILENAME
def _sort_by_path(iterator):
return sorted(iterator, key=lambda obj: obj.path)
def get_current_image_versions(repo_path):
return merge_dict_of_sets(
PodDir.get_current_image_versions(repo_path),
XarDir.get_current_image_versions(repo_path),
)
def merge_dict_of_sets(*dicts_of_sets):
output = collections.defaultdict(set)
for dict_of_sets in dicts_of_sets:
for key, value_set in dict_of_sets.items():
output[key].update(value_set)
return dict(output)
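# Editor's note: a minimal usage sketch of merge_dict_of_sets(), added for
# illustration only; the labels and versions below are made up.
# get_current_image_versions() uses it to union the per-label version sets
# reported by PodDir and XarDir.
def _example_merge_dict_of_sets():
    pods = {'//foo:bar': {'1.0.0'}, '//spam:egg': {'2.0.0'}}
    xars = {'//foo:bar': {'1.0.1'}}
    # keys are unioned; overlapping keys get the union of their version sets:
    # {'//foo:bar': {'1.0.0', '1.0.1'}, '//spam:egg': {'2.0.0'}}
    return merge_dict_of_sets(pods, xars)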
|
|
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
|
This file contains the DAL support for many relational databases, including:
- SQLite & SpatiaLite
- MySQL
- Postgres
- Firebird
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- Informix (9+ and SE)
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
- Teradata
- IMAP (experimental)
Example of usage::
>>> # from dal import DAL, Field
### create DAL connection (and create DB if it doesn't exist)
>>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
... folder=None)
### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))
### insert a record
>>> id = person.insert(name='James')
### retrieve it by id
>>> james = person(id)
### retrieve it by name
>>> james = person(name='James')
### retrieve it by arbitrary query
>>> query = (person.name=='James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]
### update one record
>>> james.update_record(name='Jim')
<Row {'id': 1, 'name': 'Jim'}>
### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1
### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0
### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,
... groupby=person.name, limitby=(0,100))
### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James
### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1
### delete one record
>>> james.delete_record()
1
### delete (drop) entire database table
>>> person.drop()
Supported DAL URI strings::
'sqlite://test.db'
'spatialite://test.db'
'sqlite:memory'
'spatialite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:password@localhost/test'
'postgres:psycopg2://mdipierro:password@localhost/test'
'postgres:pg8000://mdipierro:password@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'mssql3://web2py:none@A64X2/web2py_test' # better pagination (requires >= 2005)
'mssql4://web2py:none@A64X2/web2py_test' # best pagination (requires >= 2012)
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2:ibm_db_dbi://DSN=dsn;UID=user;PWD=pass'
'db2:pyodbc://driver=DB2;hostname=host;database=database;uid=user;pwd=password;port=port'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'ingres://database' # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
'google:datastore' # for google app engine datastore (uses ndb by default)
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
'imap://user:password@server:port' # experimental
'mongodb://user:password@server:port/database' # experimental
For more info::
help(DAL)
help(Field)
"""
import glob
import logging
import socket
import threading
import time
import traceback
import urllib
from uuid import uuid4
from ._compat import PY2, pickle, hashlib_md5, pjoin, copyreg, integer_types, \
with_metaclass, long
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL, DEFAULT
from ._load import OrderedDict
from .helpers.classes import Serializable, SQLCallableList, BasicStorage, \
RecordUpdater, RecordDeleter, TimingHandler
from .helpers.methods import hide_password, smart_query, auto_validators, \
auto_represent
from .helpers.regex import REGEX_PYTHON_KEYWORDS, REGEX_DBNAME
from .helpers.rest import RestParser
from .helpers.serializers import serializers
from .objects import Table, Field, Rows, Row, Set
from .adapters.base import BaseAdapter, NullAdapter
TABLE_ARGS = set(
('migrate', 'primarykey', 'fake_migrate', 'format', 'redefine',
'singular', 'plural', 'trigger_name', 'sequence_name', 'fields',
'common_filter', 'polymodel', 'table_class', 'on_define', 'rname'))
class MetaDAL(type):
def __call__(cls, *args, **kwargs):
#: intercept arguments for DAL customisation on call
intercepts = [
'logger', 'representers', 'serializers', 'uuid', 'validators',
'validators_method', 'Table', 'Row']
intercepted = []
for name in intercepts:
val = kwargs.get(name)
if val:
intercepted.append((name, val))
del kwargs[name]
for tup in intercepted:
setattr(cls, tup[0], tup[1])
obj = super(MetaDAL, cls).__call__(*args, **kwargs)
return obj
class DAL(with_metaclass(MetaDAL, Serializable, BasicStorage)):
"""
An instance of this class represents a database connection
Args:
uri(str): contains information for connecting to a database.
Defaults to `'sqlite://dummy.db'`
Note:
experimental: you can specify a dictionary as uri
parameter i.e. with::
db = DAL({"uri": "sqlite://storage.sqlite",
"tables": {...}, ...})
for an example of dict input you can check the output
of the scaffolding db model with
db.as_dict()
Note that for compatibility with Python older than
version 2.6.5 you should cast your dict input keys
to str due to a syntax limitation on kwarg names.
for proper DAL dictionary input you can use one of::
obj = serializers.cast_keys(dict, [encoding="utf-8"])
#or else (for parsing json input)
obj = serializers.loads_json(data, unicode_keys=False)
pool_size: How many open connections to make to the database object.
folder: where .table files will be created. Automatically set within
web2py. Use an explicit path when using DAL outside web2py
db_codec: string encoding of the database (default: 'UTF-8')
table_hash: database identifier with .tables. If your connection hash
changes you can still use the old .tables if they have db_hash
as prefix
check_reserved: list of adapters to check tablenames and column names
against sql/nosql reserved keywords. Defaults to `None`
- 'common' List of sql keywords that are common to all database
types such as "SELECT, INSERT". (recommended)
- 'all' Checks against all known SQL keywords
- '<adaptername>' Checks against the specific adapter's list of
keywords
- '<adaptername>_nonreserved' Checks against the specific adapter's
list of nonreserved keywords. (if available)
migrate: sets default migrate behavior for all tables
fake_migrate: sets default fake_migrate behavior for all tables
migrate_enabled: If set to False disables ALL migrations
fake_migrate_all: If set to True fake migrates ALL tables
attempts: Number of times to attempt connecting
auto_import: If set to True, tries to automatically import table
definitions from the databases folder (works only for simple models)
bigint_id: If set, turn on bigint instead of int for id and reference
fields
lazy_tables: delays table definition until table access
after_connection: a callable that will be executed after the
connection is established
Example:
Use as::
db = DAL('sqlite://test.db')
or::
db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
"""
serializers = None
validators = None
validators_method = None
representers = {}
uuid = lambda x: str(uuid4())
logger = logging.getLogger("pyDAL")
Table = Table
Rows = Rows
Row = Row
record_operators = {
'update_record': RecordUpdater,
'delete_record': RecordDeleter
}
execution_handlers = [TimingHandler]
def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
if not hasattr(THREAD_LOCAL, '_pydal_db_instances_'):
THREAD_LOCAL._pydal_db_instances_ = {}
if not hasattr(THREAD_LOCAL, '_pydal_db_instances_zombie_'):
THREAD_LOCAL._pydal_db_instances_zombie_ = {}
if uri == '<zombie>':
db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
if db_uid in THREAD_LOCAL._pydal_db_instances_:
db_group = THREAD_LOCAL._pydal_db_instances_[db_uid]
db = db_group[-1]
elif db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
else:
db = super(DAL, cls).__new__(cls)
THREAD_LOCAL._pydal_db_instances_zombie_[db_uid] = db
else:
db_uid = kwargs.get('db_uid', hashlib_md5(repr(uri)).hexdigest())
if db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
del THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
else:
db = super(DAL, cls).__new__(cls)
db_group = THREAD_LOCAL._pydal_db_instances_.get(db_uid, [])
db_group.append(db)
THREAD_LOCAL._pydal_db_instances_[db_uid] = db_group
db._db_uid = db_uid
return db
@staticmethod
def set_folder(folder):
# ## this allows gluon to set a folder for this thread
# ## <<<<<<<<< Should go away as new DAL replaces old sql.py
BaseAdapter.set_folder(folder)
@staticmethod
def get_instances():
"""
Returns a dictionary with uri as key, and timings and defined tables as values::
{'sqlite://storage.sqlite': {
'dbstats': [(select auth_user.email from auth_user, 0.02009)],
'dbtables': {
'defined': ['auth_cas', 'auth_event', 'auth_group',
'auth_membership', 'auth_permission', 'auth_user'],
'lazy': '[]'
}
}
}
"""
dbs = getattr(THREAD_LOCAL, '_pydal_db_instances_', {}).items()
infos = {}
for db_uid, db_group in dbs:
for db in db_group:
if not db._uri:
continue
k = hide_password(db._adapter.uri)
infos[k] = dict(
dbstats=[(row[0], row[1]) for row in db._timings],
dbtables={
'defined': sorted(
list(set(db.tables) - set(db._LAZY_TABLES.keys()))
),
'lazy': sorted(db._LAZY_TABLES.keys())}
)
return infos
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = '%s.%s' % (
socket.gethostname(), threading.currentThread())
instances = list(enumerate(instances))
keys = ['%s.%i' % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
'distributed transaction not supported by %s' % db._dbname)
for (i, db) in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = list(enumerate(instances))
thread_key = '%s.%s' % (
socket.gethostname(), threading.currentThread())
keys = ['%s.%i' % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
'distributed transaction not supported by %s' % db._dbname)
try:
for (i, db) in instances:
db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError('failure to commit distributed transaction')
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
def __init__(self, uri='sqlite://dummy.db',
pool_size=0, folder=None,
db_codec='UTF-8', check_reserved=None,
migrate=True, fake_migrate=False,
migrate_enabled=True, fake_migrate_all=False,
decode_credentials=False, driver_args=None,
adapter_args=None, attempts=5, auto_import=False,
bigint_id=False, debug=False, lazy_tables=False,
db_uid=None, do_connect=True,
after_connection=None, tables=None, ignore_field_case=True,
entity_quoting=True, table_hash=None):
if uri == '<zombie>' and db_uid is not None:
return
super(DAL, self).__init__()
if not issubclass(self.Rows, Rows):
raise RuntimeError(
'`Rows` class must be a subclass of pydal.objects.Rows'
)
if not issubclass(self.Row, Row):
raise RuntimeError(
'`Row` class must be a subclass of pydal.objects.Row'
)
from .drivers import DRIVERS, is_jdbc
self._drivers_available = DRIVERS
if not decode_credentials:
credential_decoder = lambda cred: cred
else:
credential_decoder = lambda cred: urllib.unquote(cred)
self._folder = folder
if folder:
self.set_folder(folder)
self._uri = uri
self._pool_size = pool_size
self._db_codec = db_codec
self._pending_references = {}
self._request_tenant = 'request_tenant'
self._common_fields = []
self._referee_name = '%(table)s'
self._bigint_id = bigint_id
self._debug = debug
self._migrated = []
self._LAZY_TABLES = {}
self._lazy_tables = lazy_tables
self._tables = SQLCallableList()
self._driver_args = driver_args
self._adapter_args = adapter_args
self._check_reserved = check_reserved
self._decode_credentials = decode_credentials
self._attempts = attempts
self._do_connect = do_connect
self._ignore_field_case = ignore_field_case
if not str(attempts).isdigit() or attempts < 0:
attempts = 5
if uri:
uris = isinstance(uri, (list, tuple)) and uri or [uri]
connected = False
for k in range(attempts):
for uri in uris:
try:
from .adapters import adapters
if is_jdbc and not uri.startswith('jdbc:'):
uri = 'jdbc:' + uri
self._dbname = REGEX_DBNAME.match(uri).group()
# note: pass driver_args or {} (and adapter_args or {}) so each call
# gets a fresh dict instead of sharing a mutable default
kwargs = dict(db=self,
uri=uri,
pool_size=pool_size,
folder=folder,
db_codec=db_codec,
credential_decoder=credential_decoder,
driver_args=driver_args or {},
adapter_args=adapter_args or {},
do_connect=do_connect,
after_connection=after_connection,
entity_quoting=entity_quoting)
adapter = adapters.get_for(self._dbname)
self._adapter = adapter(**kwargs)
#self._adapter.ignore_field_case = ignore_field_case
if bigint_id:
self._adapter.dialect._force_bigints()
connected = True
break
except SyntaxError:
raise
except Exception:
tb = traceback.format_exc()
self.logger.debug(
'DEBUG: connect attempt %i, connection error:\n%s'
% (k, tb)
)
if connected:
break
else:
time.sleep(1)
if not connected:
raise RuntimeError(
"Failure to connect, tried %d times:\n%s" % (attempts, tb)
)
else:
self._adapter = NullAdapter(
db=self, pool_size=0, uri='None', folder=folder,
db_codec=db_codec, after_connection=after_connection,
entity_quoting=entity_quoting)
migrate = fake_migrate = False
self.validators_method = None
self.validators = None
adapter = self._adapter
self._uri_hash = table_hash or hashlib_md5(adapter.uri).hexdigest()
self.check_reserved = check_reserved
if self.check_reserved:
from .contrib.reserved_sql_keywords import ADAPTERS as RSK
self.RSK = RSK
self._migrate = migrate
self._fake_migrate = fake_migrate
self._migrate_enabled = migrate_enabled
self._fake_migrate_all = fake_migrate_all
if self.serializers is not None:
for k, v in self.serializers.items():
serializers._custom_[k] = v
if auto_import or tables:
self.import_table_definitions(adapter.folder,
tables=tables)
@property
def tables(self):
return self._tables
@property
def _timings(self):
return getattr(THREAD_LOCAL, '_pydal_timings_', [])
@property
def _lastsql(self):
return self._timings[-1] if self._timings else None
def import_table_definitions(self, path, migrate=False,
fake_migrate=False, tables=None):
if tables:
for table in tables:
self.define_table(**table)
else:
pattern = pjoin(path, self._uri_hash + '_*.table')
for filename in glob.glob(pattern):
tfile = self._adapter.file_open(filename, 'r')
try:
sql_fields = pickle.load(tfile)
name = filename[len(pattern) - 7:-6]
mf = [
(value['sortable'], Field(
key,
type=value['type'],
length=value.get('length', None),
notnull=value.get('notnull', False),
unique=value.get('unique', False)))
for key, value in sql_fields.items()
]
mf.sort(key=lambda a: a[0])
self.define_table(name, *[item[1] for item in mf],
**dict(migrate=migrate,
fake_migrate=fake_migrate))
finally:
self._adapter.file_close(tfile)
def check_reserved_keyword(self, name):
"""
Validates `name` against SQL keywords.
Uses self.check_reserved, which is a list of adapters to check against.
"""
for backend in self.check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError(
'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
def parse_as_rest(self, patterns, args, vars, queries=None,
nested_select=True):
return RestParser(self).parse(
patterns, args, vars, queries, nested_select)
def define_table(self, tablename, *fields, **args):
if not fields and 'fields' in args:
fields = args.get('fields',())
if not isinstance(tablename, str):
if isinstance(tablename, unicode):
try:
tablename = str(tablename)
except UnicodeEncodeError:
raise SyntaxError("invalid unicode table name")
else:
raise SyntaxError("missing table name")
elif hasattr(self, tablename) or tablename in self.tables:
if args.get('redefine', False):
delattr(self, tablename)
else:
raise SyntaxError('table already defined: %s' % tablename)
elif tablename.startswith('_') or hasattr(self, tablename) or \
REGEX_PYTHON_KEYWORDS.match(tablename):
raise SyntaxError('invalid table name: %s' % tablename)
elif self.check_reserved:
self.check_reserved_keyword(tablename)
else:
invalid_args = set(args) - TABLE_ARGS
if invalid_args:
raise SyntaxError('invalid table "%s" attributes: %s' %
(tablename, invalid_args))
if self._lazy_tables and tablename not in self._LAZY_TABLES:
self._LAZY_TABLES[tablename] = (tablename, fields, args)
table = None
else:
table = self.lazy_define_table(tablename, *fields, **args)
if tablename not in self.tables:
self.tables.append(tablename)
return table
def lazy_define_table(self, tablename, *fields, **args):
args_get = args.get
common_fields = self._common_fields
if common_fields:
fields = list(fields) + list(common_fields)
table_class = args_get('table_class', Table)
table = table_class(self, tablename, *fields, **args)
table._actual = True
self[tablename] = table
# must follow above line to handle self references
table._create_references()
for field in table:
if field.requires == DEFAULT:
field.requires = auto_validators(field)
if field.represent is None:
field.represent = auto_represent(field)
migrate = self._migrate_enabled and args_get('migrate', self._migrate)
if migrate and self._uri not in (None, 'None') \
or self._adapter.dbengine == 'google:datastore':
fake_migrate = self._fake_migrate_all or \
args_get('fake_migrate', self._fake_migrate)
polymodel = args_get('polymodel', None)
try:
GLOBAL_LOCKER.acquire()
self._adapter.create_table(
table, migrate=migrate,
fake_migrate=fake_migrate,
polymodel=polymodel)
finally:
GLOBAL_LOCKER.release()
else:
table._dbt = None
on_define = args_get('on_define', None)
if on_define:
on_define(table)
return table
def as_dict(self, flat=False, sanitize=True):
db_uid = uri = None
if not sanitize:
uri, db_uid = (self._uri, self._db_uid)
db_as_dict = dict(
tables=[],
uri=uri,
db_uid=db_uid,
**dict(
[(k, getattr(self, "_" + k, None)) for k in [
'pool_size', 'folder', 'db_codec', 'check_reserved',
'migrate', 'fake_migrate', 'migrate_enabled',
'fake_migrate_all', 'decode_credentials', 'driver_args',
'adapter_args', 'attempts', 'bigint_id', 'debug',
'lazy_tables', 'do_connect']]))
for table in self:
db_as_dict["tables"].append(table.as_dict(flat=flat,
sanitize=sanitize))
return db_as_dict
def __contains__(self, tablename):
try:
return tablename in self.tables
except AttributeError:
# The instance has no .tables attribute yet
return False
def __iter__(self):
for tablename in self.tables:
yield self[tablename]
def __getitem__(self, key):
return self.__getattr__(str(key))
def __getattr__(self, key):
if object.__getattribute__(self, '_lazy_tables') and \
key in object.__getattribute__(self, '_LAZY_TABLES'):
tablename, fields, args = self._LAZY_TABLES.pop(key)
return self.lazy_define_table(tablename, *fields, **args)
return BasicStorage.__getattribute__(self, key)
def __setattr__(self, key, value):
if key[:1] != '_' and key in self:
raise SyntaxError(
'Object %s exists and cannot be redefined' % key)
return super(DAL, self).__setattr__(key, value)
def __repr__(self):
if hasattr(self, '_uri'):
return '<DAL uri="%s">' % hide_password(self._adapter.uri)
else:
return '<DAL db_uid="%s">' % self._db_uid
def smart_query(self, fields, text):
return Set(self, smart_query(fields, text))
def __call__(self, query=None, ignore_common_filters=None):
return self.where(query, ignore_common_filters)
def where(self, query=None, ignore_common_filters=None):
if isinstance(query, Table):
query = self._adapter.id_query(query)
elif isinstance(query, Field):
query = query != None
elif isinstance(query, dict):
icf = query.get("ignore_common_filters")
if icf:
ignore_common_filters = icf
return Set(self, query, ignore_common_filters=ignore_common_filters)
def commit(self):
self._adapter.commit()
def rollback(self):
self._adapter.rollback()
def close(self):
self._adapter.close()
if self._db_uid in THREAD_LOCAL._pydal_db_instances_:
db_group = THREAD_LOCAL._pydal_db_instances_[self._db_uid]
db_group.remove(self)
if not db_group:
del THREAD_LOCAL._pydal_db_instances_[self._db_uid]
def executesql(self, query, placeholders=None, as_dict=False,
fields=None, colnames=None, as_ordered_dict=False):
"""
Executes an arbitrary query
Args:
query (str): the query to submit to the backend
placeholders: optional; defaults to None.
If using raw SQL with placeholders, placeholders may be
a sequence of values to be substituted in
or, (if supported by the DB driver), a dictionary with keys
matching named placeholders in your SQL.
as_dict: defaults to False.
If using raw SQL it can be set to True and the results cursor
returned by the DB driver will be converted to a sequence of
dictionaries keyed with the db field names. Results returned
with as_dict=True are the same as those returned when applying
.to_list() to a DAL query. If "as_ordered_dict"=True the
behaviour is the same as when "as_dict"=True with the keys
(field names) guaranteed to be in the same order as returned
by the select statement executed on the database.
fields: list of DAL Fields that match the fields returned from the
DB. The Field objects should be part of one or more Table
objects defined on the DAL object. The "fields" list can include
one or more DAL Table objects in addition to or instead of
including Field objects, or it can be just a single table
(not in a list). In that case, the Field objects will be
extracted from the table(s).
Note:
if either `fields` or `colnames` is provided, the results
will be converted to a DAL `Rows` object using the
`db._adapter.parse()` method
colnames: list of field names in tablename.fieldname format
Note:
It is also possible to specify both "fields" and the associated
"colnames". In that case, "fields" can also include DAL Expression
objects in addition to Field objects. For Field objects in "fields",
the associated "colnames" must still be in tablename.fieldname
format. For Expression objects in "fields", the associated
"colnames" can be any arbitrary labels.
DAL Table objects referred to by "fields" or "colnames" can be dummy
tables and do not have to represent any real tables in the database.
Also, note that the "fields" and "colnames" must be in the
same order as the fields in the results cursor returned from the DB.
"""
adapter = self._adapter
if placeholders:
adapter.execute(query, placeholders)
else:
adapter.execute(query)
if as_dict or as_ordered_dict:
if not hasattr(adapter.cursor,'description'):
raise RuntimeError("database does not support executesql(...,as_dict=True)")
# Non-DAL legacy db query, converts cursor results to dict.
# sequence of 7-item sequences. each sequence tells about a column.
# first item is always the field name according to Python Database API specs
columns = adapter.cursor.description
# reduce the column info down to just the field names
fields = colnames or [f[0] for f in columns]
if len(fields) != len(set(fields)):
raise RuntimeError("Result set includes duplicate column names. Specify unique column names using the 'colnames' argument")
#: avoid bytes strings in columns names (py3)
if columns and not PY2:
for i in range(0, len(fields)):
if isinstance(fields[i], bytes):
fields[i] = fields[i].decode("utf8")
# will hold our finished resultset in a list
data = adapter.fetchall()
# convert the list for each row into a dictionary so it's
# easier to work with. row['field_name'] rather than row[0]
if as_ordered_dict:
_dict = OrderedDict
else:
_dict = dict
return [_dict(zip(fields, row)) for row in data]
try:
data = adapter.fetchall()
except:
return None
if fields or colnames:
fields = [] if fields is None else fields
if not isinstance(fields, list):
fields = [fields]
extracted_fields = []
for field in fields:
if isinstance(field, Table):
extracted_fields.extend([f for f in field])
else:
extracted_fields.append(field)
if not colnames:
colnames = [f.sqlsafe for f in extracted_fields]
else:
newcolnames = []
for tf in colnames:
if '.' in tf:
newcolnames.append('.'.join(adapter.dialect.quote(f)
for f in tf.split('.')))
else:
newcolnames.append(tf)
colnames = newcolnames
data = adapter.parse(
data, fields=extracted_fields, colnames=colnames)
return data
def _remove_references_to(self, thistable):
for table in self:
table._referenced_by = [field for field in table._referenced_by
if not field.table==thistable]
def has_representer(self, name):
return callable(self.representers.get(name))
def represent(self, name, *args, **kwargs):
return self.representers[name](*args, **kwargs)
def export_to_csv_file(self, ofile, *args, **kwargs):
step = long(kwargs.get('max_fetch_rows',500))
write_colnames = kwargs['write_colnames'] = \
kwargs.get("write_colnames", True)
for table in self.tables:
ofile.write('TABLE %s\r\n' % table)
query = self._adapter.id_query(self[table])
nrows = self(query).count()
kwargs['write_colnames'] = write_colnames
for k in range(0,nrows,step):
self(query).select(limitby=(k,k+step)).export_to_csv_file(
ofile, *args, **kwargs)
kwargs['write_colnames'] = False
ofile.write('\r\n\r\n')
ofile.write('END')
def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
unique='uuid', map_tablenames=None,
ignore_missing_tables=False,
*args, **kwargs):
#if id_map is None: id_map={}
id_offset = {} # only used if id_map is None
map_tablenames = map_tablenames or {}
for line in ifile:
line = line.strip()
if not line:
continue
elif line == 'END':
return
elif not line.startswith('TABLE ') or \
not line[6:] in self.tables:
raise SyntaxError('invalid file format')
else:
tablename = line[6:]
tablename = map_tablenames.get(tablename,tablename)
if tablename is not None and tablename in self.tables:
self[tablename].import_from_csv_file(
ifile, id_map, null, unique, id_offset,
*args, **kwargs)
elif tablename is None or ignore_missing_tables:
# skip all non-empty lines
for line in ifile:
if not line.strip():
break
else:
raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
def can_join(self):
return self._adapter.can_join()
def DAL_unpickler(db_uid):
return DAL('<zombie>', db_uid=db_uid)
def DAL_pickler(db):
return DAL_unpickler, (db._db_uid,)
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
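# Editor's note: a minimal usage sketch (not part of pydal itself) showing the
# two result modes of executesql() documented above; the table, SQL and data
# are illustrative only.
def _example_executesql_usage():
    db = DAL('sqlite:memory')
    db.define_table('person', Field('name'))
    db.person.insert(name='James')
    # raw rows converted to dictionaries keyed by column name
    dicts = db.executesql('SELECT id, name FROM person;', as_dict=True)
    # the same query parsed into a Rows object by passing matching DAL fields
    rows = db.executesql('SELECT id, name FROM person;',
                         fields=[db.person.id, db.person.name])
    return dicts, rows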
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Implementation of non-blocking server.
The main idea of the server is to receive and send requests
only from the main thread.
The thread pool should be sized for concurrent tasks, not
maximum connections.
"""
import threading
import socket
import select
import struct
import logging
logger = logging.getLogger(__name__)
from six.moves import queue
from thrift.transport import TTransport
from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory
__all__ = ['TNonblockingServer']
class Worker(threading.Thread):
"""Worker is a small helper to process incoming connection."""
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
"""Process queries from task queue, stop if processor is None."""
while True:
try:
processor, iprot, oprot, otrans, callback = self.queue.get()
if processor is None:
break
processor.process(iprot, oprot)
callback(True, otrans.getvalue())
except Exception:
logger.exception("Exception while processing request")
callback(False, b'')
WAIT_LEN = 0
WAIT_MESSAGE = 1
WAIT_PROCESS = 2
SEND_ANSWER = 3
CLOSED = 4
def locked(func):
"""Decorator which locks self.lock."""
def nested(self, *args, **kwargs):
self.lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self.lock.release()
return nested
def socket_exception(func):
"""Decorator close object on socket.error."""
def read(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
self.close()
return read
class Connection(object):
"""Basic class is represented connection.
It can be in state:
WAIT_LEN --- connection is reading request len.
WAIT_MESSAGE --- connection is reading request.
WAIT_PROCESS --- connection has just read whole request and
waits for call ready routine.
SEND_ANSWER --- connection is sending answer string (including length
of answer).
CLOSED --- socket was closed and connection should be deleted.
"""
def __init__(self, new_socket, wake_up):
self.socket = new_socket
self.socket.setblocking(False)
self.status = WAIT_LEN
self.len = 0
self.message = b''
self.lock = threading.Lock()
self.wake_up = wake_up
def _read_len(self):
"""Reads length of request.
It's a safer alternative to self.socket.recv(4)
"""
read = self.socket.recv(4 - len(self.message))
if len(read) == 0:
# if we read 0 bytes and self.message is empty, then
# the client closed the connection
if len(self.message) != 0:
logger.error("can't read frame size from socket")
self.close()
return
self.message += read
if len(self.message) == 4:
self.len, = struct.unpack('!i', self.message)
if self.len < 0:
logger.error("negative frame size, it seems client "
"doesn't use FramedTransport")
self.close()
elif self.len == 0:
logger.error("empty frame, it's really strange")
self.close()
else:
self.message = b''
self.status = WAIT_MESSAGE
@socket_exception
def read(self):
"""Reads data from stream and switch state."""
assert self.status in (WAIT_LEN, WAIT_MESSAGE)
if self.status == WAIT_LEN:
self._read_len()
# go back to the main loop here for simplicity instead of
# falling through, even though there is a good chance that
# the message is already available
elif self.status == WAIT_MESSAGE:
read = self.socket.recv(self.len - len(self.message))
if len(read) == 0:
logger.error("can't read frame from socket (get %d of "
"%d bytes)" % (len(self.message), self.len))
self.close()
return
self.message += read
if len(self.message) == self.len:
self.status = WAIT_PROCESS
@socket_exception
def write(self):
"""Writes data from socket and switch state."""
assert self.status == SEND_ANSWER
sent = self.socket.send(self.message)
if sent == len(self.message):
self.status = WAIT_LEN
self.message = b''
self.len = 0
else:
self.message = self.message[sent:]
@locked
def ready(self, all_ok, message):
"""Callback function for switching state and waking up main thread.
This is the only function that can be called asynchronously.
ready() can switch the Connection to one of three states:
WAIT_LEN if request was oneway.
SEND_ANSWER if request was processed in normal way.
CLOSED if request throws unexpected exception.
It also wakes up the main thread.
"""
assert self.status == WAIT_PROCESS
if not all_ok:
self.close()
self.wake_up()
return
self.len = 0
if len(message) == 0:
# it was a oneway request, do not write answer
self.message = b''
self.status = WAIT_LEN
else:
self.message = struct.pack('!i', len(message)) + message
self.status = SEND_ANSWER
self.wake_up()
@locked
def is_writeable(self):
"""Return True if connection should be added to write list of select"""
return self.status == SEND_ANSWER
# it's not necessary, but...
@locked
def is_readable(self):
"""Return True if connection should be added to read list of select"""
return self.status in (WAIT_LEN, WAIT_MESSAGE)
@locked
def is_closed(self):
"""Returns True if connection is closed."""
return self.status == CLOSED
def fileno(self):
"""Returns the file descriptor of the associated socket."""
return self.socket.fileno()
def close(self):
"""Closes connection"""
self.status = CLOSED
self.socket.close()
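# Editor's note: a minimal sketch of the framing this Connection class expects
# (a 4-byte big-endian length prefix followed by the payload, as packed and
# unpacked with struct '!i' in ready() and _read_len()). The payload value is
# illustrative only.
def _example_frame(payload=b'hello'):
    framed = struct.pack('!i', len(payload)) + payload  # what ready() sends
    size, = struct.unpack('!i', framed[:4])             # what _read_len() reads
    return size, framed[4:]                             # -> (5, b'hello')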
class TNonblockingServer(object):
"""Non-blocking server."""
def __init__(self,
processor,
lsocket,
inputProtocolFactory=None,
outputProtocolFactory=None,
threads=10):
self.processor = processor
self.socket = lsocket
self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
self.out_protocol = outputProtocolFactory or self.in_protocol
self.threads = int(threads)
self.clients = {}
self.tasks = queue.Queue()
self._read, self._write = socket.socketpair()
self.prepared = False
self._stop = False
def setNumThreads(self, num):
"""Set the number of worker threads that should be created."""
# implement ThreadPool interface
assert not self.prepared, "Can't change number of threads after start"
self.threads = num
def prepare(self):
"""Prepares server for serve requests."""
if self.prepared:
return
self.socket.listen()
for _ in range(self.threads):
thread = Worker(self.tasks)
thread.setDaemon(True)
thread.start()
self.prepared = True
def wake_up(self):
"""Wake up main thread.
The server usually waits in a select call, which we need to interrupt.
The simplest way is to use a socketpair.
select always waits to read from the first socket of the socketpair,
so we can just write anything to the second socket of the
socketpair.
"""
self._write.send(b'1')
def stop(self):
"""Stop the server.
This method causes the serve() method to return. stop() may be invoked
from within your handler, or from another thread.
After stop() is called, serve() will return but the server will still
be listening on the socket. serve() may then be called again to resume
processing requests. Alternatively, close() may be called after
serve() returns to close the server socket and shutdown all worker
threads.
"""
self._stop = True
self.wake_up()
def _select(self):
"""Does select on open connections."""
readable = [self.socket.handle.fileno(), self._read.fileno()]
writable = []
for i, connection in list(self.clients.items()):
if connection.is_readable():
readable.append(connection.fileno())
if connection.is_writeable():
writable.append(connection.fileno())
if connection.is_closed():
del self.clients[i]
return select.select(readable, writable, readable)
def handle(self):
"""Handle requests.
WARNING! You must call prepare() BEFORE calling handle()
"""
assert self.prepared, "You have to call prepare before handle"
rset, wset, xset = self._select()
for readable in rset:
if readable == self._read.fileno():
# don't care about the data; just drain the wake-up socket to clear the readable flag
self._read.recv(1024)
elif readable == self.socket.handle.fileno():
client = self.socket.accept().handle
self.clients[client.fileno()] = Connection(client,
self.wake_up)
else:
connection = self.clients[readable]
connection.read()
if connection.status == WAIT_PROCESS:
itransport = TTransport.TMemoryBuffer(connection.message)
otransport = TTransport.TMemoryBuffer()
iprot = self.in_protocol.getProtocol(itransport)
oprot = self.out_protocol.getProtocol(otransport)
self.tasks.put([self.processor, iprot, oprot,
otransport, connection.ready])
for writeable in wset:
self.clients[writeable].write()
for oob in xset:
self.clients[oob].close()
del self.clients[oob]
def close(self):
"""Closes the server."""
for _ in range(self.threads):
self.tasks.put([None, None, None, None, None])
self.socket.close()
self.prepared = False
def serve(self):
"""Serve requests.
Serve requests forever, or until stop() is called.
"""
self._stop = False
self.prepare()
while not self._stop:
self.handle()
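# Editor's note: a minimal usage sketch, added for illustration only. The
# processor argument is assumed to come from Thrift-generated code, e.g.
# YourService.Processor(handler); it is not defined in this module.
def _example_serve(processor, port=9090):
    from thrift.transport import TSocket
    transport = TSocket.TServerSocket(port=port)  # listening server socket
    server = TNonblockingServer(processor, transport, threads=4)
    server.serve()  # blocks until stop() is called from another thread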
|
|
"""A Script that tags your movie files.
Run the script in a folder containing the mp4/mkv movie files with their
filename as the movie's title.
This script might seem a little messy, and there are probably better and
more efficient ways to do some of these tasks,
but I am unaware of them at the moment; I am a beginner in Python and
this is my first, or maybe second, Python script.
"""
import os
import subprocess
import urllib
import shlex
import linecache
import sys
from json import JSONDecoder
import tmdbsimple as tmdb
from imdbpie import Imdb
from mutagen.mp4 import MP4, MP4Cover
# The following subtitle codecs are ignored if found in the file as they are
# not supported by the mp4 container. These are mainly picture-based subtitles
sub_codec_blacklist = ("dvdsub", "dvd_subtitle", "pgssub", "hdmv_pgs_subtitle")
def collect_stream_metadata(filename):
"""
Returns a list of streams' metadata present in the media file passed as
the argument (filename)
"""
command = 'ffprobe -i "{}" -show_streams -of json'.format(filename)
args = shlex.split(command)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
json_data = JSONDecoder().decode(out)
return json_data
def PrintException():
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
fname = f.f_code.co_filename
linecache.checkcache(fname)
line = linecache.getline(fname, lineno, f.f_globals)
print ('\nEXCEPTION IN ({}, LINE {} "{}"): {}'.format(fname,
lineno,
line.strip(),
exc_obj))
# Setting the API key for usage of TMDB API
tmdb.API_KEY = 'b888b64c9155c26ade5659ea4dd60e64'
def collect_files(file_type):
"""
returns a list of files in the current directory that are of
the extension passed as string\n
eg: collect_files('txt') would return a list of all txt files
"""
filenames = []
for filename in os.listdir(os.getcwd()):
if filename.endswith(file_type):
filenames.append(filename)
return filenames
def get_common_files(mediafile_list, srtfile_list):
"""
returns a list of filenames that are common in mediafile_list and
srtfile_list. \n
While getting common filenames it ignores the extension.\n
Also the returned list will have the same file extension the mediafile_list
files have
"""
media_filenames = [i[:-4] for i in mediafile_list]
subtitle_filenames = [i[:-4] for i in srtfile_list]
media_type = mediafile_list[0][-4:]
media_set = set(media_filenames)
srt_set = set(subtitle_filenames)
common_files = list(media_set & srt_set)
common_files = [i + media_type for i in common_files]
common_files.sort()
return common_files
def remove_common_files(list1, list2):
"""
returns a subset of list1 that has common elements removed that were found
in both lists
or in other words - returns a subset of list1 that has elements unique to
only list1
"""
# results in a list of values that are unique to list1
new_list1 = list(set(list1) - set(list2))
new_list1.sort()
return new_list1
def start_process(filenames, mode):
"""
This is the main function of the script
where it does its main processing.\n
filenames is the list of files to be processed\n
mode = 1,2,3 or 4\n
1 means mp4 to tagged mp4\n
2 means mp4 with sub to subbed and tagged mp4\n
3 means mkv to tagged mp4\n
4 means mkv with sub to subbed and tagged mp4
"""
for filename in filenames:
try:
title = filename[:-4]
stream_md = collect_stream_metadata(filename)
streams_to_process = []
dvdsub_exists=False
for stream in stream_md['streams']:
if not stream['codec_name'] in sub_codec_blacklist:
streams_to_process.append(stream['index'])
else:
dvdsub_exists=True
print('\nSearching IMDb for "{}"'.format(title))
imdb = Imdb()
movie_results = []
results = imdb.search_for_title(title)
for result in results:
if result['type'] == "feature":
movie_results.append(result)
if not movie_results:
while not movie_results:
title = input('\nNo results for "' + title +
'" Enter alternate/correct movie title >> ')
results = imdb.search_for_title(title)
for result in results:
if result['type'] == "feature":
movie_results.append(result)
# The most prominent result is the first one
# mpr - Most Prominent Result
mpr = movie_results[0]
print('\nFetching data for {} ({})'.format(mpr['title'],
mpr['year']))
# imdb_movie is a dict of info about the movie
imdb_movie = imdb.get_title(mpr['imdb_id'])
imdb_movie_title = imdb_movie['base']['title']
imdb_movie_year = imdb_movie['base']['year']
imdb_movie_id = mpr['imdb_id']
imdb_movie_rating = imdb_movie['ratings']['rating']
if not 'outline' in imdb_movie['plot']:
imdb_movie_plot_outline = (imdb_movie['plot']['summaries'][0]
['text'])
print("\nPlot outline does not exist. Fetching plot summary "
"instead.\n\n")
else:
imdb_movie_plot_outline = imdb_movie['plot']['outline']['text']
# Composing a string to have the rating and the plot of the
# movie which will go into the 'comment' metadata of the
# mp4 file.
imdb_rating_and_plot = str('IMDb rating ['
+ str(float(imdb_movie_rating))
+ '/10] - '
+ imdb_movie_plot_outline)
imdb_movie_genres = imdb.get_title_genres(imdb_movie_id)['genres']
# Composing the 'genre' string of the movie.
# I use ';' as a delimiter to separate the multiple genre values
genre = ';'.join(imdb_movie_genres)
newfilename = (imdb_movie_title
+ ' ('
+ str(imdb_movie_year)
+ ').mp4')
# We don't want the characters not allowed in a filename
newfilename = (newfilename
.replace(':', ' -')
.replace('/', ' ')
.replace('?', ''))
command = ""
stream_map = []
for f in streams_to_process:
stream_map.append("-map 0:{}".format(f))
stream_map_str = ' '.join(stream_map)
if mode == 1:
# it is required to rename it as it's already an mp4 file that
# wasn't processed by ffmpeg
os.rename(filename, newfilename)
if mode == 2 or mode == 4:
command = ('ffmpeg -i "'
+ filename
+ '" -sub_charenc UTF-8 -i "'
+ filename[:-4]
+ '.srt" '
+ stream_map_str
+ ' -map 1 -c copy -c:s mov_text '
'"' + newfilename + '"')
subprocess.run(shlex.split(command))
if mode == 3:
command = ('ffmpeg -i '
+ '"' + filename + '" '
+ stream_map_str
+ ' -c copy -c:s mov_text '
'"' + newfilename + '"')
subprocess.run(shlex.split(command))
if dvdsub_exists:
print("\nRemoved DVD Subtitles due to uncompatibility with "
"mp4 file format")
# The poster is fetched from tmdb only if there is no file
# named " filename + '.jpg' " in the working directory
# this way user can provide their own poster image to be used
poster_filename = filename[:-4] + '.jpg'
if not os.path.isfile(poster_filename):
print('\nFetching the movie poster...')
tmdb_find = tmdb.Find(imdb_movie_id)
tmdb_find.info(external_source = 'imdb_id')
path = tmdb_find.movie_results[0]['poster_path']
complete_path = r'https://image.tmdb.org/t/p/w780' + path
uo = urllib.request.urlopen(complete_path)
with open(poster_filename, "wb") as poster_file:
poster_file.write(uo.read())
poster_file.close()
video = MP4(newfilename)
with open(poster_filename, "rb") as f:
video["covr"] = [MP4Cover(
f.read(),
imageformat=MP4Cover.FORMAT_JPEG)]
video['\xa9day'] = str(imdb_movie_year)
video['\xa9nam'] = imdb_movie_title
video['\xa9cmt'] = imdb_rating_and_plot
video['\xa9gen'] = genre
print('\nAdding poster and tagging file...')
try:
video.save()
# I have encountered this error in a previous version
# of the script; now I handle it by removing the metadata
# of the file. That seems to solve the problem
except OverflowError:
remove_meta_command = ('ffmpeg -i "' + newfilename
+ '" -codec copy -map_metadata -1 "'
+ newfilename[:-4] + 'new.mp4"')
subprocess.run(shlex.split(remove_meta_command))
video_new = MP4(newfilename[:-4] + 'new.mp4')
with open(poster_filename, "rb") as f:
video_new["covr"] = [MP4Cover(
f.read(),
imageformat=MP4Cover.FORMAT_JPEG)]
video_new['\xa9day'] = str(imdb_movie_year)
video_new['\xa9nam'] = imdb_movie_title
video_new['\xa9cmt'] = imdb_rating_and_plot
video_new['\xa9gen'] = genre
print('\nAdding poster and tagging file...')
try:
video_new.save()
if not os.path.exists('auto fixed files'):
os.makedirs('auto fixed files')
os.rename(newfilename[:-4]
+ 'new.mp4', 'auto fixed files\\'
+ newfilename[:-4] + '.mp4')
os.remove(newfilename)
except OverflowError:
errored_files.append(filename
+ (' - Could not save even after '
'stripping metadata'))
continue
os.remove(poster_filename)
print('\n' + filename
+ (' was processed successfully!\n\n===================='
'======================================'))
except Exception as e:
print('\nSome error occurred while processing '
+ filename
+ '\n\n====================================================')
errored_files.append(filename + ' - ' + str(e))
PrintException()
mp4_filenames = []
mkv_filenames = []
srt_filenames = []
mp4_with_srt_filenames = []
mkv_with_srt_filenames = []
errored_files = []
mp4_filenames = collect_files('mp4')
mkv_filenames = collect_files('mkv')
srt_filenames = collect_files('srt')
# Check whether there are mp4 files and, if so, whether there are any srt
# files. If both exist, find the mp4 files that have an srt associated with
# them and remove those from the plain mp4 list, since they are processed
# separately. The same is done for mkv files below.
if mp4_filenames:
if srt_filenames:
mp4_with_srt_filenames = get_common_files(mp4_filenames,
srt_filenames)
if mp4_with_srt_filenames:
mp4_filenames = remove_common_files(mp4_filenames,
mp4_with_srt_filenames)
if mkv_filenames:
if srt_filenames:
mkv_with_srt_filenames = get_common_files(mkv_filenames, srt_filenames)
if mkv_with_srt_filenames:
mkv_filenames = remove_common_files(mkv_filenames,
mkv_with_srt_filenames)
# This is where the main conversion takes place. We check that each file
# list is not empty and then run the main task with the mode appropriate
# to its type via the function "start_process".
if mp4_filenames:
start_process(mp4_filenames, 1)
if mp4_with_srt_filenames:
start_process(mp4_with_srt_filenames, 2)
if mkv_filenames:
start_process(mkv_filenames, 3)
if mkv_with_srt_filenames:
start_process(mkv_with_srt_filenames, 4)
if not (mp4_filenames or mkv_filenames
or mp4_with_srt_filenames or mkv_with_srt_filenames):
print('There were no MP4 or MKV files found in the directory')
else:
# Check whether any files failed to process and, if so, print them out.
if not errored_files:
print('\n\n\nAll files processed successfully!')
else:
print('\n\n\nThe files that were not processed: \n')
for er in errored_files:
print(er)
|
|
"""Chalice deployer module.
The deployment system in chalice is broken down into a pipeline of multiple
stages. Each stage takes the input and transforms it to some other form. The
reason for this is so that each stage can stay simple and focused on only a
single part of the deployment process. This makes the code easier to follow
and easier to test. The biggest downside is that adding support for a new
resource type is split across several objects now, but I imagine as we add
support for more resource types, we'll see common patterns emerge that we can
extract out into higher levels of abstraction.
These are the stages of the deployment process.
Application Graph Builder
=========================
The first stage is the application graph builder. This takes the objects in the
``Chalice`` app and structures them into an ``Application`` object which
consists of various ``models.Model`` objects. These models are just python
objects that describe the attributes of various AWS resources. These
models don't have any behavior on their own.
Dependency Builder
==================
This process takes the graph of resources created from the previous step and
orders them such that all objects are listed before objects that depend on
them. The AWS resources in the ``chalice.deploy.models`` module also model
their required dependencies (see the ``dependencies()`` methods of the models).
This is the mechanism that's used to build the correct dependency ordering.
Local Build Stage
=================
This takes the ordered list of resources and allows any local build processes
to occur. The rule of thumb here is no remote AWS calls. This stage includes
auto policy generation, pip packaging, injecting default values, etc. To
clarify which attributes are affected by the build stage, they'll usually have
a value of ``models.Placeholder.BUILD_STAGE``. Processors in the build stage
will replace those ``models.Placeholder.BUILD_STAGE`` values with whatever the
"built" value is (e.g. the filename of the zipped deployment package).
For example, we know when we create a lambda function that we need to create a
deployment package, but we don't know the name nor contents of the deployment
package until the ``LambdaDeploymentPackager`` runs. Therefore, the Resource
Builder stage can record the fact that it knows that a
``models.DeploymentPackage`` is needed, but use
``models.Placeholder.BUILD_STAGE`` for the value of the filename. The enum
values aren't strictly necessary; they just add clarity about when this value
is expected to be filled in. These could also just be set to ``None`` and be
of type ``Optional[T]``.
Execution Plan Stage
====================
This stage takes the ordered list of resources and figures out what AWS API
calls we have to make. For example, if a resource doesn't exist at all, we'll
need to make a ``create_*`` call. If the resource exists, we may need to make
a series of ``update_*`` calls. If the resource exists and is already up to
date, we might not need to make any calls at all. The output of this stage is
a list of ``APICall`` objects. This stage doesn't actually make the mutating
API calls; it only figures out what calls we should make. This stage will
typically only make ``describe/list`` AWS calls.
The Executor
============
This takes the list of ``APICall`` objects from the previous stage and finally
executes them. It also manages taking the output of API calls and storing them
in variables so they can be referenced in subsequent ``APICall`` objects (see
the ``Variable`` class to see how this is used). For example, if a lambda
function needs the ``role_arn`` that's the result of a previous ``create_role``
API call, a ``Variable`` object is used to forward this information.
The executor also records these variables with their associated resources so a
``deployed.json`` file can be written to disk afterwards. An ``APICall``
takes an optional resource object when it's created whose ``resource_name``
is used as the key in the ``deployed.json`` dictionary.
"""
# pylint: disable=too-many-lines
import json
import textwrap
import socket
import logging
import botocore.exceptions
from botocore.vendored.requests import ConnectionError as \
RequestsConnectionError
from botocore.session import Session # noqa
from typing import Optional, Dict, List, Any, Type # noqa
from chalice.config import Config # noqa
from chalice.compat import is_broken_pipe_error
from chalice.awsclient import DeploymentPackageTooLargeError
from chalice.awsclient import LambdaClientError
from chalice.awsclient import AWSClientError
from chalice.awsclient import TypedAWSClient
from chalice.constants import MAX_LAMBDA_DEPLOYMENT_SIZE
from chalice.constants import VPC_ATTACH_POLICY
from chalice.constants import DEFAULT_LAMBDA_TIMEOUT
from chalice.constants import DEFAULT_LAMBDA_MEMORY_SIZE
from chalice.constants import SQS_EVENT_SOURCE_POLICY
from chalice.constants import POST_TO_WEBSOCKET_CONNECTION_POLICY
from chalice.deploy import models
from chalice.deploy.appgraph import ApplicationGraphBuilder, DependencyBuilder
from chalice.deploy.executor import BaseExecutor # noqa
from chalice.deploy.executor import Executor
from chalice.deploy.executor import DisplayOnlyExecutor
from chalice.deploy.packager import PipRunner
from chalice.deploy.packager import SubprocessPip
from chalice.deploy.packager import DependencyBuilder as PipDependencyBuilder
from chalice.deploy.packager import LambdaDeploymentPackager
from chalice.deploy.planner import PlanStage
from chalice.deploy.planner import RemoteState
from chalice.deploy.planner import NoopPlanner
from chalice.deploy.swagger import TemplatedSwaggerGenerator
from chalice.deploy.swagger import SwaggerGenerator # noqa
from chalice.deploy.sweeper import ResourceSweeper
from chalice.deploy.validate import validate_configuration
from chalice.policy import AppPolicyGenerator
from chalice.utils import OSUtils
from chalice.utils import UI
from chalice.utils import serialize_to_json
OptStr = Optional[str]
LOGGER = logging.getLogger(__name__)
_AWSCLIENT_EXCEPTIONS = (
botocore.exceptions.ClientError, AWSClientError
)
class ChaliceDeploymentError(Exception):
def __init__(self, error):
# type: (Exception) -> None
self.original_error = error
where = self._get_error_location(error)
msg = self._wrap_text(
'ERROR - %s, received the following error:' % where
)
msg += '\n\n'
msg += self._wrap_text(self._get_error_message(error), indent=' ')
msg += '\n\n'
suggestion = self._get_error_suggestion(error)
if suggestion is not None:
msg += self._wrap_text(suggestion)
super(ChaliceDeploymentError, self).__init__(msg)
def _get_error_location(self, error):
# type: (Exception) -> str
where = 'While deploying your chalice application'
if isinstance(error, LambdaClientError):
where = (
'While sending your chalice handler code to Lambda to %s '
'function "%s"' % (
self._get_verb_from_client_method(
error.context.client_method_name),
error.context.function_name
)
)
return where
def _get_error_message(self, error):
# type: (Exception) -> str
msg = str(error)
if isinstance(error, LambdaClientError):
if isinstance(error.original_error, RequestsConnectionError):
msg = self._get_error_message_for_connection_error(
error.original_error)
return msg
def _get_error_message_for_connection_error(self, connection_error):
# type: (RequestsConnectionError) -> str
# To get the underlying error that raised the
# requests.ConnectionError it is required to go down two levels of
# arguments to get the underlying exception. The instantiation of
# one of these exceptions looks like this:
#
# requests.ConnectionError(
# urllib3.exceptions.ProtocolError(
# 'Connection aborted.', <SomeException>)
# )
message = connection_error.args[0].args[0]
underlying_error = connection_error.args[0].args[1]
if is_broken_pipe_error(underlying_error):
message += (
' Lambda closed the connection before chalice finished '
'sending all of the data.'
)
elif isinstance(underlying_error, socket.timeout):
message += ' Timed out sending your app to Lambda.'
return message
def _get_error_suggestion(self, error):
# type: (Exception) -> OptStr
suggestion = None
if isinstance(error, DeploymentPackageTooLargeError):
suggestion = (
'To avoid this error, decrease the size of your chalice '
'application by removing code or dependencies.'
)
deployment_size = error.context.deployment_size
if deployment_size > MAX_LAMBDA_DEPLOYMENT_SIZE:
size_warning = (
'This is likely because the deployment package is %s. '
'Lambda only allows deployment packages that are %s or '
'less in size.' % (
self._get_mb(deployment_size),
self._get_mb(MAX_LAMBDA_DEPLOYMENT_SIZE)
)
)
suggestion = size_warning + ' ' + suggestion
return suggestion
def _wrap_text(self, text, indent=''):
# type: (str, str) -> str
return '\n'.join(
textwrap.wrap(
text, 79, replace_whitespace=False, drop_whitespace=False,
initial_indent=indent, subsequent_indent=indent
)
)
def _get_verb_from_client_method(self, client_method_name):
# type: (str) -> str
client_method_name_to_verb = {
'update_function_code': 'update',
'create_function': 'create'
}
return client_method_name_to_verb.get(
client_method_name, client_method_name)
def _get_mb(self, value):
# type: (int) -> str
return '%.1f MB' % (float(value) / (1024 ** 2))
def create_plan_only_deployer(session, config, ui):
# type: (Session, Config, UI) -> Deployer
return _create_deployer(session, config, ui, DisplayOnlyExecutor,
NoopResultsRecorder)
def create_default_deployer(session, config, ui):
# type: (Session, Config, UI) -> Deployer
return _create_deployer(session, config, ui, Executor, ResultsRecorder)
def _create_deployer(session, # type: Session
config, # type: Config
ui, # type: UI
executor_cls, # type: Type[BaseExecutor]
recorder_cls, # type: Type[ResultsRecorder]
):
# type: (...) -> Deployer
client = TypedAWSClient(session)
osutils = OSUtils()
return Deployer(
application_builder=ApplicationGraphBuilder(),
deps_builder=DependencyBuilder(),
build_stage=create_build_stage(
osutils, UI(), TemplatedSwaggerGenerator(),
),
plan_stage=PlanStage(
osutils=osutils, remote_state=RemoteState(
client, config.deployed_resources(config.chalice_stage)),
),
sweeper=ResourceSweeper(),
executor=executor_cls(client, ui),
recorder=recorder_cls(osutils=osutils),
)
def create_build_stage(osutils, ui, swagger_gen):
# type: (OSUtils, UI, SwaggerGenerator) -> BuildStage
pip_runner = PipRunner(pip=SubprocessPip(osutils=osutils),
osutils=osutils)
dependency_builder = PipDependencyBuilder(
osutils=osutils,
pip_runner=pip_runner
)
build_stage = BuildStage(
steps=[
InjectDefaults(),
DeploymentPackager(
packager=LambdaDeploymentPackager(
osutils=osutils,
dependency_builder=dependency_builder,
ui=ui,
),
),
PolicyGenerator(
policy_gen=AppPolicyGenerator(
osutils=osutils
),
osutils=osutils,
),
SwaggerBuilder(
swagger_generator=swagger_gen,
),
LambdaEventSourcePolicyInjector(),
WebsocketPolicyInjector()
],
)
return build_stage
def create_deletion_deployer(client, ui):
# type: (TypedAWSClient, UI) -> Deployer
return Deployer(
application_builder=ApplicationGraphBuilder(),
deps_builder=DependencyBuilder(),
build_stage=BuildStage(steps=[]),
plan_stage=NoopPlanner(),
sweeper=ResourceSweeper(),
executor=Executor(client, ui),
recorder=ResultsRecorder(osutils=OSUtils()),
)
class Deployer(object):
BACKEND_NAME = 'api'
def __init__(self,
application_builder, # type: ApplicationGraphBuilder
deps_builder, # type: DependencyBuilder
build_stage, # type: BuildStage
plan_stage, # type: PlanStage
sweeper, # type: ResourceSweeper
executor, # type: BaseExecutor
recorder, # type: ResultsRecorder
):
# type: (...) -> None
self._application_builder = application_builder
self._deps_builder = deps_builder
self._build_stage = build_stage
self._plan_stage = plan_stage
self._sweeper = sweeper
self._executor = executor
self._recorder = recorder
def deploy(self, config, chalice_stage_name):
# type: (Config, str) -> Dict[str, Any]
try:
return self._deploy(config, chalice_stage_name)
except _AWSCLIENT_EXCEPTIONS as e:
raise ChaliceDeploymentError(e)
def _deploy(self, config, chalice_stage_name):
# type: (Config, str) -> Dict[str, Any]
self._validate_config(config)
application = self._application_builder.build(
config, chalice_stage_name)
resources = self._deps_builder.build_dependencies(application)
self._build_stage.execute(config, resources)
plan = self._plan_stage.execute(resources)
self._sweeper.execute(plan, config)
self._executor.execute(plan)
deployed_values = {
'resources': self._executor.resource_values,
'schema_version': '2.0',
'backend': self.BACKEND_NAME,
}
self._recorder.record_results(
deployed_values,
chalice_stage_name,
config.project_dir,
)
return deployed_values
def _validate_config(self, config):
# type: (Config) -> None
try:
validate_configuration(config)
except ValueError as e:
raise ChaliceDeploymentError(e)
class BaseDeployStep(object):
def handle(self, config, resource):
# type: (Config, models.Model) -> None
name = 'handle_%s' % resource.__class__.__name__.lower()
handler = getattr(self, name, None)
if handler is not None:
handler(config, resource)
class InjectDefaults(BaseDeployStep):
def __init__(self, lambda_timeout=DEFAULT_LAMBDA_TIMEOUT,
lambda_memory_size=DEFAULT_LAMBDA_MEMORY_SIZE):
# type: (int, int) -> None
self._lambda_timeout = lambda_timeout
self._lambda_memory_size = lambda_memory_size
def handle_lambdafunction(self, config, resource):
# type: (Config, models.LambdaFunction) -> None
if resource.timeout is None:
resource.timeout = self._lambda_timeout
if resource.memory_size is None:
resource.memory_size = self._lambda_memory_size
class DeploymentPackager(BaseDeployStep):
def __init__(self, packager):
# type: (LambdaDeploymentPackager) -> None
self._packager = packager
def handle_deploymentpackage(self, config, resource):
# type: (Config, models.DeploymentPackage) -> None
if isinstance(resource.filename, models.Placeholder):
zip_filename = self._packager.create_deployment_package(
config.project_dir, config.lambda_python_version)
resource.filename = zip_filename
class SwaggerBuilder(BaseDeployStep):
def __init__(self, swagger_generator):
# type: (SwaggerGenerator) -> None
self._swagger_generator = swagger_generator
def handle_restapi(self, config, resource):
# type: (Config, models.RestAPI) -> None
swagger_doc = self._swagger_generator.generate_swagger(
config.chalice_app, resource)
resource.swagger_doc = swagger_doc
class LambdaEventSourcePolicyInjector(BaseDeployStep):
def __init__(self):
# type: () -> None
self._policy_injected = False
def handle_sqseventsource(self, config, resource):
# type: (Config, models.SQSEventSource) -> None
# The sqs integration works by polling for
# available records so the lambda function needs
# permission to call sqs.
role = resource.lambda_function.role
if (not self._policy_injected and
isinstance(role, models.ManagedIAMRole) and
isinstance(role.policy, models.AutoGenIAMPolicy) and
not isinstance(role.policy.document,
models.Placeholder)):
self._inject_trigger_policy(role.policy.document,
SQS_EVENT_SOURCE_POLICY.copy())
self._policy_injected = True
def _inject_trigger_policy(self, document, policy):
# type: (Dict[str, Any], Dict[str, Any]) -> None
document['Statement'].append(policy)
class WebsocketPolicyInjector(BaseDeployStep):
def __init__(self):
# type: () -> None
self._policy_injected = False
def handle_websocketapi(self, config, resource):
# type: (Config, models.WebsocketAPI) -> None
self._inject_into_function(config, resource.connect_function)
self._inject_into_function(config, resource.message_function)
self._inject_into_function(config, resource.disconnect_function)
def _inject_into_function(self, config, lambda_function):
# type: (Config, Optional[models.LambdaFunction]) -> None
if lambda_function is None:
return
role = lambda_function.role
if role is None:
return
if (not self._policy_injected and
isinstance(role, models.ManagedIAMRole) and
isinstance(role.policy, models.AutoGenIAMPolicy) and
not isinstance(role.policy.document,
models.Placeholder)):
self._inject_policy(
role.policy.document,
POST_TO_WEBSOCKET_CONNECTION_POLICY.copy())
self._policy_injected = True
def _inject_policy(self, document, policy):
# type: (Dict[str, Any], Dict[str, Any]) -> None
document['Statement'].append(policy)
class PolicyGenerator(BaseDeployStep):
def __init__(self, policy_gen, osutils):
# type: (AppPolicyGenerator, OSUtils) -> None
self._policy_gen = policy_gen
self._osutils = osutils
def _read_document_from_file(self, filename):
# type: (str) -> Dict[str, Any]
try:
return json.loads(self._osutils.get_file_contents(filename))
except IOError as e:
raise RuntimeError("Unable to load IAM policy file %s: %s"
% (filename, e))
def handle_filebasediampolicy(self, config, resource):
# type: (Config, models.FileBasedIAMPolicy) -> None
resource.document = self._read_document_from_file(resource.filename)
def handle_restapi(self, config, resource):
# type: (Config, models.RestAPI) -> None
if resource.policy and isinstance(
resource.policy, models.FileBasedIAMPolicy):
resource.policy.document = self._read_document_from_file(
resource.policy.filename)
def handle_autogeniampolicy(self, config, resource):
# type: (Config, models.AutoGenIAMPolicy) -> None
if isinstance(resource.document, models.Placeholder):
policy = self._policy_gen.generate_policy(config)
if models.RoleTraits.VPC_NEEDED in resource.traits:
policy['Statement'].append(VPC_ATTACH_POLICY)
resource.document = policy
class BuildStage(object):
def __init__(self, steps):
# type: (List[BaseDeployStep]) -> None
self._steps = steps
def execute(self, config, resources):
# type: (Config, List[models.Model]) -> None
for resource in resources:
for step in self._steps:
step.handle(config, resource)
class ResultsRecorder(object):
def __init__(self, osutils):
# type: (OSUtils) -> None
self._osutils = osutils
def record_results(self, results, chalice_stage_name, project_dir):
# type: (Any, str, str) -> None
deployed_dir = self._osutils.joinpath(
project_dir, '.chalice', 'deployed')
deployed_filename = self._osutils.joinpath(
deployed_dir, '%s.json' % chalice_stage_name)
if not self._osutils.directory_exists(deployed_dir):
self._osutils.makedirs(deployed_dir)
serialized = serialize_to_json(results)
self._osutils.set_file_contents(
filename=deployed_filename,
contents=serialized,
binary=False
)
class NoopResultsRecorder(ResultsRecorder):
def record_results(self, results, chalice_stage_name, project_dir):
# type: (Any, str, str) -> None
return None
class DeploymentReporter(object):
# We want the API URLs to be displayed last.
_SORT_ORDER = {
'rest_api': 100,
'websocket_api': 100,
}
# The default is chosen to sort before the rest_api
_DEFAULT_ORDERING = 50
def __init__(self, ui):
# type: (UI) -> None
self._ui = ui
def generate_report(self, deployed_values):
# type: (Dict[str, Any]) -> str
report = [
'Resources deployed:',
]
ordered = sorted(
deployed_values['resources'],
key=lambda x: self._SORT_ORDER.get(x['resource_type'],
self._DEFAULT_ORDERING))
for resource in ordered:
getattr(self, '_report_%s' % resource['resource_type'],
self._default_report)(resource, report)
report.append('')
return '\n'.join(report)
def _report_rest_api(self, resource, report):
# type: (Dict[str, Any], List[str]) -> None
report.append(' - Rest API URL: %s' % resource['rest_api_url'])
def _report_websocket_api(self, resource, report):
# type: (Dict[str, Any], List[str]) -> None
report.append(
' - Websocket API URL: %s' % resource['websocket_api_url'])
def _report_lambda_function(self, resource, report):
# type: (Dict[str, Any], List[str]) -> None
report.append(' - Lambda ARN: %s' % resource['lambda_arn'])
def _default_report(self, resource, report):
# type: (Dict[str, Any], List[str]) -> None
# The default behavior is to not report a resource. This
# cuts down on the output verbosity.
pass
def display_report(self, deployed_values):
# type: (Dict[str, Any]) -> None
report = self.generate_report(deployed_values)
self._ui.write(report)
|
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import os
import re
import shlex
from ..outproc import base as outproc
from ..local import command
from ..local import statusfile
from ..local import utils
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
# Pattern for additional resource files on Android. Files that are not covered
# by one of the auto-detection patterns below can be listed explicitly in a
# "Resources:" comment.
RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
# Pattern to auto-detect files to push on Android for statements like:
# load("path/to/file.js")
LOAD_PATTERN = re.compile(
r"(?:load|readbuffer|read)\((?:'|\")([^'\"]+)(?:'|\")\)")
# Pattern to auto-detect files to push on Android for statements like:
# import "path/to/file.js"
MODULE_RESOURCES_PATTERN_1 = re.compile(
r"(?:import|export)(?:\(| )(?:'|\")([^'\"]+)(?:'|\")")
# Pattern to auto-detect files to push on Android for statements like:
# import foobar from "path/to/file.js"
MODULE_RESOURCES_PATTERN_2 = re.compile(
r"(?:import|export).*from (?:'|\")([^'\"]+)(?:'|\")")
TIMEOUT_LONG = "long"
try:
cmp # Python 2
except NameError:
def cmp(x, y): # Python 3
return (x > y) - (x < y)
class TestCase(object):
def __init__(self, suite, path, name, test_config):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.name = name # string that identifies test in the status file
self.variant = None # name of the used testing variant
self.variant_flags = [] # list of strings, flags specific to this test
# Fields used by the test processors.
self.origin = None # Test that this test is a subtest of.
self.processor = None # Processor that created this subtest.
self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
self.keep_output = False # Whether the output of this test can be dropped
# Test config contains information needed to build the command.
self._test_config = test_config
self._random_seed = None # Overrides test config value if not None
# Outcomes
self._statusfile_outcomes = None
self.expected_outcomes = None
self._statusfile_flags = None
self._prepare_outcomes()
def create_subtest(self, processor, subtest_id, variant=None, flags=None,
keep_output=False, random_seed=None):
subtest = copy.copy(self)
subtest.origin = self
subtest.processor = processor
subtest.procid += '.%s' % subtest_id
subtest.keep_output |= keep_output
if random_seed:
subtest._random_seed = random_seed
if flags:
subtest.variant_flags = subtest.variant_flags + flags
if variant is not None:
assert self.variant is None
subtest.variant = variant
subtest._prepare_outcomes()
return subtest
def _prepare_outcomes(self, force_update=True):
if force_update or self._statusfile_outcomes is None:
def is_flag(outcome):
return outcome.startswith('--')
def not_flag(outcome):
return not is_flag(outcome)
outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
# Materialize as lists so they can be reused and concatenated under
# Python 3, where filter() returns a one-shot iterator.
self._statusfile_outcomes = list(filter(not_flag, outcomes))
self._statusfile_flags = list(filter(is_flag, outcomes))
self.expected_outcomes = (
self._parse_status_file_outcomes(self._statusfile_outcomes))
def _parse_status_file_outcomes(self, outcomes):
if (statusfile.FAIL_SLOPPY in outcomes and
'--use-strict' not in self.variant_flags):
return outproc.OUTCOMES_FAIL
expected_outcomes = []
if (statusfile.FAIL in outcomes or
statusfile.FAIL_OK in outcomes):
expected_outcomes.append(statusfile.FAIL)
if statusfile.CRASH in outcomes:
expected_outcomes.append(statusfile.CRASH)
# Do not add PASS if there is nothing else. Empty outcomes are converted to
# the global [PASS].
if expected_outcomes and statusfile.PASS in outcomes:
expected_outcomes.append(statusfile.PASS)
# Avoid creating multiple instances of a list with a single FAIL.
if expected_outcomes == outproc.OUTCOMES_FAIL:
return outproc.OUTCOMES_FAIL
return expected_outcomes or outproc.OUTCOMES_PASS
@property
def do_skip(self):
return (statusfile.SKIP in self._statusfile_outcomes and
not self.suite.test_config.run_skipped)
@property
def is_slow(self):
return statusfile.SLOW in self._statusfile_outcomes
@property
def is_fail_ok(self):
return statusfile.FAIL_OK in self._statusfile_outcomes
@property
def is_pass_or_fail(self):
return (statusfile.PASS in self._statusfile_outcomes and
statusfile.FAIL in self._statusfile_outcomes and
statusfile.CRASH not in self._statusfile_outcomes)
@property
def only_standard_variant(self):
return statusfile.NO_VARIANTS in self._statusfile_outcomes
def get_command(self):
params = self._get_cmd_params()
env = self._get_cmd_env()
shell = self.get_shell()
if utils.IsWindows():
shell += '.exe'
shell_flags = self._get_shell_flags()
timeout = self._get_timeout(params)
return self._create_cmd(shell, shell_flags + params, env, timeout)
def _get_cmd_params(self):
"""Gets command parameters and combines them in the following order:
- files [empty by default]
- random seed
- extra flags (from command line)
- user flags (variant/fuzzer flags)
- mode flags (based on chosen mode)
- source flags (from source code) [empty by default]
- test-suite flags
- statusfile flags
The best way to modify how parameters are created is to only override
methods for getting partial parameters.
"""
return (
self._get_files_params() +
self._get_random_seed_flags() +
self._get_extra_flags() +
self._get_variant_flags() +
self._get_mode_flags() +
self._get_source_flags() +
self._get_suite_flags() +
self._get_statusfile_flags()
)
def _get_cmd_env(self):
return {}
def _get_files_params(self):
return []
def _get_timeout_param(self):
return None
def _get_random_seed_flags(self):
return ['--random-seed=%d' % self.random_seed]
@property
def random_seed(self):
return self._random_seed or self._test_config.random_seed
def _get_extra_flags(self):
return self._test_config.extra_flags
def _get_variant_flags(self):
return self.variant_flags
def _get_statusfile_flags(self):
"""Gets runtime flags from a status file.
Every outcome that starts with "--" is a flag.
"""
return self._statusfile_flags
def _get_mode_flags(self):
return self._test_config.mode_flags
def _get_source_flags(self):
return []
def _get_suite_flags(self):
return []
def _get_shell_flags(self):
return []
def _get_timeout(self, params):
timeout = self._test_config.timeout
if "--stress-opt" in params:
timeout *= 4
if "--jitless" in params:
timeout *= 2
if "--no-opt" in params:
timeout *= 2
if "--noenable-vfp3" in params:
timeout *= 2
if self._get_timeout_param() == TIMEOUT_LONG:
timeout *= 10
if self.is_slow:
timeout *= 4
return timeout
def get_shell(self):
raise NotImplementedError()
def _get_suffix(self):
return '.js'
def _create_cmd(self, shell, params, env, timeout):
return command.Command(
cmd_prefix=self._test_config.command_prefix,
shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
args=params,
env=env,
timeout=timeout,
verbose=self._test_config.verbose,
resources_func=self._get_resources,
)
def _parse_source_flags(self, source=None):
source = source or self.get_source()
flags = []
for match in re.findall(FLAGS_PATTERN, source):
flags += shlex.split(match.strip())
return flags
def is_source_available(self):
return self._get_source_path() is not None
def get_source(self):
with open(self._get_source_path()) as f:
return f.read()
def _get_source_path(self):
return None
def _get_resources(self):
"""Returns a list of absolute paths with additional files needed by the
test case.
Used to push additional files to Android devices.
"""
return []
def skip_predictable(self):
"""Returns True if the test case is not suitable for predictable testing."""
return True
@property
def output_proc(self):
if self.expected_outcomes is outproc.OUTCOMES_PASS:
return outproc.DEFAULT
return outproc.OutProc(self.expected_outcomes)
def __cmp__(self, other):
# Make sure that test cases are sorted correctly if sorted without
# key function. But using a key function is preferred for speed.
return cmp(
(self.suite.name, self.name, self.variant),
(other.suite.name, other.name, other.variant)
)
def __str__(self):
return self.suite.name + '/' + self.name
class D8TestCase(TestCase):
def get_shell(self):
return "d8"
def _get_shell_flags(self):
return ['--test']
def _get_resources_for_file(self, file):
"""Returns for a given file a list of absolute paths of files needed by the
given file.
"""
with open(file) as f:
source = f.read()
result = []
def add_path(path):
result.append(os.path.abspath(path.replace('/', os.path.sep)))
for match in RESOURCES_PATTERN.finditer(source):
# There are several resources per line. Relative to base dir.
for path in match.group(1).strip().split():
add_path(path)
for match in LOAD_PATTERN.finditer(source):
# Files in load statements are relative to base dir.
add_path(match.group(1))
for match in MODULE_RESOURCES_PATTERN_1.finditer(source):
# Imported files are relative to the file importing them.
add_path(os.path.join(os.path.dirname(file), match.group(1)))
for match in MODULE_RESOURCES_PATTERN_2.finditer(source):
# Imported files are relative to the file importing them.
add_path(os.path.join(os.path.dirname(file), match.group(1)))
return result
def _get_resources(self):
"""Returns the list of files needed by a test case."""
if not self._get_source_path():
return []
result = set()
to_check = [self._get_source_path()]
# Recurse over all files until reaching a fixpoint.
while to_check:
next_resource = to_check.pop()
result.add(next_resource)
for resource in self._get_resources_for_file(next_resource):
# Only add files that exist on disk. The patterns we check for give some
# false positives otherwise.
if resource not in result and os.path.exists(resource):
to_check.append(resource)
return sorted(list(result))
def skip_predictable(self):
"""Returns True if the test case is not suitable for predictable testing."""
return (statusfile.FAIL in self.expected_outcomes or
self.output_proc.negative)
|
|
# Copyright 2020 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import datetime
import logging
import unittest
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
from tests.unit.framework.common import test_constants
from tests_aio.unit._common import CountingRequestIterator
from tests_aio.unit._common import inject_callbacks
from tests_aio.unit._constants import UNREACHABLE_TARGET
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
_SHORT_TIMEOUT_S = 1.0
_NUM_STREAM_REQUESTS = 5
_REQUEST_PAYLOAD_SIZE = 7
_RESPONSE_INTERVAL_US = int(_SHORT_TIMEOUT_S * 1000 * 1000)
class _StreamUnaryInterceptorEmpty(aio.StreamUnaryClientInterceptor):
async def intercept_stream_unary(self, continuation, client_call_details,
request_iterator):
return await continuation(client_call_details, request_iterator)
def assert_in_final_state(self, test: unittest.TestCase):
pass
class _StreamUnaryInterceptorWithRequestIterator(
aio.StreamUnaryClientInterceptor):
async def intercept_stream_unary(self, continuation, client_call_details,
request_iterator):
self.request_iterator = CountingRequestIterator(request_iterator)
call = await continuation(client_call_details, self.request_iterator)
return call
def assert_in_final_state(self, test: unittest.TestCase):
test.assertEqual(_NUM_STREAM_REQUESTS,
self.request_iterator.request_cnt)
class TestStreamUnaryClientInterceptor(AioTestBase):
async def setUp(self):
self._server_target, self._server = await start_test_server()
async def tearDown(self):
await self._server.stop(None)
async def test_intercepts(self):
for interceptor_class in (_StreamUnaryInterceptorEmpty,
_StreamUnaryInterceptorWithRequestIterator):
with self.subTest(name=interceptor_class):
interceptor = interceptor_class()
channel = aio.insecure_channel(self._server_target,
interceptors=[interceptor])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' *
_REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(
payload=payload)
async def request_iterator():
for _ in range(_NUM_STREAM_REQUESTS):
yield request
call = stub.StreamingInputCall(request_iterator())
response = await call
self.assertEqual(_NUM_STREAM_REQUESTS * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
self.assertEqual(await call.initial_metadata(), aio.Metadata())
self.assertEqual(await call.trailing_metadata(), aio.Metadata())
self.assertEqual(await call.details(), '')
self.assertEqual(await call.debug_error_string(), '')
self.assertEqual(call.cancel(), False)
self.assertEqual(call.cancelled(), False)
self.assertEqual(call.done(), True)
interceptor.assert_in_final_state(self)
await channel.close()
async def test_intercepts_using_write(self):
for interceptor_class in (_StreamUnaryInterceptorEmpty,
_StreamUnaryInterceptorWithRequestIterator):
with self.subTest(name=interceptor_class):
interceptor = interceptor_class()
channel = aio.insecure_channel(self._server_target,
interceptors=[interceptor])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' *
_REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(
payload=payload)
call = stub.StreamingInputCall()
for _ in range(_NUM_STREAM_REQUESTS):
await call.write(request)
await call.done_writing()
response = await call
self.assertEqual(_NUM_STREAM_REQUESTS * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
self.assertEqual(await call.initial_metadata(), aio.Metadata())
self.assertEqual(await call.trailing_metadata(), aio.Metadata())
self.assertEqual(await call.details(), '')
self.assertEqual(await call.debug_error_string(), '')
self.assertEqual(call.cancel(), False)
self.assertEqual(call.cancelled(), False)
self.assertEqual(call.done(), True)
interceptor.assert_in_final_state(self)
await channel.close()
async def test_add_done_callback_interceptor_task_not_finished(self):
for interceptor_class in (_StreamUnaryInterceptorEmpty,
_StreamUnaryInterceptorWithRequestIterator):
with self.subTest(name=interceptor_class):
interceptor = interceptor_class()
channel = aio.insecure_channel(self._server_target,
interceptors=[interceptor])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' *
_REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(
payload=payload)
async def request_iterator():
for _ in range(_NUM_STREAM_REQUESTS):
yield request
call = stub.StreamingInputCall(request_iterator())
validation = inject_callbacks(call)
response = await call
await validation
await channel.close()
async def test_add_done_callback_interceptor_task_finished(self):
for interceptor_class in (_StreamUnaryInterceptorEmpty,
_StreamUnaryInterceptorWithRequestIterator):
with self.subTest(name=interceptor_class):
interceptor = interceptor_class()
channel = aio.insecure_channel(self._server_target,
interceptors=[interceptor])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' *
_REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(
payload=payload)
async def request_iterator():
for _ in range(_NUM_STREAM_REQUESTS):
yield request
call = stub.StreamingInputCall(request_iterator())
response = await call
validation = inject_callbacks(call)
await validation
await channel.close()
async def test_multiple_interceptors_request_iterator(self):
for interceptor_class in (_StreamUnaryInterceptorEmpty,
_StreamUnaryInterceptorWithRequestIterator):
with self.subTest(name=interceptor_class):
interceptors = [interceptor_class(), interceptor_class()]
channel = aio.insecure_channel(self._server_target,
interceptors=interceptors)
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' *
_REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(
payload=payload)
async def request_iterator():
for _ in range(_NUM_STREAM_REQUESTS):
yield request
call = stub.StreamingInputCall(request_iterator())
response = await call
self.assertEqual(_NUM_STREAM_REQUESTS * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
self.assertEqual(await call.initial_metadata(), aio.Metadata())
self.assertEqual(await call.trailing_metadata(), aio.Metadata())
self.assertEqual(await call.details(), '')
self.assertEqual(await call.debug_error_string(), '')
self.assertEqual(call.cancel(), False)
self.assertEqual(call.cancelled(), False)
self.assertEqual(call.done(), True)
for interceptor in interceptors:
interceptor.assert_in_final_state(self)
await channel.close()
async def test_intercepts_request_iterator_rpc_error(self):
for interceptor_class in (_StreamUnaryInterceptorEmpty,
_StreamUnaryInterceptorWithRequestIterator):
with self.subTest(name=interceptor_class):
channel = aio.insecure_channel(
UNREACHABLE_TARGET, interceptors=[interceptor_class()])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' *
_REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(
payload=payload)
# When there is an error the request iterator is no longer
# consumed.
async def request_iterator():
for _ in range(_NUM_STREAM_REQUESTS):
yield request
call = stub.StreamingInputCall(request_iterator())
with self.assertRaises(aio.AioRpcError) as exception_context:
await call
self.assertEqual(grpc.StatusCode.UNAVAILABLE,
exception_context.exception.code())
self.assertTrue(call.done())
self.assertEqual(grpc.StatusCode.UNAVAILABLE, await call.code())
await channel.close()
async def test_intercepts_request_iterator_rpc_error_using_write(self):
for interceptor_class in (_StreamUnaryInterceptorEmpty,
_StreamUnaryInterceptorWithRequestIterator):
with self.subTest(name=interceptor_class):
channel = aio.insecure_channel(
UNREACHABLE_TARGET, interceptors=[interceptor_class()])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' *
_REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(
payload=payload)
call = stub.StreamingInputCall()
# When there is an error during the write, an exception is raised.
with self.assertRaises(asyncio.InvalidStateError):
for _ in range(_NUM_STREAM_REQUESTS):
await call.write(request)
with self.assertRaises(aio.AioRpcError) as exception_context:
await call
self.assertEqual(grpc.StatusCode.UNAVAILABLE,
exception_context.exception.code())
self.assertTrue(call.done())
self.assertEqual(grpc.StatusCode.UNAVAILABLE, await call.code())
await channel.close()
async def test_cancel_before_rpc(self):
interceptor_reached = asyncio.Event()
wait_for_ever = self.loop.create_future()
class Interceptor(aio.StreamUnaryClientInterceptor):
async def intercept_stream_unary(self, continuation,
client_call_details,
request_iterator):
interceptor_reached.set()
await wait_for_ever
channel = aio.insecure_channel(self._server_target,
interceptors=[Interceptor()])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
call = stub.StreamingInputCall()
self.assertFalse(call.cancelled())
self.assertFalse(call.done())
await interceptor_reached.wait()
self.assertTrue(call.cancel())
# Writing after the call has been cancelled raises an exception.
with self.assertRaises(asyncio.InvalidStateError):
for _ in range(_NUM_STREAM_REQUESTS):
await call.write(request)
with self.assertRaises(asyncio.CancelledError):
await call
self.assertTrue(call.cancelled())
self.assertTrue(call.done())
self.assertEqual(await call.code(), grpc.StatusCode.CANCELLED)
self.assertEqual(await call.initial_metadata(), None)
self.assertEqual(await call.trailing_metadata(), None)
await channel.close()
async def test_cancel_after_rpc(self):
interceptor_reached = asyncio.Event()
wait_for_ever = self.loop.create_future()
class Interceptor(aio.StreamUnaryClientInterceptor):
async def intercept_stream_unary(self, continuation,
client_call_details,
request_iterator):
call = await continuation(client_call_details, request_iterator)
interceptor_reached.set()
await wait_for_ever
channel = aio.insecure_channel(self._server_target,
interceptors=[Interceptor()])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
call = stub.StreamingInputCall()
self.assertFalse(call.cancelled())
self.assertFalse(call.done())
await interceptor_reached.wait()
self.assertTrue(call.cancel())
# Writing after the call has been cancelled raises an exception.
with self.assertRaises(asyncio.InvalidStateError):
for _ in range(_NUM_STREAM_REQUESTS):
await call.write(request)
with self.assertRaises(asyncio.CancelledError):
await call
self.assertTrue(call.cancelled())
self.assertTrue(call.done())
self.assertEqual(await call.code(), grpc.StatusCode.CANCELLED)
self.assertEqual(await call.initial_metadata(), None)
self.assertEqual(await call.trailing_metadata(), None)
await channel.close()
async def test_cancel_while_writing(self):
# Test cancellation before making any write, or after making at least one.
for num_writes_before_cancel in (0, 1):
with self.subTest(name="Num writes before cancel: {}".format(
num_writes_before_cancel)):
channel = aio.insecure_channel(
UNREACHABLE_TARGET,
interceptors=[_StreamUnaryInterceptorWithRequestIterator()])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' *
_REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(
payload=payload)
call = stub.StreamingInputCall()
with self.assertRaises(asyncio.InvalidStateError):
for i in range(_NUM_STREAM_REQUESTS):
if i == num_writes_before_cancel:
self.assertTrue(call.cancel())
await call.write(request)
with self.assertRaises(asyncio.CancelledError):
await call
self.assertTrue(call.cancelled())
self.assertTrue(call.done())
self.assertEqual(await call.code(), grpc.StatusCode.CANCELLED)
await channel.close()
async def test_cancel_by_the_interceptor(self):
class Interceptor(aio.StreamUnaryClientInterceptor):
async def intercept_stream_unary(self, continuation,
client_call_details,
request_iterator):
call = await continuation(client_call_details, request_iterator)
call.cancel()
return call
channel = aio.insecure_channel(UNREACHABLE_TARGET,
interceptors=[Interceptor()])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
call = stub.StreamingInputCall()
with self.assertRaises(asyncio.InvalidStateError):
for i in range(_NUM_STREAM_REQUESTS):
await call.write(request)
with self.assertRaises(asyncio.CancelledError):
await call
self.assertTrue(call.cancelled())
self.assertTrue(call.done())
self.assertEqual(await call.code(), grpc.StatusCode.CANCELLED)
await channel.close()
async def test_exception_raised_by_interceptor(self):
class InterceptorException(Exception):
pass
class Interceptor(aio.StreamUnaryClientInterceptor):
async def intercept_stream_unary(self, continuation,
client_call_details,
request_iterator):
raise InterceptorException
channel = aio.insecure_channel(UNREACHABLE_TARGET,
interceptors=[Interceptor()])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
call = stub.StreamingInputCall()
with self.assertRaises(InterceptorException):
for i in range(_NUM_STREAM_REQUESTS):
await call.write(request)
with self.assertRaises(InterceptorException):
await call
await channel.close()
async def test_intercepts_prohibit_mixing_style(self):
channel = aio.insecure_channel(
self._server_target, interceptors=[_StreamUnaryInterceptorEmpty()])
stub = test_pb2_grpc.TestServiceStub(channel)
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
async def request_iterator():
for _ in range(_NUM_STREAM_REQUESTS):
yield request
call = stub.StreamingInputCall(request_iterator())
with self.assertRaises(grpc._cython.cygrpc.UsageError):
await call.write(request)
with self.assertRaises(grpc._cython.cygrpc.UsageError):
await call.done_writing()
await channel.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
|
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, division, absolute_import
import re
import tempfile
import datetime
import uuid
import struct
import zlib
import hashlib
from shutil import rmtree
from os.path import join, exists, dirname, basename, abspath, normpath, splitext
from os.path import relpath
from os import linesep, remove, makedirs
from time import time
from intelhex import IntelHex
from json import load, dump
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from .arm_pack_manager import Cache
from .utils import (mkdir, run_cmd, run_cmd_ext, NotSupportedException,
ToolException, InvalidReleaseTargetException,
intelhex_offset, integer)
from .paths import (MBED_CMSIS_PATH, MBED_TARGETS_PATH, MBED_LIBRARIES,
MBED_HEADER, MBED_DRIVERS, MBED_PLATFORM, MBED_HAL,
MBED_CONFIG_FILE, MBED_LIBRARIES_DRIVERS,
MBED_LIBRARIES_PLATFORM, MBED_LIBRARIES_HAL,
BUILD_DIR)
from .resources import Resources, FileType, FileRef
from .notifier.mock import MockNotifier
from .targets import TARGET_NAMES, TARGET_MAP, CORE_ARCH
from .libraries import Library
from .toolchains import TOOLCHAIN_CLASSES
from .config import Config
RELEASE_VERSIONS = ['2', '5']
def prep_report(report, target_name, toolchain_name, id_name):
"""Setup report keys
Positional arguments:
report - the report to fill
target_name - the target being used
toolchain_name - the toolchain being used
id_name - the name of the executable or library being built
"""
if target_name not in report:
report[target_name] = {}
if toolchain_name not in report[target_name]:
report[target_name][toolchain_name] = {}
if id_name not in report[target_name][toolchain_name]:
report[target_name][toolchain_name][id_name] = []
def prep_properties(properties, target_name, toolchain_name, vendor_label):
"""Setup test properties
Positional arguments:
properties - the dict to fill
target_name - the target the test is targeting
toolchain_name - the toolchain that will compile the test
vendor_label - the vendor
"""
if target_name not in properties:
properties[target_name] = {}
if toolchain_name not in properties[target_name]:
properties[target_name][toolchain_name] = {}
properties[target_name][toolchain_name]["target"] = target_name
properties[target_name][toolchain_name]["vendor"] = vendor_label
properties[target_name][toolchain_name]["toolchain"] = toolchain_name
def create_result(target_name, toolchain_name, id_name, description):
"""Create a result dictionary
Positional arguments:
target_name - the target being built for
toolchain_name - the toolchain doing the building
id_name - the name of the executable or library being built
description - a human readable description of what's going on
"""
cur_result = {}
cur_result["target_name"] = target_name
cur_result["toolchain_name"] = toolchain_name
cur_result["id"] = id_name
cur_result["description"] = description
cur_result["elapsed_time"] = 0
cur_result["output"] = ""
return cur_result
def add_result_to_report(report, result):
"""Add a single result to a report dictionary
Positional arguments:
report - the report to append to
result - the result to append
"""
result["date"] = datetime.datetime.utcnow().isoformat()
result["uuid"] = str(uuid.uuid1())
target = result["target_name"]
toolchain = result["toolchain_name"]
id_name = result['id']
result_wrap = {0: result}
report[target][toolchain][id_name].append(result_wrap)
def get_config(src_paths, target, toolchain_name=None, app_config=None):
"""Get the configuration object for a target-toolchain combination
Positional arguments:
src_paths - paths to scan for the configuration files
target - the device we are building for
toolchain_name - the string that identifies the build tools
"""
# Convert src_paths to a list if needed
if not isinstance(src_paths, list):
src_paths = [src_paths]
res = Resources(MockNotifier())
if toolchain_name:
toolchain = prepare_toolchain(src_paths, None, target, toolchain_name,
app_config=app_config)
config = toolchain.config
res.scan_with_toolchain(src_paths, toolchain, exclude=False)
else:
config = Config(target, src_paths, app_config=app_config)
res.scan_with_config(src_paths, config)
if config.has_regions:
_ = list(config.regions)
cfg, macros = config.get_config_data()
features = config.get_features()
return cfg, macros, features
def is_official_target(target_name, version):
""" Returns True, None if a target is part of the official release for the
given version. Returns False, 'reason' if a target is not part of the
official release for the given version.
Positional arguments:
target_name - Name of the target (ex. 'K64F')
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
result = True
reason = None
target = TARGET_MAP[target_name]
if hasattr(target, 'release_versions') \
and version in target.release_versions:
if version == '2':
# For version 2, either ARM or uARM toolchain support is required
required_toolchains = set(['ARM', 'uARM'])
if not required_toolchains.intersection(target.supported_toolchains):
result = False
reason = ("Target '%s' must support " % target.name) + \
("one of the folowing toolchains to be included in the") + \
((" mbed 2.0 official release: %s" + linesep) %
", ".join(required_toolchains)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(target.supported_toolchains))
elif version == '5':
# For version 5, support for either all of ARM, GCC_ARM, and IAR,
# or for ARMC6, is required
required_toolchains = [
set(['ARM', 'GCC_ARM', 'IAR']),
set(['ARMC6'])
]
supported_toolchains = set(target.supported_toolchains)
if not any(r.issubset(supported_toolchains)
for r in required_toolchains):
result = False
reason = ("Target '%s' must support " % target.name) + \
("ALL of the folowing toolchains to be included in the") + \
((" mbed OS 5.0 official release: %s" + linesep) %
", ".join(sorted(required_toolchains[0]))) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(sorted(supported_toolchains)))
elif target.default_lib != 'std':
result = False
reason = ("Target '%s' must set the " % target.name) + \
("'default_lib' to 'std' to be included in the ") + \
("mbed OS 5.0 official release." + linesep) + \
("Currently it is set to '%s'" % target.default_lib)
else:
result = False
reason = ("Target '%s' has set an invalid release version of '%s'" %
version) + \
("Please choose from the following release versions: %s" %
', '.join(RELEASE_VERSIONS))
else:
result = False
if not hasattr(target, 'release_versions'):
reason = "Target '%s' " % target.name
reason += "does not have the 'release_versions' key set"
elif version not in target.release_versions:
reason = "Target '%s' does not contain the version '%s' " % \
(target.name, version)
reason += "in its 'release_versions' key"
return result, reason
def transform_release_toolchains(toolchains, version):
""" Given a list of toolchains and a release version, return a list of
only the supported toolchains for that release
Positional arguments:
toolchains - The list of toolchains
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
if version == '5':
return ['ARM', 'GCC_ARM', 'IAR']
else:
return toolchains
def get_mbed_official_release(version):
""" Given a release version string, return a tuple that contains a target
and the supported toolchains for that release.
Ex. Given '2', return (('LPC1768', ('ARM', 'GCC_ARM')),
('K64F', ('ARM', 'GCC_ARM')), ...)
Positional arguments:
version - The version string. Should be a string contained within
RELEASE_VERSIONS
"""
mbed_official_release = (
tuple(
tuple(
[
TARGET_MAP[target].name,
tuple(transform_release_toolchains(
TARGET_MAP[target].supported_toolchains, version))
]
) for target in TARGET_NAMES \
if (hasattr(TARGET_MAP[target], 'release_versions')
and version in TARGET_MAP[target].release_versions)
)
)
for target in mbed_official_release:
is_official, reason = is_official_target(target[0], version)
if not is_official:
raise InvalidReleaseTargetException(reason)
return mbed_official_release
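# Usage sketch (illustrative only): list every target in a release together
# with the toolchains it supports.
def _example_print_official_release(version='5'):
    for target_name, toolchains in get_mbed_official_release(version):
        print("%s: %s" % (target_name, ", ".join(toolchains)))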
ARM_COMPILERS = ("ARM", "ARMC6", "uARM")
def target_supports_toolchain(target, toolchain_name):
"""Return True if the target supports the given toolchain, treating the
members of the Arm Compiler family (ARM, ARMC6, uARM) as interchangeable."""
if toolchain_name in ARM_COMPILERS:
return any(tc in target.supported_toolchains for tc in ARM_COMPILERS)
else:
return toolchain_name in target.supported_toolchains
def prepare_toolchain(src_paths, build_dir, target, toolchain_name,
macros=None, clean=False, jobs=1,
notify=None, config=None, app_config=None,
build_profile=None, ignore=None):
""" Prepares resource related objects - toolchain, target, config
Positional arguments:
src_paths - the paths to source directories
target - ['LPC1768', 'LPC11U24', etc.]
toolchain_name - ['ARM', 'uARM', 'GCC_ARM', 'GCC_CR']
Keyword arguments:
macros - additional macros
clean - Rebuild everything if True
jobs - how many compilers we can run at once
notify - Notify function for logs
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a list of mergeable build profiles
ignore - list of paths to add to mbedignore
"""
# We need to remove all paths which are repeated to avoid
# multiple compilations and linking with the same objects
src_paths = [src_paths[0]] + list(set(src_paths[1:]))
# If the configuration object was not yet created, create it now
config = config or Config(target, src_paths, app_config=app_config)
target = config.target
if not target_supports_toolchain(target, toolchain_name):
raise NotSupportedException(
"Target {} is not supported by toolchain {}".format(
target.name, toolchain_name))
if (toolchain_name == "ARM" and CORE_ARCH[target.core] == 8):
toolchain_name = "ARMC6"
try:
cur_tc = TOOLCHAIN_CLASSES[toolchain_name]
except KeyError:
raise KeyError("Toolchain %s not supported" % toolchain_name)
profile = {'c': [], 'cxx': [], 'common': [], 'asm': [], 'ld': []}
for contents in build_profile or []:
for key in profile:
profile[key].extend(contents[toolchain_name].get(key, []))
toolchain = cur_tc(
target, notify, macros, build_dir=build_dir, build_profile=profile)
toolchain.config = config
toolchain.jobs = jobs
toolchain.build_all = clean
if ignore:
toolchain.add_ignore_patterns(root=".", base_path=".", patterns=ignore)
return toolchain
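# Usage sketch (illustrative only): the source path, build directory, target
# name and toolchain below are placeholders; a caller normally also supplies
# a notifier and a list of build profiles keyed by toolchain name.
def _example_prepare_toolchain(notify):
    return prepare_toolchain(
        ["."], "./BUILD", "K64F", "GCC_ARM",
        notify=notify, jobs=4, clean=False)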
def _printihex(ihex):
import pprint
pprint.PrettyPrinter().pprint(ihex.todict())
def _real_region_size(region):
try:
part = intelhex_offset(region.filename, offset=region.start)
return (part.maxaddr() - part.minaddr()) + 1
except AttributeError:
return region.size
def _fill_header(region_list, current_region):
"""Fill an application header region
This is done in three steps:
* Fill the whole region with zeros
* Fill const, timestamp and size entries with their data
* Fill the digests using this header as the header region
"""
region_dict = {r.name: r for r in region_list}
header = IntelHex()
header.puts(current_region.start, b'\x00' * current_region.size)
start = current_region.start
# For a header region, 'filename' holds the list of header member
# descriptions rather than a path on disk.
for member in current_region.filename:
_, type, subtype, data = member
member_size = Config.header_member_size(member)
if type == "const":
fmt = {
"8le": ">B", "16le": "<H", "32le": "<L", "64le": "<Q",
"8be": "<B", "16be": ">H", "32be": ">L", "64be": ">Q"
}[subtype]
header.puts(start, struct.pack(fmt, integer(data, 0)))
elif type == "timestamp":
fmt = {"32le": "<L", "64le": "<Q",
"32be": ">L", "64be": ">Q"}[subtype]
header.puts(start, struct.pack(fmt, int(time())))
elif type == "size":
fmt = {"32le": "<L", "64le": "<Q",
"32be": ">L", "64be": ">Q"}[subtype]
size = sum(_real_region_size(region_dict[r]) for r in data)
header.puts(start, struct.pack(fmt, size))
elif type == "digest":
if data == "header":
ih = header[:start]
else:
ih = intelhex_offset(region_dict[data].filename, offset=region_dict[data].start)
if subtype.startswith("CRCITT32"):
fmt = {"CRCITT32be": ">l", "CRCITT32le": "<l"}[subtype]
header.puts(start, struct.pack(fmt, zlib.crc32(ih.tobinarray())))
elif subtype.startswith("SHA"):
if subtype == "SHA256":
hash = hashlib.sha256()
elif subtype == "SHA512":
hash = hashlib.sha512()
hash.update(ih.tobinarray())
header.puts(start, hash.digest())
start += Config.header_member_size(member)
return header
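# Illustrative sketch of the member descriptions consumed by _fill_header():
# each member is a 4-tuple whose last three fields are the type, subtype and
# data handled above. All names and values below are hypothetical.
_EXAMPLE_HEADER_MEMBERS = [
    ("magic", "const", "32le", "0x5a51b3d4"),
    ("version", "const", "32le", "2"),
    ("timestamp", "timestamp", "64le", None),
    ("app_size", "size", "32le", ["application"]),
    ("app_digest", "digest", "SHA256", "application"),
]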
def merge_region_list(region_list, destination, notify, padding=b'\xFF'):
"""Merge the region_list into a single image
Positional Arguments:
region_list - list of regions, which should contain filenames
destination - file name to write all regions to
notify - Notify function for logs
Keyword arguments:
padding - bytes to fill gaps with
"""
merged = IntelHex()
_, format = splitext(destination)
notify.info("Merging Regions")
for region in region_list:
if region.active and not region.filename:
raise ToolException("Active region has no contents: No file found.")
if isinstance(region.filename, list):
header_basename, _ = splitext(destination)
header_filename = header_basename + "_header.hex"
_fill_header(region_list, region).tofile(header_filename, format='hex')
region = region._replace(filename=header_filename)
if region.filename:
notify.info(" Filling region %s with %s" % (region.name, region.filename))
part = intelhex_offset(region.filename, offset=region.start)
part.start_addr = None
part_size = (part.maxaddr() - part.minaddr()) + 1
if part_size > region.size:
raise ToolException("Contents of region %s does not fit"
% region.name)
merged.merge(part)
pad_size = region.size - part_size
if pad_size > 0 and region != region_list[-1]:
notify.info(" Padding region %s with 0x%x bytes" %
(region.name, pad_size))
if format is ".hex":
"""The offset will be in the hex file generated when we're done,
so we can skip padding here"""
else:
merged.puts(merged.maxaddr() + 1, padding * pad_size)
if not exists(dirname(destination)):
makedirs(dirname(destination))
notify.info("Space used after regions merged: 0x%x" %
(merged.maxaddr() - merged.minaddr() + 1))
merged.tofile(destination, format=format.strip("."))
UPDATE_WHITELIST = (
"application",
)
def build_project(src_paths, build_path, target, toolchain_name,
libraries_paths=None, linker_script=None, clean=False,
notify=None, name=None, macros=None, inc_dirs=None, jobs=1,
report=None, properties=None, project_id=None,
project_description=None, config=None,
app_config=None, build_profile=None, stats_depth=None, ignore=None):
""" Build a project. A project may be a test or a user program.
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the project
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
libraries_paths - The location of libraries to include when linking
linker_script - the file that drives the linker to do its job
clean - Rebuild everything if True
notify - Notify function for logs
name - the name of the project
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
report - a dict where a result may be appended
properties - a dict to fill with extra report properties (target, toolchain, vendor)
project_id - the name put in the report
project_description - the human-readable version of what this thing does
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
stats_depth - depth level for memap to display file/dirs
ignore - list of paths to add to mbedignore
"""
# Convert src_path to a list if needed
if not isinstance(src_paths, list):
src_paths = [src_paths]
# Extend src_paths with libraries_paths
if libraries_paths is not None:
src_paths.extend(libraries_paths)
if inc_dirs is not None:
inc_dirs.extend(map(dirname, libraries_paths))
if clean and exists(build_path):
rmtree(build_path)
mkdir(build_path)
toolchain = prepare_toolchain(
src_paths, build_path, target, toolchain_name, macros=macros,
clean=clean, jobs=jobs, notify=notify, config=config,
app_config=app_config, build_profile=build_profile, ignore=ignore)
toolchain.version_check()
# The first path will give the name to the library
name = (name or toolchain.config.name or
basename(normpath(abspath(src_paths[0]))))
notify.info("Building project %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = project_description if project_description else name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
try:
resources = Resources(notify).scan_with_toolchain(
src_paths, toolchain, inc_dirs=inc_dirs)
# Change linker script if specified
if linker_script is not None:
resources.add_file_ref(linker_script, linker_script)
# Compile Sources
objects = toolchain.compile_sources(resources, sorted(resources.get_file_paths(FileType.INC_DIR)))
resources.add_files_to_type(FileType.OBJECT, objects)
# Link Program
if toolchain.config.has_regions:
binary, _ = toolchain.link_program(resources, build_path, name + "_application")
region_list = list(toolchain.config.regions)
region_list = [r._replace(filename=binary) if r.active else r
for r in region_list]
res = "%s.%s" % (join(build_path, name),
getattr(toolchain.target, "OUTPUT_EXT", "bin"))
merge_region_list(region_list, res, notify)
update_regions = [
r for r in region_list if r.name in UPDATE_WHITELIST
]
if update_regions:
update_res = "%s_update.%s" % (
join(build_path, name),
getattr(toolchain.target, "OUTPUT_EXT", "bin")
)
merge_region_list(update_regions, update_res, notify)
res = (res, update_res)
else:
res = (res, None)
else:
res, _ = toolchain.link_program(resources, build_path, name)
res = (res, None)
memap_instance = getattr(toolchain, 'memap_instance', None)
memap_table = ''
if memap_instance:
# Write output to stdout in text (pretty table) format
memap_table = memap_instance.generate_output('table', stats_depth)
notify.info(memap_table)
# Write output to file in JSON format
map_out = join(build_path, name + "_map.json")
memap_instance.generate_output('json', stats_depth, map_out)
# Write output to file in CSV format for the CI
map_csv = join(build_path, name + "_map.csv")
memap_instance.generate_output('csv-ci', stats_depth, map_csv)
map_html = join(build_path, name + "_map.html")
memap_instance.generate_output('html', stats_depth, map_html)
resources.detect_duplicates()
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["result"] = "OK"
cur_result["memory_usage"] = (memap_instance.mem_report
if memap_instance is not None else None)
cur_result["bin"] = res[0]
cur_result["elf"] = splitext(res[0])[0] + ".elf"
cur_result.update(toolchain.report)
add_result_to_report(report, cur_result)
return res
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
else:
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
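# Usage sketch (illustrative only): the paths, target and toolchain names are
# placeholders; a front end such as mbed CLI drives this function in roughly
# this way.
def _example_build_project(notify):
    return build_project(
        "source", "BUILD/K64F/GCC_ARM", "K64F", "GCC_ARM",
        name="example_app", jobs=4, notify=notify)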
def build_library(src_paths, build_path, target, toolchain_name,
dependencies_paths=None, name=None, clean=False,
archive=True, notify=None, macros=None, inc_dirs=None, jobs=1,
report=None, properties=None, project_id=None,
remove_config_header_file=False, app_config=None,
build_profile=None, ignore=None):
""" Build a library
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the library
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
dependencies_paths - The location of libraries to include when linking
name - the name of the library
clean - Rebuild everything if True
archive - whether the library will create an archive file
notify - Notify function for logs
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
report - a dict where a result may be appended
properties - a dict to fill with extra report properties (target, toolchain, vendor)
project_id - the name that goes in the report
remove_config_header_file - delete config header file when done building
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
ignore - list of paths to add to mbedignore
"""
# Convert src_path to a list if needed
if not isinstance(src_paths, list):
src_paths = [src_paths]
src_paths = [relpath(s) for s in src_paths]
# Build path
if archive:
# Use temp path when building archive
tmp_path = join(build_path, '.temp')
mkdir(tmp_path)
else:
tmp_path = build_path
# Clean the build directory
if clean and exists(tmp_path):
rmtree(tmp_path)
mkdir(tmp_path)
# Pass all params to the unified prepare_toolchain()
toolchain = prepare_toolchain(
src_paths, build_path, target, toolchain_name, macros=macros,
clean=clean, jobs=jobs, notify=notify, app_config=app_config,
build_profile=build_profile, ignore=ignore)
# The first path will give the name to the library
if name is None:
name = basename(normpath(abspath(src_paths[0])))
notify.info("Building library %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
cur_result['type'] = 'library'
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
res = Resources(notify).scan_with_toolchain(
src_paths, toolchain, dependencies_paths, inc_dirs=inc_dirs)
# Copy headers, objects and static libraries - all files needed for
# static lib
to_copy = (
res.get_file_refs(FileType.HEADER) +
res.get_file_refs(FileType.OBJECT) +
res.get_file_refs(FileType.LIB) +
res.get_file_refs(FileType.JSON) +
res.get_file_refs(FileType.LD_SCRIPT) +
res.get_file_refs(FileType.HEX) +
res.get_file_refs(FileType.BIN)
)
toolchain.copy_files(to_copy, build_path)
# Compile Sources
objects = toolchain.compile_sources(
res, res.get_file_paths(FileType.INC_DIR))
res.add_files_to_type(FileType.OBJECT, objects)
if archive:
toolchain.build_library(objects, build_path, name)
if remove_config_header_file:
config_header_path = toolchain.get_config_header()
if config_header_path:
remove(config_header_path)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, ToolException):
cur_result["result"] = "FAIL"
elif isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
cur_result["elapsed_time"] = end - start
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
######################
### Legacy methods ###
######################
def mbed2_obj_path(target_name, toolchain_name):
real_tc_name = TOOLCHAIN_CLASSES[toolchain_name].__name__
return join("TARGET_" + target_name, "TOOLCHAIN_" + real_tc_name)
def build_lib(lib_id, target, toolchain_name, clean=False, macros=None,
notify=None, jobs=1, report=None, properties=None,
build_profile=None, ignore=None):
""" Legacy method for building mbed libraries
Positional arguments:
lib_id - the library's unique identifier
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
clean - Rebuild everything if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
report - a dict where a result may be appended
properties - a dict to fill with extra report properties (target, toolchain, vendor)
build_profile - a dict of flags that will be passed to the compiler
ignore - list of paths to add to mbedignore
"""
lib = Library(lib_id)
if not lib.is_supported(target, toolchain_name):
print('Library "%s" is not yet supported on target %s with toolchain %s'
% (lib_id, target.name, toolchain_name))
return False
# We need to combine macros from parameter list with macros from library
# definition
lib_macros = lib.macros if lib.macros else []
if macros:
macros.extend(lib_macros)
else:
macros = lib_macros
src_paths = lib.source_dir
build_path = lib.build_dir
dependencies_paths = lib.dependencies
inc_dirs = lib.inc_dirs
if not isinstance(src_paths, list):
src_paths = [src_paths]
# The first path will give the name to the library
name = basename(src_paths[0])
if report is not None:
start = time()
id_name = name.upper()
description = name
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
# Toolchain instance
# Create the desired build directory structure
bin_path = join(build_path, mbed2_obj_path(target.name, toolchain_name))
mkdir(bin_path)
tmp_path = join(build_path, '.temp', mbed2_obj_path(target.name,
toolchain_name))
mkdir(tmp_path)
toolchain = prepare_toolchain(
src_paths, tmp_path, target, toolchain_name, macros=macros,
notify=notify, build_profile=build_profile, jobs=jobs, clean=clean,
ignore=ignore)
notify.info("Building library %s (%s, %s)" %
(name.upper(), target.name, toolchain_name))
# Take into account the library configuration (MBED_CONFIG_FILE)
config = toolchain.config
config.add_config_files([MBED_CONFIG_FILE])
# Scan Resources
resources = Resources(notify).scan_with_toolchain(
src_paths + (lib.inc_dirs_ext or []), toolchain,
inc_dirs=inc_dirs, dependencies_paths=dependencies_paths)
# Copy Headers
toolchain.copy_files(
resources.get_file_refs(FileType.HEADER), build_path)
dependencies_include_dir = Resources(notify).scan_with_toolchain([build_path], toolchain).inc_dirs
# Compile Sources
objects = []
for resource in resources:
objects.extend(toolchain.compile_sources(resource, dependencies_include_dir))
needed_update = toolchain.build_library(objects, bin_path, name)
if report != None and needed_update:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
# A number of compiled files need to be copied as objects as the linker
# will not search for weak symbol overrides in archives. These are:
# - mbed_retarget.o: to make sure that the C standard lib symbols get
# overridden
# - mbed_board.o: `mbed_die` is weak
# - mbed_overrides.o: this contains platform overrides of various
# weak SDK functions
# - mbed_main.o: this contains main redirection
# - mbed_sdk_boot.o: this contains the main boot code in
# - PeripheralPins.o: PinMap can be weak
SEPARATE_NAMES = [
'PeripheralPins.o',
'mbed_retarget.o',
'mbed_board.o',
'mbed_overrides.o',
'mbed_main.o',
'mbed_sdk_boot.o',
]
def build_mbed_libs(target, toolchain_name, clean=False, macros=None,
notify=None, jobs=1, report=None, properties=None,
build_profile=None, ignore=None):
""" Build legacy libraries for a target and toolchain pair
Positional arguments:
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
clean - Rebuild everything if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
report - a dict where a result may be appended
properties - a dict to fill with extra report properties (target, toolchain, vendor)
build_profile - a dict of flags that will be passed to the compiler
ignore - list of paths to add to mbedignore
Return - True if target + toolchain built correctly, False if not supported
"""
if report is not None:
start = time()
id_name = "MBED"
description = "mbed SDK"
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(
target.name, toolchain_name, id_name, description)
if properties is not None:
prep_properties(
properties, target.name, toolchain_name, vendor_label)
if toolchain_name not in target.supported_toolchains:
supported_toolchains_text = ", ".join(target.supported_toolchains)
notify.info('The target {} does not support the toolchain {}'.format(
target.name,
toolchain_name
))
notify.info('{} supports {} toolchain{}'.format(
target.name,
supported_toolchains_text,
's' if len(target.supported_toolchains) > 1 else ''
))
if report is not None:
cur_result["result"] = "SKIP"
add_result_to_report(report, cur_result)
return False
try:
# Source and Build Paths
build_toolchain = join(
MBED_LIBRARIES, mbed2_obj_path(target.name, toolchain_name))
mkdir(build_toolchain)
tmp_path = join(
MBED_LIBRARIES,
'.temp',
mbed2_obj_path(target.name, toolchain_name)
)
mkdir(tmp_path)
# Toolchain and config
toolchain = prepare_toolchain(
[""], tmp_path, target, toolchain_name, macros=macros, notify=notify,
build_profile=build_profile, jobs=jobs, clean=clean, ignore=ignore)
config = toolchain.config
config.add_config_files([MBED_CONFIG_FILE])
toolchain.set_config_data(toolchain.config.get_config_data())
# distribute header files
toolchain.copy_files(
[FileRef(basename(MBED_HEADER), MBED_HEADER)], MBED_LIBRARIES)
library_incdirs = [dirname(MBED_LIBRARIES), MBED_LIBRARIES]
for dir, dest in [(MBED_DRIVERS, MBED_LIBRARIES_DRIVERS),
(MBED_PLATFORM, MBED_LIBRARIES_PLATFORM),
(MBED_HAL, MBED_LIBRARIES_HAL)]:
resources = Resources(notify).scan_with_toolchain([dir], toolchain)
toolchain.copy_files(
[FileRef(basename(p), p) for p
in resources.get_file_paths(FileType.HEADER)] ,
dest)
library_incdirs.append(dest)
# collect resources of the libs to compile
cmsis_res = Resources(notify).scan_with_toolchain(
[MBED_CMSIS_PATH], toolchain)
hal_res = Resources(notify).scan_with_toolchain(
[MBED_TARGETS_PATH], toolchain)
mbed_resources = Resources(notify).scan_with_toolchain(
[MBED_DRIVERS, MBED_PLATFORM, MBED_HAL], toolchain)
incdirs = cmsis_res.inc_dirs + hal_res.inc_dirs + library_incdirs
# Build Things
notify.info("Building library %s (%s, %s)" %
('MBED', target.name, toolchain_name))
objects = toolchain.compile_sources(mbed_resources, incdirs)
separate_objects = []
for obj in objects:
for name in SEPARATE_NAMES:
if obj.endswith(name):
separate_objects.append(obj)
for obj in separate_objects:
objects.remove(obj)
toolchain.build_library(objects, build_toolchain, "mbed")
notify.info("Building library %s (%s, %s)" %
('CMSIS', target.name, toolchain_name))
cmsis_objects = toolchain.compile_sources(cmsis_res, incdirs + [tmp_path])
notify.info("Building library %s (%s, %s)" %
('HAL', target.name, toolchain_name))
hal_objects = toolchain.compile_sources(hal_res, incdirs + [tmp_path])
# Copy everything into the build directory
to_copy_paths = [
hal_res.get_file_paths(FileType.HEADER),
hal_res.get_file_paths(FileType.HEX),
hal_res.get_file_paths(FileType.BIN),
hal_res.get_file_paths(FileType.LIB),
cmsis_res.get_file_paths(FileType.HEADER),
cmsis_res.get_file_paths(FileType.BIN),
cmsis_res.get_file_paths(FileType.LD_SCRIPT),
hal_res.get_file_paths(FileType.LD_SCRIPT),
[MBED_CONFIG_FILE],
cmsis_objects,
hal_objects,
separate_objects,
]
to_copy = [FileRef(basename(p), p) for p in sum(to_copy_paths, [])]
toolchain.copy_files(to_copy, build_toolchain)
if report is not None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report is not None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
cur_result["output"] += str(exc)
add_result_to_report(report, cur_result)
raise
def get_unique_supported_toolchains(release_targets=None):
""" Get list of all unique toolchains supported by targets
Keyword arguments:
release_targets - tuple structure returned from get_mbed_official_release().
If release_targets is not specified, then it queries all
known targets
"""
unique_supported_toolchains = []
if not release_targets:
for target in TARGET_NAMES:
for toolchain in TARGET_MAP[target].supported_toolchains:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
else:
for target in release_targets:
for toolchain in target[1]:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
return unique_supported_toolchains
def _lowercase_release_version(release_version):
try:
return release_version.lower()
except AttributeError:
return 'all'
def mcu_toolchain_list(release_version='5'):
""" Shows list of toolchains
"""
release_version = _lowercase_release_version(release_version)
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
columns = ["mbed OS %s" % x for x in RELEASE_VERSIONS] + unique_supported_toolchains
return "\n".join(columns)
def mcu_target_list(release_version='5'):
""" Shows target list
"""
release_version = _lowercase_release_version(release_version)
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
return "\n".join(target_names)
def mcu_toolchain_matrix(verbose_html=False, platform_filter=None,
release_version='5'):
""" Shows target map using prettytable
Keyword arguments:
verbose_html - emit html instead of a simple table
platform_filter - remove results that match the string
release_version - get the matrix for this major version number
"""
# Only use it in this function so building works without extra modules
from prettytable import PrettyTable, HEADER
release_version = _lowercase_release_version(release_version)
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
prepend_columns = ["Target"] + ["mbed OS %s" % x for x in RELEASE_VERSIONS]
# All tests status table print
columns = prepend_columns + unique_supported_toolchains
table_printer = PrettyTable(columns, junction_char="|", hrules=HEADER)
# Align table
for col in columns:
table_printer.align[col] = "c"
table_printer.align["Target"] = "l"
perm_counter = 0
target_counter = 0
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
for target in sorted(target_names):
if platform_filter is not None:
# Filter out platforms using regex
if re.search(platform_filter, target) is None:
continue
target_counter += 1
row = [target] # First column is platform name
for version in RELEASE_VERSIONS:
if target in version_release_target_names[version]:
text = "Supported"
else:
text = "-"
row.append(text)
for unique_toolchain in unique_supported_toolchains:
tgt_obj = TARGET_MAP[target]
if (unique_toolchain in tgt_obj.supported_toolchains or
(unique_toolchain == "ARMC6" and
"ARM" in tgt_obj.supported_toolchains) or
(unique_toolchain == "ARM" and
"ARMC6" in tgt_obj.supported_toolchains and
CORE_ARCH[tgt_obj.core] == 8)):
text = "Supported"
perm_counter += 1
else:
text = "-"
row.append(text)
table_printer.add_row(row)
result = table_printer.get_html_string() if verbose_html \
else table_printer.get_string()
result += "\n"
result += "Supported targets: %d\n"% (target_counter)
if target_counter == 1:
result += "Supported toolchains: %d"% (perm_counter)
return result
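# Usage sketch (illustrative only): print the support matrix for mbed OS 5
# targets whose names match the (hypothetical) filter "K64".
def _example_print_matrix():
    print(mcu_toolchain_matrix(release_version='5', platform_filter='K64'))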
def get_target_supported_toolchains(target):
""" Returns target supported toolchains list
Positional arguments:
target - the target to get the supported toolchains of
"""
return TARGET_MAP[target].supported_toolchains if target in TARGET_MAP \
else None
def print_build_results(result_list, build_name):
""" Generate result string for build results
Positional arguments:
result_list - the list of results to print
build_name - the name of the build we are printing result for
"""
result = ""
if len(result_list) > 0:
result += build_name + "\n"
result += "\n".join([" * %s" % f for f in result_list])
result += "\n"
return result
def print_build_memory_usage(report):
""" Generate result table with memory usage values for build results
Aggregates the memory usage reports collected during the build.
Positional arguments:
report - Report generated during build procedure.
"""
from prettytable import PrettyTable, HEADER
columns_text = ['name', 'target', 'toolchain']
columns_int = ['static_ram', 'total_flash']
table = PrettyTable(columns_text + columns_int, junction_char="|", hrules=HEADER)
for col in columns_text:
table.align[col] = 'l'
for col in columns_int:
table.align[col] = 'r'
for target in report:
for toolchain in report[target]:
for name in report[target][toolchain]:
for dlist in report[target][toolchain][name]:
for dlistelem in dlist:
# Get 'memory_usage' record and build table with
# statistics
record = dlist[dlistelem]
if 'memory_usage' in record and record['memory_usage']:
# Note that summary should be in the last record of
# 'memory_usage' section. This is why we are
# grabbing last "[-1]" record.
row = [
record['description'],
record['target_name'],
record['toolchain_name'],
record['memory_usage'][-1]['summary'][
'static_ram'],
record['memory_usage'][-1]['summary'][
'total_flash'],
]
table.add_row(row)
result = "Memory map breakdown for built projects (values in Bytes):\n"
result += table.get_string(sortby='name')
return result
def write_build_report(build_report, template_filename, filename):
"""Write a build report to disk using a template file
Positional arguments:
build_report - a report generated by the build system
template_filename - a file that contains the template for the style of build
report
filename - the location on disk to write the file to
"""
build_report_failing = []
build_report_passing = []
for report in build_report:
if len(report["failing"]) > 0:
build_report_failing.append(report)
else:
build_report_passing.append(report)
env = Environment(extensions=['jinja2.ext.with_'])
env.loader = FileSystemLoader('ci_templates')
template = env.get_template(template_filename)
with open(filename, 'w+') as placeholder:
placeholder.write(template.render(
failing_builds=build_report_failing,
passing_builds=build_report_passing))
def merge_build_data(filename, toolchain_report, app_type):
path_to_file = dirname(abspath(filename))
try:
build_data = load(open(filename))
except (IOError, ValueError):
build_data = {'builds': []}
for tgt in toolchain_report.values():
for tc in tgt.values():
for project in tc.values():
for build in project:
try:
build[0]['bin_fullpath'] = build[0]['bin']
build[0]['elf_fullpath'] = build[0]['elf']
build[0]['elf'] = relpath(build[0]['elf'], path_to_file)
build[0]['bin'] = relpath(build[0]['bin'], path_to_file)
except KeyError:
pass
if 'type' not in build[0]:
build[0]['type'] = app_type
build_data['builds'].insert(0, build[0])
dump(build_data, open(filename, "wb"), indent=4, separators=(',', ': '))
# -*- coding: utf-8 -*-
"""
Django to Jinja
~~~~~~~~~~~~~~~
Helper module that can convert django templates into Jinja2 templates.
This file is not intended to be used as stand alone application but to
be used as library. To convert templates you basically create your own
writer, add extra conversion logic for your custom template tags,
configure your django environment and run the `convert_templates`
function.
Here is a simple example::
# configure django (or use settings.configure)
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'yourapplication.settings'
from yourapplication.foo.templatetags.bar import MyNode
from django2jinja import Writer, convert_templates
def write_my_node(writer, node):
writer.start_variable()
writer.write('myfunc(')
for idx, arg in enumerate(node.args):
if idx:
writer.write(', ')
writer.node(arg)
writer.write(')')
writer.end_variable()
writer = Writer()
writer.node_handlers[MyNode] = write_my_node
convert_templates('/path/to/output/folder', writer=writer)
Here is an example of how to automatically translate your django
variables to jinja2::
import re
# List of tuple (Match pattern, Replace pattern, Exclusion pattern)
var_re = ((re.compile(r"(u|user)\.is_authenticated"), r"\1.is_authenticated()", None),
(re.compile(r"\.non_field_errors"), r".non_field_errors()", None),
(re.compile(r"\.label_tag"), r".label_tag()", None),
(re.compile(r"\.as_dl"), r".as_dl()", None),
(re.compile(r"\.as_table"), r".as_table()", None),
(re.compile(r"\.as_widget"), r".as_widget()", None),
(re.compile(r"\.as_hidden"), r".as_hidden()", None),
(re.compile(r"\.get_([0-9_\w]+)_url"), r".get_\1_url()", None),
(re.compile(r"\.url"), r".url()", re.compile(r"(form|calendar).url")),
(re.compile(r"\.get_([0-9_\w]+)_display"), r".get_\1_display()", None),
(re.compile(r"loop\.counter"), r"loop.index", None),
(re.compile(r"loop\.revcounter"), r"loop.revindex", None),
(re.compile(r"request\.GET\.([0-9_\w]+)"), r"request.GET.get('\1', '')", None),
(re.compile(r"request\.get_host"), r"request.get_host()", None),
(re.compile(r"\.all(?!_)"), r".all()", None),
(re.compile(r"\.all\.0"), r".all()[0]", None),
(re.compile(r"\.([0-9])($|\s+)"), r"[\1]\2", None),
(re.compile(r"\.items"), r".items()", None),
)
writer = Writer(var_re=var_re)
For details about the writing process have a look at the module code.
:copyright: (c) 2009 by the Jinja Team.
:license: BSD.
"""
from __future__ import print_function
import re
import os
import sys
from django.template.defaulttags import CsrfTokenNode, VerbatimNode, LoremNode
from django.templatetags.static import StaticNode
from django.utils.encoding import force_text
from django.utils.safestring import SafeData
from jinja2.defaults import *
from django.conf import settings
from django.template import defaulttags as core_tags, loader, loader_tags, engines
from django.template.base import (
TextNode, FilterExpression, Variable, TOKEN_TEXT, TOKEN_VAR, VariableNode
)
from django.templatetags import i18n as i18n_tags
_node_handlers = {}
_resolved_simple_tags = None
_newline_re = re.compile(r'(?:\r\n|\r|\n)')
# Django stores an itertools object on the cycle node. Not only is this
# thread unsafe but also a problem for the converter which needs the raw
# string values passed to the constructor to create a jinja loop.cycle()
# call from it.
_old_cycle_init = core_tags.CycleNode.__init__
def _fixed_cycle_init(self, cyclevars, variable_name=None, silent=False):
self.raw_cycle_vars = map(Variable, cyclevars)
_old_cycle_init(self, cyclevars, variable_name, silent)
core_tags.CycleNode.__init__ = _fixed_cycle_init
def node(cls):
def proxy(f):
_node_handlers[cls] = f
return f
return proxy
def convert_templates(output_dir, extensions=('.html', '.txt'), writer=None,
callback=None):
"""Iterates over all templates in the template dirs configured and
translates them and writes the new templates into the output directory.
"""
if writer is None:
writer = Writer()
def filter_templates(files):
for filename in files:
ifilename = filename.lower()
for extension in extensions:
if ifilename.endswith(extension):
yield filename
def translate(f, loadname):
template = loader.get_template(loadname)
original = writer.stream
writer.stream = f
writer.body(template.template.nodelist)
writer.stream = original
if callback is None:
def callback(template):
print(template)
for directory in settings.TEMPLATE_DIRS:
for dirname, _, files in os.walk(directory):
dirname = dirname[len(directory):].lstrip('/')
for filename in filter_templates(files):
source = os.path.normpath(os.path.join(dirname, filename))
target = os.path.join(output_dir, dirname, filename)
basetarget = os.path.dirname(target)
if not os.path.exists(basetarget):
os.makedirs(basetarget)
callback(source)
f = open(target, 'w')
try:
translate(f, source)
finally:
f.close()
class Writer(object):
"""The core writer class."""
def __init__(self, stream=None, error_stream=None,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
initial_autoescape=True,
use_jinja_autoescape=False,
custom_node_handlers=None,
var_re=None,
env=None):
if stream is None:
stream = sys.stdout
if error_stream is None:
error_stream = sys.stderr
self.stream = stream
self.error_stream = error_stream
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.autoescape = initial_autoescape
self.spaceless = False
self.use_jinja_autoescape = use_jinja_autoescape
self.node_handlers = dict(_node_handlers,
**(custom_node_handlers or {}))
self._loop_depth = 0
self.var_re = var_re or []
self.env = env
def enter_loop(self):
"""Increments the loop depth so that write functions know if they
are in a loop.
"""
self._loop_depth += 1
def leave_loop(self):
"""Reverse of enter_loop."""
self._loop_depth -= 1
@property
def in_loop(self):
"""True if we are in a loop."""
return self._loop_depth > 0
def write(self, s):
"""Writes stuff to the stream."""
self.stream.write(force_text(s))
def print_expr(self, expr):
"""Open a variable tag, write to the string to the stream and close."""
self.start_variable()
self.write(expr)
self.end_variable()
def _post_open(self):
if self.spaceless:
self.write('- ')
else:
self.write(' ')
def _pre_close(self):
if self.spaceless:
self.write(' -')
else:
self.write(' ')
def start_variable(self):
"""Start a variable."""
self.write(self.variable_start_string)
self._post_open()
def end_variable(self, always_safe=False):
"""End a variable."""
if not always_safe and self.autoescape and \
not self.use_jinja_autoescape:
self.write('|e')
self._pre_close()
self.write(self.variable_end_string)
def start_block(self):
"""Starts a block."""
self.write(self.block_start_string)
self._post_open()
def end_block(self):
"""Ends a block."""
self._pre_close()
self.write(self.block_end_string)
def tag(self, name):
"""Like `print_expr` just for blocks."""
self.start_block()
self.write(name)
self.end_block()
def variable(self, name):
"""Prints a variable. This performs variable name transformation."""
self.write(self.translate_variable_name(name))
def literal(self, value):
"""Writes a value as literal."""
value = repr(value)
if value[:2] in ('u"', "u'"):
value = value[1:]
self.write(value)
def filters(self, filters, is_block=False):
"""Dumps a list of filters."""
want_pipe = not is_block
for filter, args in filters:
name = self.get_filter_name(filter)
if name is None:
self.warn('Could not find the filter name for %r' % filter)
continue
if self.env is not None and name not in self.env.filters:
self.warn('Filter %s probably doesn\'t exist in Jinja' %
name)
if not want_pipe:
want_pipe = True
else:
self.write('|')
self.write(name)
if args:
self.write('(')
for idx, (is_var, value) in enumerate(args):
if idx:
self.write(', ')
if is_var:
self.node(value)
else:
self.literal(value)
self.write(')')
def get_location(self, origin, position):
"""Returns the location for an origin and position tuple as name
and lineno.
"""
if hasattr(origin, 'source'):
source = origin.source
name = '<unknown source>'
else:
source = origin.loader(origin.loadname, origin.dirs)[0]
name = origin.loadname
lineno = len(_newline_re.findall(source[:position[0]])) + 1
return name, lineno
def warn(self, message, node=None):
"""Prints a warning to the error stream."""
if node is not None and hasattr(node, 'source'):
filename, lineno = self.get_location(*node.source)
message = '[%s:%d] %s' % (filename, lineno, message)
print(message, file=self.error_stream)
def translate_variable_name(self, var):
"""Performs variable name translation."""
if self.in_loop and (var == 'forloop' or var.startswith('forloop.')):
var = var[3:]
for reg, rep, unless in self.var_re:
no_unless = not (unless and unless.search(var))
if reg.search(var) and no_unless:
var = reg.sub(rep, var)
break
return var
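# Illustrative sketch: with the var_re rules shown in the module docstring
# and the writer inside a loop, translate_variable_name() maps, for example,
# 'forloop.counter' to 'loop.index' and 'user.is_authenticated' to
# 'user.is_authenticated()'.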
def get_filter_name(self, filter):
"""Returns the filter name for a filter function or `None` if there
is no such filter.
"""
return getattr(filter, '_filter_name', None)
def get_simple_tag_name(self, tag):
global _resolved_simple_tags
from django.template.library import SimpleNode, InclusionNode
if not isinstance(tag, (SimpleNode, InclusionNode)):
self.warn("Can't get tag name from an unknown tag type", node=node)
return
target_func = tag.func
target_name = '.'.join((target_func.__module__, target_func.__name__))
if _resolved_simple_tags is None:
_resolved_simple_tags = {}
libraries = engines['django'].engine.template_libraries
for library in libraries.values():
for func_name, func in library.tags.items():
_resolved_simple_tags['.'.join((func.__module__, func.__name__))] = func_name
return _resolved_simple_tags.get(target_name)
def node(self, node):
"""Invokes the node handler for a node."""
for cls, handler in self.node_handlers.items():
if type(node) is cls or type(node).__name__ == cls:
handler(self, node)
break
else:
self.warn('Untranslatable node %s.%s found' % (
node.__module__,
node.__class__.__name__
), node)
def body(self, nodes):
"""Calls node() for every node in the iterable passed."""
for node in nodes:
self.node(node)
@node(TextNode)
def text_node(writer, node):
writer.write(node.s)
@node(Variable)
def variable(writer, node):
if node.translate:
writer.warn('i18n system used, make sure to install translations', node)
writer.write('_(')
if node.literal is not None:
writer.literal(node.literal)
else:
writer.variable(node.var)
if node.translate:
writer.write(')')
@node(VariableNode)
def variable_node(writer, node):
writer.start_variable()
if node.filter_expression.var.var == 'block.super' \
and not node.filter_expression.filters:
writer.write('super()')
else:
writer.node(node.filter_expression)
writer.end_variable()
@node(FilterExpression)
def filter_expression(writer, node):
if isinstance(node.var, SafeData):
writer.literal(node.var)
else:
writer.node(node.var)
writer.filters(node.filters)
@node(core_tags.CommentNode)
def comment_tag(writer, node):
pass
@node(core_tags.DebugNode)
def debug_tag(writer, node):
writer.warn('Debug tag detected. Make sure to add a global function '
'called debug to the namespace.', node=node)
writer.print_expr('debug()')
@node(core_tags.ForNode)
def for_loop(writer, node):
writer.start_block()
writer.write('for ')
for idx, var in enumerate(node.loopvars):
if idx:
writer.write(', ')
writer.variable(var)
writer.write(' in ')
if node.is_reversed:
writer.write('(')
writer.node(node.sequence)
if node.is_reversed:
writer.write(')|reverse')
writer.end_block()
writer.enter_loop()
writer.body(node.nodelist_loop)
writer.leave_loop()
if node.nodelist_empty:
writer.tag('else')
writer.body(node.nodelist_empty)
writer.tag('endfor')
def _if_condition_to_bits_backwards(condition):
from django.template.smartif import Literal, OPERATORS
if isinstance(condition, Literal):
yield condition.value
return
if condition.second:
yield from _if_condition_to_bits_backwards(condition.second)
if isinstance(condition, OPERATORS['not']): # prefix
yield from _if_condition_to_bits_backwards(condition.first)
yield condition
else:
yield condition
yield from _if_condition_to_bits_backwards(condition.first)
def if_condition_to_bits(condition):
backward_bits = list(_if_condition_to_bits_backwards(condition))
backward_bits.reverse()
return backward_bits
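# Illustrative sketch: for a condition such as
# {% if user.is_staff and not hidden %}, the helpers above flatten the
# smartif parse tree back into source order, yielding the operands (as
# FilterExpressions) and the 'and'/'not' operators, so the IfNode handler
# below can emit "if user.is_staff and not hidden" one bit at a time.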
@node(core_tags.IfNode)
def if_condition(writer, node):
for x, (condition, nodelist) in enumerate(node.conditions_nodelists):
writer.start_block()
if x == 0:
writer.write('if')
elif condition is None:
writer.write('else')
else:
writer.write('elif')
if condition:
condition_bits = if_condition_to_bits(condition)
for bit in condition_bits:
writer.write(' ')
writer.node(bit)
writer.end_block()
writer.body(nodelist)
writer.tag('endif')
@node('Operator')
def operator(writer, node):
writer.write(node.id)
@node(core_tags.IfEqualNode)
def if_equal(writer, node):
writer.start_block()
writer.write('if ')
writer.node(node.var1)
if node.negate:
writer.write(' != ')
else:
writer.write(' == ')
writer.node(node.var2)
writer.end_block()
writer.body(node.nodelist_true)
if node.nodelist_false:
writer.tag('else')
writer.body(node.nodelist_false)
writer.tag('endif')
@node(loader_tags.BlockNode)
def block(writer, node):
writer.tag('block ' + node.name.replace('-', '_').rstrip('_'))
while node.parent is not None:
node = node.parent
writer.body(node.nodelist)
writer.tag('endblock')
@node(loader_tags.ExtendsNode)
def extends(writer, node):
writer.start_block()
writer.write('extends ')
writer.node(node.parent_name)
writer.end_block()
writer.body(node.nodelist)
@node(loader_tags.IncludeNode)
def include(writer, node):
writer.start_block()
writer.write('include ')
writer.node(node.template)
writer.end_block()
@node(core_tags.CycleNode)
def cycle(writer, node):
if not writer.in_loop:
writer.warn('Untranslatable free cycle (cycle outside loop)', node=node)
return
if node.variable_name is not None:
writer.start_block()
writer.write('set %s = ' % node.variable_name)
else:
writer.start_variable()
writer.write('loop.cycle(')
for idx, var in enumerate(node.raw_cycle_vars):
if idx:
writer.write(', ')
writer.node(var)
writer.write(')')
if node.variable_name is not None:
writer.end_block()
else:
writer.end_variable()
@node(core_tags.FilterNode)
def filter(writer, node):
writer.start_block()
writer.write('filter ')
writer.filters(node.filter_expr.filters, True)
writer.end_block()
writer.body(node.nodelist)
writer.tag('endfilter')
@node(core_tags.AutoEscapeControlNode)
def autoescape_control(writer, node):
original = writer.autoescape
writer.autoescape = node.setting
writer.body(node.nodelist)
writer.autoescape = original
@node(core_tags.SpacelessNode)
def spaceless(writer, node):
original = writer.spaceless
writer.spaceless = True
writer.warn('entering spaceless mode with different semantics', node)
# do the initial stripping
nodelist = list(node.nodelist)
if nodelist:
if isinstance(nodelist[0], TextNode):
nodelist[0] = TextNode(nodelist[0].s.lstrip())
if isinstance(nodelist[-1], TextNode):
nodelist[-1] = TextNode(nodelist[-1].s.rstrip())
writer.body(nodelist)
writer.spaceless = original
@node(core_tags.TemplateTagNode)
def template_tag(writer, node):
tag = {
'openblock': writer.block_start_string,
'closeblock': writer.block_end_string,
'openvariable': writer.variable_start_string,
'closevariable': writer.variable_end_string,
'opencomment': writer.comment_start_string,
'closecomment': writer.comment_end_string,
'openbrace': '{',
'closebrace': '}'
}.get(node.tagtype)
if tag:
writer.start_variable()
writer.literal(tag)
writer.end_variable()
@node(core_tags.URLNode)
def url_tag(writer, node):
#writer.warn('url node used. make sure to provide a proper url() '
# 'function', node)
if node.asvar:
writer.start_block()
writer.write('set %s = ' % node.asvar)
else:
writer.start_variable()
writer.write('url(')
writer.node(node.view_name)
for arg in node.args:
writer.write(', ')
writer.node(arg)
for key, arg in node.kwargs.items():
writer.write(', %s=' % key)
writer.node(arg)
writer.write(')')
if node.asvar:
writer.end_block()
else:
writer.end_variable()
@node(core_tags.WidthRatioNode)
def width_ratio(writer, node):
writer.warn('widthratio expanded into formula. You may want to provide '
'a helper function for this calculation', node)
writer.start_variable()
writer.write('(')
writer.node(node.val_expr)
writer.write(' / ')
writer.node(node.max_expr)
writer.write(' * ')
writer.write(str(int(node.max_width)))
writer.write(')|round|int')
writer.end_variable(always_safe=True)
@node(core_tags.WithNode)
def with_block(writer, node):
writer.start_block()
writer.write('with ')
for x, (key, value) in enumerate(node.extra_context.items()):
if x:
writer.write(', ')
writer.write(key)
writer.write('=')
writer.node(value)
writer.end_block()
writer.body(node.nodelist)
writer.tag('endwith')
@node(core_tags.RegroupNode)
def regroup(writer, node):
if node.expression.var.literal:
writer.warn('literal in groupby filter used. Behavior in that '
'situation is undefined and translation is skipped.', node)
return
elif node.expression.filters:
writer.warn('filters in groupby filter used. Behavior in that '
'situation is undefined which is most likely a bug '
'in your code. Filters were ignored.', node)
writer.start_block()
writer.write('set %s = ' % node.var_name)
writer.node(node.target)
writer.write('|groupby(')
writer.literal(node.expression.var.var)
writer.write(')')
writer.end_block()
@node(core_tags.LoadNode)
def warn_load(writer, node):
#writer.warn('load statement used which was ignored on conversion', node)
pass
@node(i18n_tags.GetAvailableLanguagesNode)
def get_available_languages(writer, node):
writer.warn('make sure to provide a get_available_languages function', node)
writer.tag('set %s = get_available_languages()' %
writer.translate_variable_name(node.variable))
@node(i18n_tags.GetCurrentLanguageNode)
def get_current_language(writer, node):
writer.warn('make sure to provide a get_current_language function', node)
writer.tag('set %s = get_current_language()' %
writer.translate_variable_name(node.variable))
@node(i18n_tags.GetCurrentLanguageBidiNode)
def get_current_language_bidi(writer, node):
writer.warn('make sure to provide a get_current_language_bidi function', node)
writer.tag('set %s = get_current_language_bidi()' %
writer.translate_variable_name(node.variable))
@node(i18n_tags.TranslateNode)
def simple_gettext(writer, node):
writer.warn('i18n system used, make sure to install translations', node)
writer.start_variable()
writer.write('_(')
writer.node(node.value)
writer.write(')')
writer.end_variable()
@node(i18n_tags.BlockTranslateNode)
def translate_block(writer, node):
first_var = []
variables = set()
def touch_var(name):
variables.add(name)
if not first_var:
first_var.append(name)
def dump_token_list(tokens):
for token in tokens:
if token.token_type == TOKEN_TEXT:
writer.write(token.contents)
elif token.token_type == TOKEN_VAR:
writer.print_expr(token.contents)
touch_var(token.contents)
writer.warn('i18n system used, make sure to install translations', node)
writer.start_block()
writer.write('trans')
idx = -1
for idx, (key, var) in enumerate(node.extra_context.items()):
if idx:
writer.write(',')
writer.write(' %s=' % key)
touch_var(key)
writer.node(var.filter_expression)
have_plural = False
plural_var = None
if node.plural and node.countervar and node.counter:
have_plural = True
plural_var = node.countervar
if plural_var not in variables:
if idx > -1:
writer.write(',')
touch_var(plural_var)
writer.write(' %s=' % plural_var)
writer.node(node.counter)
writer.end_block()
dump_token_list(node.singular)
if node.plural and node.countervar and node.counter:
writer.start_block()
writer.write('pluralize')
if node.countervar != first_var[0]:
writer.write(' ' + node.countervar)
writer.end_block()
dump_token_list(node.plural)
writer.tag('endtrans')
@node("SimpleNode")
def simple_tag(writer, node):
"""Check if the simple tag exist as a filter in """
name = writer.get_simple_tag_name(node)
if (
writer.env
and name not in writer.env.globals
and name not in writer.env.filters
):
writer.warn('Tag %s probably doesn\'t exist in Jinja' % name)
if node.target_var:
writer.start_block()
writer.write('set %s=%s' % (node.target_var, name))
else:
writer.start_variable()
writer.write(name)
writer.write('(')
has_args = False
if node.args:
has_args = True
for idx, var in enumerate(node.args):
if idx:
writer.write(', ')
writer.node(var)
if node.kwargs:
for idx, (key, val) in enumerate(node.kwargs.items()):
if has_args or idx:
writer.write(', ')
writer.write('%s=' % key)
writer.node(val)
writer.write(')')
if node.target_var:
writer.end_block()
else:
writer.end_variable()
@node("InclusionNode")
def inclusion_tag(writer, node):
name = writer.get_simple_tag_name(node)
if (
writer.env
and name not in writer.env.globals
and name not in writer.env.filters
):
writer.warn('Tag %s probably doesn\'t exist in Jinja' % name)
writer.start_variable()
writer.write(name)
writer.write('(')
has_args = False
if node.args:
has_args = True
for idx, var in enumerate(node.args):
if idx:
writer.write(', ')
writer.node(var)
if node.kwargs:
for idx, (key, val) in enumerate(node.kwargs.items()):
if has_args or idx:
writer.write(', ')
writer.write('%s=' % key)
writer.node(val)
writer.write(')')
writer.end_variable()
@node(StaticNode)
def static_tag(writer: Writer, node: StaticNode):
if node.varname:
writer.start_block()
writer.write('set %s=static(' % node.varname)
else:
writer.start_variable()
writer.write('static(')
writer.node(node.path)
writer.write(')')
if node.varname:
writer.end_block()
else:
writer.end_variable()
@node(CsrfTokenNode)
def csrf_tag(writer: Writer, node: CsrfTokenNode):
writer.start_variable()
writer.write('csrf_token()')
writer.end_variable()
@node(VerbatimNode)
def verbatim_tag(writer: Writer, node: VerbatimNode):
writer.tag('raw')
writer.write(node.content)
writer.tag('endraw')
@node(LoremNode)
def lorem_tag(writer: Writer, node: LoremNode):
method, count = node.method, node.count
uses_html = method == 'p'
if method == 'w':
min_max = count
count = 1
else:
min_max = None
writer.start_variable()
func_string = 'lipsum(n={count}, html={uses_html}'
if min_max:
func_string += ', min={min_max}, max={min_max}'
func_string += ')'
writer.write(func_string.format(
count=count,
uses_html=str(uses_html),
min_max=min_max,
))
writer.end_variable()
# get rid of node now, it shouldn't be used normally
del node
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class DisputeCloseBuyerTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
def run_test(self):
alice = self.nodes[0]
bob = self.nodes[1]
charlie = self.nodes[2]
# generate some coins and send them to bob
generated_coins = 10
time.sleep(4)
api_url = bob["gateway_url"] + "wallet/address"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
address = resp["address"]
elif r.status_code == 404:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Address endpoint not found")
else:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Unknown response")
self.send_bitcoin_cmd("sendtoaddress", address, generated_coins)
time.sleep(20)
# create a profile for charlie
pro = {"name": "Charlie"}
api_url = charlie["gateway_url"] + "ob/profile"
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Profile post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseBuyerTest - FAIL: Profile POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# make charlie a moderator
with open('testdata/moderation.json') as listing_file:
moderation_json = json.load(listing_file, object_pairs_hook=OrderedDict)
api_url = charlie["gateway_url"] + "ob/moderator"
r = requests.put(api_url, data=json.dumps(moderation_json, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Moderator post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseBuyerTest - FAIL: Moderator POST failed. Reason: %s", resp["reason"])
moderatorId = charlie["peerId"]
time.sleep(4)
# post listing to alice
with open('testdata/listing.json') as listing_file:
listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
listing_json["moderators"] = [moderatorId]
api_url = alice["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Listing post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseBuyerTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# get listing hash
api_url = alice["gateway_url"] + "ipns/" + alice["peerId"] + "/listings.json"
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't get listing index")
resp = json.loads(r.text)
listingId = resp[0]["hash"]
# bob send order
with open('testdata/order_direct.json') as order_file:
order_json = json.load(order_file, object_pairs_hook=OrderedDict)
order_json["items"][0]["listingHash"] = listingId
order_json["moderator"] = moderatorId
api_url = bob["gateway_url"] + "ob/purchase"
r = requests.post(api_url, data=json.dumps(order_json, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Purchase post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
self.print_logs(alice, "ob.log")
raise TestFailure("DisputeCloseBuyerTest - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
orderId = resp["orderId"]
payment_address = resp["paymentAddress"]
payment_amount = resp["amount"]
# check the purchase saved correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Bob purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Bob incorrectly saved as funded")
# check the sale saved correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Alice purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Alice incorrectly saved as funded")
# fund order
spend = {
"address": payment_address,
"amount": payment_amount,
"feeLevel": "NORMAL"
}
api_url = bob["gateway_url"] + "wallet/spend"
r = requests.post(api_url, data=json.dumps(spend, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Spend post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseBuyerTest - FAIL: Spend POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# check bob detected payment
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Bob failed to detect his payment")
if resp["funded"] == False:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Bob incorrectly saved as unfunded")
# check alice detected payment
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Alice failed to detect payment")
if resp["funded"] == False:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Alice incorrectly saved as unfunded")
# Bob open dispute
dispute = {
"orderId": orderId,
"claim": "Bastard ripped me off"
}
api_url = bob["gateway_url"] + "ob/opendispute/"
r = requests.post(api_url, data=json.dumps(dispute, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseBuyerTest - FAIL: OpenDispute post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseBuyerTest - FAIL: OpenDispute POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# Bob check dispute opened correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "DISPUTED":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Bob failed to detect his dispute")
# Alice check dispute opened correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "DISPUTED":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Alice failed to detect the dispute")
# Charlie check dispute opened correctly
api_url = charlie["gateway_url"] + "ob/case/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load case from Clarlie")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if resp["state"] != "DISPUTED":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Charlie failed to detect the dispute")
# Charlie close dispute
dispute_resolution = {
"OrderID": orderId,
"Resolution": "I'm siding with Bob",
"BuyerPercentage": 100,
"VendorPercentage": 0
}
api_url = charlie["gateway_url"] + "ob/closedispute/"
r = requests.post(api_url, data=json.dumps(dispute_resolution, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseBuyerTest - FAIL: CloseDispute post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseBuyerTest - FAIL: CloseDispute POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# Alice check dispute closed correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "DECIDED":
self.print_logs(alice, "ob.log")
raise TestFailure("DisputeCloseBuyerTest - FAIL: Alice failed to detect the dispute resolution")
# Bob check dispute closed correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if resp["state"] != "DECIDED":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Bob failed to detect the dispute resolution")
# Charlie check dispute closed correctly
api_url = charlie["gateway_url"] + "ob/case/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load case from Charlie")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if resp["state"] != "RESOLVED":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Charlie failed to detect the dispute resolution")
        # Bob release funds
release = {
"OrderID": orderId,
}
api_url = bob["gateway_url"] + "ob/releasefunds/"
r = requests.post(api_url, data=json.dumps(release, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseBuyerTest - FAIL: ReleaseFunds post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseBuyerTest - FAIL: ReleaseFunds POST failed. Reason: %s", resp["reason"])
time.sleep(20)
self.send_bitcoin_cmd("generate", 1)
time.sleep(2)
# Check bob received payout
api_url = bob["gateway_url"] + "wallet/balance"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
confirmed = int(resp["confirmed"])
#unconfirmed = int(resp["unconfirmed"])
if confirmed <= (generated_coins*100000000) - payment_amount:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Bob failed to detect dispute payout")
elif r.status_code == 404:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Receive coins endpoint not found")
else:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Unknown response")
# Bob check payout transaction recorded
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if len(resp["paymentAddressTransactions"]) != 2:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Bob failed to record payout transaction")
if resp["state"] != "RESOLVED":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Bob failed to set state to RESOLVED")
# Alice check payout transaction recorded
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if len(resp["paymentAddressTransactions"]) != 2:
raise TestFailure("DisputeCloseBuyerTest - FAIL: Alice failed to record payout transaction")
if resp["state"] != "RESOLVED":
raise TestFailure("DisputeCloseBuyerTest - FAIL: Alice failed to set state to RESOLVED")
print("DisputeCloseBuyerTest - PASS")
if __name__ == '__main__':
print("Running DisputeCloseBuyerTest")
DisputeCloseBuyerTest().main(["--regtest", "--disableexchangerates"])
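def _post_json(node, endpoint, payload, step):
    # Illustrative sketch (not part of the original test): the POST-and-check
    # pattern repeated throughout run_test() above, factored into a helper.
    # 'node', 'endpoint', 'payload' and 'step' are hypothetical parameter names.
    r = requests.post(node["gateway_url"] + endpoint, data=json.dumps(payload, indent=4))
    if r.status_code == 404:
        raise TestFailure("DisputeCloseBuyerTest - FAIL: %s endpoint not found" % step)
    elif r.status_code != 200:
        resp = json.loads(r.text)
        raise TestFailure("DisputeCloseBuyerTest - FAIL: %s failed. Reason: %s" % (step, resp["reason"]))
    return r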
|
|
# Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
import neuroptikon
from os import path
from neuro_object import NeuroObject
from neurite import Neurite
from arborization import Arborization
from gap_junction import GapJunction
from innervation import Innervation
from stimulus import Stimulus
from synapse import Synapse
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
from pydispatch import dispatcher
class Neuron(NeuroObject):
class Polarity: # pylint: disable=W0232
UNIPOLAR = 'UNIPOLAR'
BIPOLAR = 'BIPOLAR'
PSEUDOUNIPOLAR = 'PSEUDOUNIPOLAR'
MULTIPOLAR = 'MULTIPOLAR'
class Function: # pylint: disable=W0232
SENSORY = 'SENSORY'
INTERNEURON = 'INTERNEURON'
MOTOR = 'MOTOR'
#TODO Refactor neuron image into an object
Functions = [Function.SENSORY, Function.INTERNEURON, Function.MOTOR]
def __init__(self, network, neuronClass = None, *args, **keywordArgs):
"""
Neurons represent individual neural cells in the network.
You create a neuron by messaging the network:
>>> neuron1 = network.createNeuron(...)
"""
# Upconvert old 'function' singleton param to list expected by new 'functions' param.
        if 'function' in keywordArgs:
            keywordArgs['functions'] = set([keywordArgs['function']])
            del keywordArgs['function']
        # Upconvert old 'neurotransmitter' singleton param to list expected by new 'neurotransmitters' param.
        if 'neurotransmitter' in keywordArgs:
            keywordArgs['neurotransmitters'] = [keywordArgs['neurotransmitter']]
            del keywordArgs['neurotransmitter']
# Pull out the keyword arguments specific to this class before we call super.
# We need to do this so we can know if the caller specified an argument or not.
# For example, the caller might specify a neuron class and one attribute to override. We need to know which attributes _not_ to set.
localAttrNames = ['activation', 'functions', 'neurotransmitters', 'polarity', 'region', 'neuronImage', 'links']
localKeywordArgs = {}
for attrName in localAttrNames:
if attrName in keywordArgs:
localKeywordArgs[attrName] = keywordArgs[attrName]
del keywordArgs[attrName]
NeuroObject.__init__(self, network, *args, **keywordArgs)
self._neurites = []
self.neuronClass = neuronClass
self.activation = None
self._functions = set()
self.neurotransmitters = []
self.polarity = None
self.region = None
self._synapses = []
self.neuronImage = []
self.links = []
for attrName in localAttrNames:
if attrName == 'functions':
attrValue = set()
elif attrName in ('neurotransmitters', 'links', 'neuronImage'):
attrValue = []
else:
attrValue = None
if attrName in localKeywordArgs:
# The user has explicitly set the attribute.
if attrName == 'functions':
attrValue = set(localKeywordArgs[attrName])
elif attrName == 'neuronImage':
for img in localKeywordArgs[attrName]:
if img['path']:
imageLabel = img['label']
imageLocation = img['path']
myImage = self.Img(imageLabel, imageLocation)
attrValue.append(myImage)
else:
attrValue = localKeywordArgs[attrName]
elif self.neuronClass:
attrValue = getattr(self.neuronClass, attrName) # Inherit the value from the class
if attrName == 'functions':
attrName = '_functions'
setattr(self, attrName, attrValue)
if self.region is not None:
self.region.neurons.append(self)
def defaultName(self):
# Try to build a name based on connections.
# TODO: should send/received be ignored, i.e. should the connector always be '- '?
connections = []
for connection in self.connections():
sends = receives = False
if isinstance(connection, Arborization):
otherName = connection.region.name
sends = connection.sendsOutput
receives = connection.receivesInput
elif isinstance(connection, GapJunction):
neurons = [neurite.neuron() for neurite in connection.neurites()]
neurons.remove(self)
otherName = neurons[0].name
sends = receives = True
elif isinstance(connection, Innervation):
otherName = connection.muscle.name
sends = True
elif isinstance(connection, Stimulus):
otherName = connection.name
receives = True
elif isinstance(connection, Synapse):
if connection.preSynapticNeurite.neuron() == self:
# TODO: check if other neuron names are nameless
otherName = ', '.join([(partner.name if isinstance(partner, Neuron) else partner.neuron().name) for partner in connection.postSynapticPartners])
sends = True
else:
otherName = connection.preSynapticNeurite.neuron().name
receives = True
if otherName is None:
return None
if sends and receives:
connector = '<->'
elif sends:
connector = '->'
elif receives:
connector = '<-'
else:
connector = '-'
connections += [connector + otherName]
return 'Neuron ' + ' & '.join(connections)
@classmethod
def _fromXMLElement(cls, network, xmlElement):
#TODO need to add links and images when I get this working
neuron = super(Neuron, cls)._fromXMLElement(network, xmlElement)
classId = xmlElement.findtext('Class')
if classId is None:
classId = xmlElement.findtext('class')
neuron.neuronClass = neuroptikon.library.neuronClass(classId)
if classId is not None and neuron.neuronClass is None:
            raise ValueError(gettext('Neuron class "%s" does not exist') % (classId))
neuron.neurotransmitters = []
for ntName in ['Neurotransmitter', 'neurotransmitter']:
for ntElement in xmlElement.findall(ntName):
ntId = ntElement.text
if ntId is not None:
nt = neuroptikon.library.neurotransmitter(ntId)
if nt is None:
                        raise ValueError(gettext('Neurotransmitter "%s" does not exist') % (ntId))
else:
neuron.neurotransmitters.append(nt)
neuron.activation = xmlElement.findtext('Activation')
if neuron.activation is None:
neuron.activation = xmlElement.findtext('activation')
neuron._functions = set()
for functionName in ['Function', 'function']:
for functionElement in xmlElement.findall(functionName):
if functionElement.text in Neuron.Functions:
neuron.setHasFunction(functionElement.text, True)
neuron.polarity = xmlElement.findtext('Polarity')
if neuron.polarity is None:
neuron.polarity = xmlElement.findtext('polarity')
regionId = xmlElement.get('somaRegionId')
neuron.region = network.objectWithId(regionId)
if regionId is not None and neuron.region is None:
            raise ValueError(gettext('Region with id "%s" does not exist') % (regionId))
if neuron.region is not None:
neuron.region.neurons.append(neuron)
neuron._synapses = []
neuron._neurites = []
for neuriteElement in xmlElement.findall('Neurite'):
neurite = Neurite._fromXMLElement(network, neuriteElement)
if neurite is None:
                raise ValueError(gettext('Could not create neurite'))
neurite.root = neuron
neuron._neurites.append(neurite)
network.addObject(neurite)
return neuron
def _toXMLElement(self, parentElement):
#TODO need to add links and images when I get this working
neuronElement = NeuroObject._toXMLElement(self, parentElement)
if self.neuronClass is not None:
ElementTree.SubElement(neuronElement, 'Class').text = self.neuronClass.identifier
for neurotransmitter in self.neurotransmitters:
ElementTree.SubElement(neuronElement, 'Neurotransmitter').text = neurotransmitter.identifier
if self.activation is not None:
ElementTree.SubElement(neuronElement, 'Activation').text = self.activation
for function in self._functions:
ElementTree.SubElement(neuronElement, 'Function').text = function
if self.polarity is not None:
ElementTree.SubElement(neuronElement, 'Polarity').text = self.polarity
if self.region is not None:
ElementTree.SubElement(neuronElement, 'SomaRegionId').text = str(self.region.networkId)
for neurite in self._neurites:
neurite._toXMLElement(neuronElement)
return neuronElement
def _needsScriptRef(self):
return len(self._neurites) > 0 or NeuroObject._needsScriptRef(self)
def _creationScriptParams(self, scriptRefs):
args, keywords = NeuroObject._creationScriptParams(self, scriptRefs)
if self.neuronClass is not None:
keywords['neuronClass'] = 'library.neuronClass(\'' + self.neuronClass.identifier + '\')'
if len(self.neurotransmitters) > 0:
ntCalls = []
for neurotransmitter in self.neurotransmitters:
ntCalls.append('library.neurotransmitter(\'' + neurotransmitter.identifier + '\')')
keywords['neurotransmitters'] = '[' + ', '.join(ntCalls) + ']'
if self.activation is not None:
keywords['activation'] = '\'' + self.activation + '\'' # TODO: this should be 'NeuralActivation.' + self.activation
if len(self._functions) > 0:
keywords['functions'] = '[Neuron.Function.' + ', Neuron.Function.'.join(self._functions) + ']'
if self.polarity is not None:
keywords['polarity'] = 'Neuron.Polarity.' + self.polarity
if self.region is not None:
keywords['region'] = scriptRefs[self.region.networkId]
return (args, keywords)
def _creationScriptChildren(self):
return NeuroObject._creationScriptChildren(self) + self._neurites
def createNeurite(self, *args, **keywords):
"""
DEPRECATED: Please use :meth:`extendNeurite() <Network.Neuron.Neuron.extendNeurite>` instead.
"""
return self.extendNeurite(*args, **keywords)
def extendNeurite(self, *args, **keywords):
"""
Create and return a :class:`neurite <Network.Neurite.Neurite>` object that extends from the soma of this neuron.
"""
neurite = Neurite(self.network, self, *args, **keywords)
self._neurites.append(neurite)
self.network.addObject(neurite)
return neurite
def neurites(self, recurse = True):
"""
        Return a list of all :class:`neurites <Network.Neurite.Neurite>` extending from this neuron.
If recurse is True then all subsequently extending neurites will be included with the neurites that extend from the soma.
If no neurites extend from the soma of this neuron then an empty list will be returned.
"""
neurites = list(self._neurites)
if recurse:
for neurite in self._neurites:
neurites += neurite.neurites()
return neurites
def arborize(self, region, sendsOutput = True, receivesInput = True, *args, **keywordArgs):
"""
Convenience method for creating a :class:`neurite <Network.Neurite.Neurite>` and having it :class:`arborize <Network.Neurite.Neurite.arborize>` a :class:`region <Network.Region.Region>`.
Returns the arborization object that is created.
"""
return self.extendNeurite().arborize(region, sendsOutput, receivesInput, *args, **keywordArgs)
def arborizations(self):
"""
Return a list of all :class:`arborizations <Network.Arborization.Arborization>` extending from this neuron.
If this neuron does not arborize any regions then an empty list will be returned.
"""
arborizations = []
for neurite in self._neurites:
arborizations += neurite.arborizations()
return arborizations
def synapseOn(self, otherObject, *args, **keywordArgs):
"""
Convenience method that creates a :class:`neurite <Network.Neurite.Neurite>` for this neuron and then creates a :class:`synapse <Network.Synapse.Synapse>` with the other object.
Returns the synapse object that is created.
"""
neurite = self.extendNeurite()
return neurite.synapseOn(otherObject, activation = self.activation, *args, **keywordArgs)
def synapses(self, includePre = True, includePost = True):
"""
        Return a list of all :class:`synapses <Network.Synapse.Synapse>` in which the :class:`neurites <Network.Neurite.Neurite>` of this neuron are pre- or post-synaptic.
If includePre is False then synapses where this neuron is pre-synaptic will be excluded. If includePost is False then synapses where this neuron is post-synaptic will be excluded.
If this neuron does not form a synapse with any other neurons then an empty list will be returned.
"""
synapses = []
if includePost:
synapses += self._synapses
for neurite in self._neurites:
synapses += neurite.synapses(includePre = includePre, includePost = includePost)
return synapses
def gapJunctionWith(self, otherObject, *args, **keywordArgs):
"""
Convenience method that creates a :class:`neurite <Network.Neurite.Neurite>` for this neuron and then creates a :class:`gap junction <Network.GapJunction.GapJunction>` with the other object.
Returns the gap junction object that is created.
"""
neurite = self.extendNeurite()
return neurite.gapJunctionWith(otherObject, *args, **keywordArgs)
def gapJunctions(self):
"""
        Return a list of all :class:`gap junctions <Network.GapJunction.GapJunction>` in which the :class:`neurites <Network.Neurite.Neurite>` of this neuron are involved.
If this neuron does not form a gap junction with any other neurons then an empty list will be returned.
"""
junctions = []
for neurite in self._neurites:
junctions += neurite.gapJunctions()
return junctions
def innervate(self, muscle, *args, **keywordArgs):
"""
Convenience method that creates a :class:`neurite <Network.Neurite.Neurite>` and has it innervate the :class:`muscle <Network.Muscle.Muscle>`.
Returns the :class:`innervation <Network.Innervation.Innervation>` object that is created.
"""
neurite = self.extendNeurite()
return neurite.innervate(muscle, *args, **keywordArgs)
def innervations(self):
"""
        Return a list of all :class:`innervations <Network.Innervation.Innervation>` involving this neuron's :class:`neurites <Network.Neurite.Neurite>`.
If this neuron does not innervate any :class:`muscles <Network.Muscle.Muscle>` then an empty list will be returned.
"""
innervations = []
for neurite in self._neurites:
innervations += neurite.innervations()
return innervations
def connections(self, recurse = True):
"""
Return a list of all objects that connect to this neuron and optionally any extending :class:`neurites <Network.Neurite.Neurite>`.
The list may contain any number of :class:`arborizations <Network.Arborization.Arborization>`, :class:`gap junctions <Network.GapJunction.GapJunction>`, :class:`innervations <Network.Innervation.Innervation>`, :class:`stimuli <Network.Stimulus.Stimulus>` or :class:`synapses <Network.Synapse.Synapse>`.
"""
return NeuroObject.connections(self, recurse) + self._synapses
def inputs(self, recurse = True):
"""
Return a list of all objects that send information into this neuron and optionally any extending :class:`neurites <Network.Neurite.Neurite>`.
The list may contain any number of :class:`arborizations <Network.Arborization.Arborization>`, :class:`gap junctions <Network.GapJunction.GapJunction>`, :class:`stimuli <Network.Stimulus.Stimulus>` or :class:`synapses <Network.Synapse.Synapse>`.
"""
return NeuroObject.inputs(self, recurse) + self._synapses
def outputs(self, recurse = True):
"""
Return a list of all objects that receive information from this neuron and optionally any extending :class:`neurites <Network.Neurite.Neurite>`.
The list may contain any number of :class:`arborizations <Network.Arborization.Arborization>`, :class:`gap junctions <Network.GapJunction.GapJunction>`, :class:`innervations <Network.Innervation.Innervation>` or :class:`synapses <Network.Synapse.Synapse>`.
"""
return NeuroObject.outputs(self, recurse)
def childObjects(self):
return list(self._neurites)
def dependentObjects(self):
return NeuroObject.dependentObjects(self) + self._synapses + self.neurites()
def disconnectFromNetwork(self):
if self.region:
self.region.neurons.remove(self)
def setHasFunction(self, function, hasFunction):
"""
Set whether or not this neuron has the indicated function.
>>> neuron1.setHasFunction(Neuron.Function.SENSORY, True)
The function argument should be one of the attributes of Neuron.Function.
The hasFunction argument should indicate whether or not this neuron has the indicated function.
"""
if hasFunction and function not in self._functions:
self._functions.add(function)
dispatcher.send(('set', 'functions'), self)
elif not hasFunction and function in self._functions:
self._functions.remove(function)
dispatcher.send(('set', 'functions'), self)
def hasFunction(self, function):
"""
Return whether or not this neuron has the indicated function.
>>> # Show all sensory neurons in red.
>>> if neuron.hasFunction(Neuron.Function.SENSORY):
... display.setVisibleColor(neuron, (1.0, 0.0, 0.0))
The function argument should be one of the attributes of Neuron.Function.
"""
return function in self._functions
def searchPost(self, preRegion = None, postRegion = None, postNeuron = None, activation = None, neurotransmitter = None):
"""
Searches for post-synaptic sites
"""
# limit synapse to where neuron is presynaptic
synapses = [connection for connection in self.synapses() if connection.preSynapticNeuron() == self]
synapses = self._filterSynapsesForSearch(synapses, preRegion = preRegion, postRegion = postRegion, activation = activation)
# get post synaptic neurons
neurons = []
for synapse in synapses:
neurons.extend(synapse.postSynapticNeurons())
neurons = self._filterNeuronsForSearch(neurons, neurotransmitter=neurotransmitter, name=postNeuron)
return neurons
def searchPre(self, preRegion = None, postRegion = None, preNeuron = None, activation = None, neurotransmitter = None):
"""
Searches for pre-synaptic sites
"""
# limit synapse to where neuron is postsynaptic
synapses = [connection for connection in self.synapses() if self in connection.postSynapticNeurons()]
synapses = self._filterSynapsesForSearch(synapses, preRegion = preRegion, postRegion = postRegion, activation = activation)
# get pre synaptic neurons
neurons = []
for synapse in synapses:
neurons.append(synapse.preSynapticNeuron())
neurons = self._filterNeuronsForSearch(neurons, neurotransmitter = neurotransmitter, name = preNeuron)
return neurons
def _filterNeuronsForSearch(self, neurons, neurotransmitter = None, name = None):
from re import search
if neurotransmitter:
neurons = [neuron for neuron in neurons if neurotransmitter in neuron.neurotransmitters]
if name:
neurons = [neuron for neuron in neurons if search(name, neuron.name)]
return neurons
def _filterSynapsesForSearch(self, synapses, preRegion = None, postRegion = None, activation = None):
from re import search
if preRegion:
synapses = [synapse for synapse in synapses if synapse.preSynapticRegion and search(preRegion, synapse.preSynapticRegion.name)]
if postRegion:
synapses = [synapse for synapse in synapses if synapse.postSynapticRegion and search(postRegion, synapse.postSynapticRegion.name)]
if activation:
synapses = [synapse for synapse in synapses if synapse.activation and synapse.activation == activation]
return synapses
@classmethod
def _defaultVisualizationParams(cls):
params = NeuroObject._defaultVisualizationParams()
params['shape'] = 'Ball'
params['size'] = (.01, .01, .01)
params['sizeIsAbsolute'] = True
return params
def defaultVisualizationParams(self):
params = self.__class__._defaultVisualizationParams()
if self.region:
params['parent'] = self.region
return params
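def _exampleNeuronUsage(network):
    # Illustrative sketch (not part of the original module): typical use of the
    # Neuron API defined above. 'network' is assumed to be a Network instance
    # providing createNeuron(), as referenced in the __init__ docstring.
    sensory = network.createNeuron(functions = [Neuron.Function.SENSORY])
    motor = network.createNeuron(functions = [Neuron.Function.MOTOR])
    # synapseOn() implicitly extends a neurite from the soma of 'sensory'.
    sensory.synapseOn(motor)
    return sensory.hasFunction(Neuron.Function.SENSORY)    # True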
|
|
from pepnet import SequenceInput, Output, Predictor
from pepnet.sequence_helpers import group_similar_sequences
import numpy as np
from sklearn.model_selection import LeaveOneGroupOut
from keras.callbacks import LearningRateScheduler
from keras.optimizers import RMSprop
from helpers import to_ic50, from_ic50, shuffle_data
from data import (
load_mass_spec_hits,
generate_negatives_from_proteome,
load_pseudosequences)
from callback_auc import CallbackAUC
import seaborn  # importing seaborn applies its plot styling
import matplotlib.pyplot as plt
N_EPOCHS = 10
TRAINING_DECOY_FACTOR = 5
DECOY_WEIGHT_FOR_QUANTITATIVE_ASSAYS = 0.01
TEST_DECOY_FACTOR = 99
MASS_SPEC_OUTPUT_NAME = "neon mass spec"
BATCH_SIZE = 32
INITIAL_LEARNING_RATE = RMSprop().lr.get_value() * 1.25
print(INITIAL_LEARNING_RATE)
LEARNING_DECAY_RATE = 0.9
LOSS = "mse" # "binary_crossentropy"
MERGE = "multiply"
CONV_DROPOUT = 0.25
CONV_BATCH_NORMALIZATION = True
MERGE_DROPOUT = 0
MERGE_BATCH_NORMALIZATION = False
def make_model(output_names):
mhc = SequenceInput(
length=34,
name="mhc",
encoding="index",
variable_length=True,
embedding_dim=32,
embedding_mask_zero=False,
dense_layer_sizes=[32],
dense_activation="tanh",
dense_batch_normalization=MERGE_BATCH_NORMALIZATION,
dense_dropout=MERGE_DROPOUT)
peptide = SequenceInput(
length=45,
name="peptide",
encoding="index",
add_start_tokens=True,
add_stop_tokens=True,
embedding_dim=32,
embedding_mask_zero=True,
variable_length=True,
conv_filter_sizes=[9],
conv_activation="relu",
conv_output_dim=32,
conv_dropout=CONV_DROPOUT,
conv_batch_normalization=CONV_BATCH_NORMALIZATION,
n_conv_layers=2,
# conv_weight_source=mhc,
global_pooling=True,
global_pooling_batch_normalization=True,
global_pooling_dropout=0.25,
dense_layer_sizes=[32],
dense_activation="sigmoid",
dense_batch_normalization=MERGE_BATCH_NORMALIZATION,
dense_dropout=MERGE_DROPOUT)
outputs = []
for output_name in output_names:
if "IC50" in output_name or "EC50" in output_name:
transform = from_ic50
inverse = to_ic50
activation = "sigmoid"
elif "half life" in output_name:
transform = (lambda x: np.log10(x + 1))
inverse = (lambda x: (10.0 ** x) - 1)
activation = "relu"
else:
transform = None
inverse = None
activation = "sigmoid"
output = Output(
name=output_name,
transform=transform,
inverse_transform=inverse,
activation=activation,
loss=LOSS)
print(output)
outputs.append(output)
return Predictor(
inputs=[mhc, peptide],
outputs=outputs,
merge_mode=MERGE,
training_metrics=["accuracy"])
def plot_aucs(test_name, train_aucs, test_aucs):
fig = plt.figure(figsize=(8, 8))
axes = fig.gca()
axes.plot(
np.arange(N_EPOCHS), train_aucs)
axes.plot(
np.arange(N_EPOCHS), test_aucs)
plt.xlabel("epoch")
plt.ylabel("AUC")
plt.xlim(0, 15)
plt.ylim(0.5, 1.0)
plt.legend(["train", "test (%s)" % test_name])
fig.savefig("auc_%s.png" % test_name)
def augment_with_decoys(
hit_peptides,
hit_mhc_alleles,
hit_weights,
decoy_multiple=TRAINING_DECOY_FACTOR):
n_hits = len(hit_peptides)
decoy_peptides = generate_negatives_from_proteome(
hit_peptides, factor=decoy_multiple)
n_decoys = len(decoy_peptides)
assert n_decoys == int(n_hits * decoy_multiple)
decoy_mhc_alleles = list(np.random.choice(hit_mhc_alleles, size=n_decoys))
# Mass spec validation set
mass_spec_peptides = hit_peptides + decoy_peptides
mass_spec_mhc_alleles = hit_mhc_alleles + decoy_mhc_alleles
n_mass_spec = len(mass_spec_peptides)
assert n_mass_spec == n_hits + n_decoys
Y_mass_spec = np.zeros(len(mass_spec_peptides), dtype="int32")
Y_mass_spec[:len(hit_peptides)] = 1
assert Y_mass_spec.sum() == len(hit_peptides)
weights = np.ones(n_mass_spec, dtype="float32")
hits_to_decoys = hit_weights.sum() / float(n_decoys)
weights[:n_hits] = hit_weights
weights[n_hits:] = min(1.0, hits_to_decoys)
return mass_spec_peptides, mass_spec_mhc_alleles, Y_mass_spec, weights
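# Worked example (illustrative): with TRAINING_DECOY_FACTOR = 5, 100 hits whose
# weights sum to 80 yield 500 decoys, so hits_to_decoys = 80 / 500 = 0.16 and
# every decoy receives weight min(1.0, 0.16) = 0.16, while hits keep their own weights.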
def learning_rate_schedule(epoch):
lr = INITIAL_LEARNING_RATE * LEARNING_DECAY_RATE ** epoch
print("-- setting learning rate for epoch %d to %f" % (epoch, lr))
return lr
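# Worked example (illustrative): with LEARNING_DECAY_RATE = 0.9 this is a plain
# exponential decay, e.g. epoch 0 -> 1.0x, epoch 1 -> 0.9x, epoch 5 -> 0.9**5 ~= 0.59x
# of INITIAL_LEARNING_RATE.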
def main():
mhc_pseudosequences_dict = load_pseudosequences()
hits_dict = load_mass_spec_hits()
hit_peptides = []
hit_mhc_alleles = []
hit_weights = []
for (allele, peptides) in hits_dict.items():
shuffled_peptides, _, weights = group_similar_sequences(peptides)
assert len(shuffled_peptides) == len(peptides), \
"Exepcted %d peptides but got back %d" % (
len(peptides),
len(shuffled_peptides))
hit_peptides.extend(shuffled_peptides)
hit_mhc_alleles.extend([allele] * len(peptides))
hit_weights.extend(weights)
n_hits = len(hit_peptides)
assert set(hit_mhc_alleles) == set(hits_dict.keys())
hit_weights = np.array(hit_weights)
mass_spec_peptides, mass_spec_mhc_alleles, Y_mass_spec, weights = \
augment_with_decoys(
hit_peptides=hit_peptides,
hit_mhc_alleles=hit_mhc_alleles,
hit_weights=hit_weights)
n_mass_spec = len(mass_spec_peptides)
mass_spec_peptides, mass_spec_mhc_alleles, Y_mass_spec, weights = \
shuffle_data(
peptides=mass_spec_peptides,
alleles=mass_spec_mhc_alleles,
Y=Y_mass_spec,
weights=weights)
# get the pseudosequences for all samples
mass_spec_mhc_seqs = [mhc_pseudosequences_dict[allele] for allele in mass_spec_mhc_alleles]
predictor = make_model(output_names=[MASS_SPEC_OUTPUT_NAME])
predictor.save_diagram()
output_name_list = [o.name for o in predictor.outputs]
output_is_quantitative_dict = {
output_name: any(
[(substr in output_name) for substr in ("IC50", "EC50", "half life")])
for output_name in output_name_list
}
print("Which outputs are quantitative: %s" % (output_is_quantitative_dict,))
# draw more random decoys to do PPV calculations
extra_decoy_peptides_for_ppv = generate_negatives_from_proteome(
hit_peptides, factor=TEST_DECOY_FACTOR - TRAINING_DECOY_FACTOR)
extra_decoy_mhc_seqs_for_ppv = list(
np.random.choice(
mass_spec_mhc_seqs,
size=len(extra_decoy_peptides_for_ppv)))
cv_iterator = LeaveOneGroupOut()
for train_idx, test_idx in cv_iterator.split(
mass_spec_peptides, Y_mass_spec, groups=mass_spec_mhc_alleles):
assert len(train_idx) < n_mass_spec
assert len(test_idx) < n_mass_spec
training_alleles = set([mass_spec_mhc_alleles[i] for i in train_idx])
left_out_alleles = set([mass_spec_mhc_alleles[i] for i in test_idx])
assert len(left_out_alleles) == 1, left_out_alleles
overlapping_alleles = training_alleles.intersection(left_out_alleles)
assert len(overlapping_alleles) == 0, overlapping_alleles
left_out_allele = left_out_alleles.pop()
print("\n\n===> Left out allele: %s" % left_out_allele)
print("===> Training alleles: %s" % (training_alleles,))
train_peptides = [mass_spec_peptides[i] for i in train_idx]
test_peptides = [mass_spec_peptides[i] for i in test_idx]
Y_train = Y_mass_spec[train_idx]
Y_test = Y_mass_spec[test_idx]
train_weights = weights[train_idx]
test_weights = weights[test_idx]
train_mhc_seqs = [mass_spec_mhc_seqs[i] for i in train_idx]
test_mhc_seqs = [mass_spec_mhc_seqs[i] for i in test_idx]
assert len(train_peptides) == len(Y_train) == len(train_weights) == len(train_mhc_seqs)
assert len(test_peptides) == len(Y_test) == len(test_weights) == len(test_mhc_seqs)
train_auc_callback = CallbackAUC(
name="train",
peptides=train_peptides,
mhc_seqs=train_mhc_seqs,
weights=train_weights,
labels=Y_train,
predictor=predictor)
test_auc_callback = CallbackAUC(
name="test",
peptides=test_peptides,
mhc_seqs=test_mhc_seqs,
weights=test_weights,
labels=Y_test,
predictor=predictor)
callbacks = [
train_auc_callback,
test_auc_callback,
LearningRateScheduler(learning_rate_schedule)
]
predictor.fit(
{
"peptide": train_peptides,
"mhc": train_mhc_seqs
},
Y_train,
epochs=N_EPOCHS,
sample_weight=train_weights,
callbacks=callbacks,
batch_size=BATCH_SIZE)
plot_aucs(
test_name=left_out_allele,
train_aucs=train_auc_callback.aucs[MASS_SPEC_OUTPUT_NAME],
test_aucs=test_auc_callback.aucs[MASS_SPEC_OUTPUT_NAME])
combined_peptides_for_ppv = test_peptides + extra_decoy_peptides_for_ppv
combined_mhc_seqs_for_ppv = test_mhc_seqs + extra_decoy_mhc_seqs_for_ppv
Y_combined_for_ppv = np.zeros(len(combined_peptides_for_ppv))
Y_combined_for_ppv[:len(Y_test)] = Y_test
Y_pred_for_ppv_dict = predictor.predict_scores({
"peptide": combined_peptides_for_ppv,
"mhc": combined_mhc_seqs_for_ppv})
for output_name in predictor.output_names:
print("-- %s" % output_name)
Y_pred_for_ppv = Y_pred_for_ppv_dict[output_name]
descending_indices = np.argsort(-Y_pred_for_ppv)
n_hits = Y_test.sum()
ppv = Y_combined_for_ppv[descending_indices[:n_hits]].mean()
print("----> PPV @ %dX decoys for allele=%s %0.4f" % (
TEST_DECOY_FACTOR,
left_out_allele,
ppv))
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
"""Utilities for preprocessing sequence data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import random
import json
from six.moves import range
import six
def pad_sequences(sequences, maxlen=None, dtype='int32',
padding='pre', truncating='pre', value=0.):
"""Pads sequences to the same length.
This function transforms a list of
`num_samples` sequences (lists of integers)
into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence otherwise.
    Sequences that are shorter than `num_timesteps`
    are padded with `value` until they are `num_timesteps` long.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding is the default.
# Arguments
sequences: List of lists, where each element is a sequence.
maxlen: Int, maximum length of all sequences.
dtype: Type of the output sequences.
To pad sequences with variable length strings, you can use `object`.
padding: String, 'pre' or 'post':
pad either before or after each sequence.
truncating: String, 'pre' or 'post':
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
value: Float or String, padding value.
# Returns
x: Numpy array with shape `(len(sequences), maxlen)`
# Raises
ValueError: In case of invalid values for `truncating` or `padding`,
or in case of invalid shape for a `sequences` entry.
"""
if not hasattr(sequences, '__len__'):
raise ValueError('`sequences` must be iterable.')
lengths = []
for x in sequences:
if not hasattr(x, '__len__'):
raise ValueError('`sequences` must be a list of iterables. '
'Found non-iterable: ' + str(x))
lengths.append(len(x))
num_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.unicode_)
if isinstance(value, six.string_types) and dtype != object and not is_dtype_str:
raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
"You should set `dtype=object` for variable length strings."
.format(dtype, type(value)))
x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" '
'not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s '
'is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x
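# Example (illustrative): with the defaults (padding='pre', truncating='pre'),
# shorter sequences are left-padded with zeros and longer ones are cut from the front:
#
#     >>> pad_sequences([[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]], maxlen=4)
#     array([[ 0,  1,  2,  3],
#            [ 0,  0,  4,  5],
#            [ 7,  8,  9, 10]], dtype=int32)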
def make_sampling_table(size, sampling_factor=1e-5):
"""Generates a word rank-based probabilistic sampling table.
Used for generating the `sampling_table` argument for `skipgrams`.
`sampling_table[i]` is the probability of sampling
the word i-th most common word in a dataset
(more common words should be sampled less frequently, for balance).
The sampling probabilities are generated according
to the sampling distribution used in word2vec:
```
p(word) = (min(1, sqrt(word_frequency / sampling_factor) /
(word_frequency / sampling_factor)))
```
We assume that the word frequencies follow Zipf's law (s=1) to derive
a numerical approximation of frequency(rank):
`frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))`
where `gamma` is the Euler-Mascheroni constant.
# Arguments
size: Int, number of possible words to sample.
sampling_factor: The sampling factor in the word2vec formula.
# Returns
A 1D Numpy array of length `size` where the ith entry
is the probability that a word of rank i should be sampled.
"""
gamma = 0.577
rank = np.arange(size)
rank[0] = 1
inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1. / (12. * rank)
f = sampling_factor * inv_fq
return np.minimum(1., f / np.sqrt(f))
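# Example (illustrative): the table assigns low keep-probabilities to the most
# frequent (low-rank) words and probabilities approaching 1 to rare words, and can
# be passed directly to `skipgrams(..., sampling_table=table)`:
#
#     >>> table = make_sampling_table(10000)
#     >>> table[1] < table[100] < table[9999]
#     True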
def skipgrams(sequence, vocabulary_size,
window_size=4, negative_samples=1., shuffle=True,
categorical=False, sampling_table=None, seed=None):
"""Generates skipgram word pairs.
This function transforms a sequence of word indexes (list of integers)
into tuples of words of the form:
- (word, word in the same window), with label 1 (positive samples).
- (word, random word from the vocabulary), with label 0 (negative samples).
Read more about Skipgram in this gnomic paper by Mikolov et al.:
[Efficient Estimation of Word Representations in
Vector Space](http://arxiv.org/pdf/1301.3781v3.pdf)
# Arguments
sequence: A word sequence (sentence), encoded as a list
of word indices (integers). If using a `sampling_table`,
word indices are expected to match the rank
of the words in a reference dataset (e.g. 10 would encode
the 10-th most frequently occurring token).
Note that index 0 is expected to be a non-word and will be skipped.
vocabulary_size: Int, maximum possible word index + 1
window_size: Int, size of sampling windows (technically half-window).
The window of a word `w_i` will be
`[i - window_size, i + window_size+1]`.
negative_samples: Float >= 0. 0 for no negative (i.e. random) samples.
1 for same number as positive samples.
shuffle: Whether to shuffle the word couples before returning them.
categorical: bool. if False, labels will be
integers (eg. `[0, 1, 1 .. ]`),
if `True`, labels will be categorical, e.g.
`[[1,0],[0,1],[0,1] .. ]`.
sampling_table: 1D array of size `vocabulary_size` where the entry i
encodes the probability to sample a word of rank i.
seed: Random seed.
# Returns
couples, labels: where `couples` are int pairs and
`labels` are either 0 or 1.
# Note
By convention, index 0 in the vocabulary is
a non-word and will be skipped.
"""
couples = []
labels = []
for i, wi in enumerate(sequence):
if not wi:
continue
if sampling_table is not None:
if sampling_table[wi] < random.random():
continue
window_start = max(0, i - window_size)
window_end = min(len(sequence), i + window_size + 1)
for j in range(window_start, window_end):
if j != i:
wj = sequence[j]
if not wj:
continue
couples.append([wi, wj])
if categorical:
labels.append([0, 1])
else:
labels.append(1)
if negative_samples > 0:
num_negative_samples = int(len(labels) * negative_samples)
words = [c[0] for c in couples]
random.shuffle(words)
couples += [[words[i % len(words)],
random.randint(1, vocabulary_size - 1)]
for i in range(num_negative_samples)]
if categorical:
labels += [[1, 0]] * num_negative_samples
else:
labels += [0] * num_negative_samples
if shuffle:
if seed is None:
seed = random.randint(0, 10e6)
random.seed(seed)
random.shuffle(couples)
random.seed(seed)
random.shuffle(labels)
return couples, labels
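# Example (illustrative): turn a short index sequence into (target, context)
# pairs; positive pairs are labelled 1 and randomly drawn negatives 0.
#
#     >>> couples, labels = skipgrams([1, 2, 3], vocabulary_size=10,
#     ...                             window_size=1, shuffle=False)
#     >>> couples[:4]   # the positive pairs come first when shuffle=False
#     [[1, 2], [2, 1], [2, 3], [3, 2]]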
def _remove_long_seq(maxlen, seq, label):
"""Removes sequences that exceed the maximum length.
# Arguments
maxlen: Int, maximum length of the output sequences.
seq: List of lists, where each sublist is a sequence.
label: List where each element is an integer.
# Returns
new_seq, new_label: shortened lists for `seq` and `label`.
"""
new_seq, new_label = [], []
for x, y in zip(seq, label):
if len(x) < maxlen:
new_seq.append(x)
new_label.append(y)
return new_seq, new_label
class TimeseriesGenerator(object):
"""Utility class for generating batches of temporal data.
This class takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as
stride, length of history, etc., to produce batches for
training/validation.
# Arguments
data: Indexable generator (such as list or Numpy array)
containing consecutive data points (timesteps).
            The data should be at least 2D, and axis 0 is expected
to be the time dimension.
targets: Targets corresponding to timesteps in `data`.
It should have same length as `data`.
length: Length of the output sequences (in number of timesteps).
sampling_rate: Period between successive individual timesteps
within sequences. For rate `r`, timesteps
`data[i]`, `data[i-r]`, ... `data[i - length]`
            are used to create a sample sequence.
stride: Period between successive output sequences.
For stride `s`, consecutive output samples would
be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
start_index: Data points earlier than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Data points later than `end_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
reverse: Boolean: if `true`, timesteps in each output sample will be
in reverse chronological order.
batch_size: Number of timeseries samples in each batch
(except maybe the last one).
# Returns
A [Sequence](/utils/#sequence) instance.
# Examples
```python
from keras.preprocessing.sequence import TimeseriesGenerator
import numpy as np
data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
data_gen = TimeseriesGenerator(data, targets,
length=10, sampling_rate=2,
batch_size=2)
assert len(data_gen) == 20
batch_0 = data_gen[0]
x, y = batch_0
assert np.array_equal(x,
np.array([[[0], [2], [4], [6], [8]],
[[1], [3], [5], [7], [9]]]))
assert np.array_equal(y,
np.array([[10], [11]]))
```
"""
def __init__(self, data, targets, length,
sampling_rate=1,
stride=1,
start_index=0,
end_index=None,
shuffle=False,
reverse=False,
batch_size=128):
if len(data) != len(targets):
raise ValueError('Data and targets have to be' +
' of same length. '
'Data length is {}'.format(len(data)) +
' while target length is {}'.format(len(targets)))
self.data = data
self.targets = targets
self.length = length
self.sampling_rate = sampling_rate
self.stride = stride
self.start_index = start_index + length
if end_index is None:
end_index = len(data) - 1
self.end_index = end_index
self.shuffle = shuffle
self.reverse = reverse
self.batch_size = batch_size
if self.start_index > self.end_index:
raise ValueError('`start_index+length=%i > end_index=%i` '
'is disallowed, as no part of the sequence '
'would be left to be used as current step.'
% (self.start_index, self.end_index))
def __len__(self):
return (self.end_index - self.start_index +
self.batch_size * self.stride) // (self.batch_size * self.stride)
def _empty_batch(self, num_rows):
samples_shape = [num_rows, self.length // self.sampling_rate]
samples_shape.extend(self.data.shape[1:])
targets_shape = [num_rows]
targets_shape.extend(self.targets.shape[1:])
return np.empty(samples_shape), np.empty(targets_shape)
def __getitem__(self, index):
if self.shuffle:
rows = np.random.randint(
self.start_index, self.end_index + 1, size=self.batch_size)
else:
i = self.start_index + self.batch_size * self.stride * index
rows = np.arange(i, min(i + self.batch_size *
self.stride, self.end_index + 1), self.stride)
samples, targets = self._empty_batch(len(rows))
for j, row in enumerate(rows):
indices = range(rows[j] - self.length, rows[j], self.sampling_rate)
samples[j] = self.data[indices]
targets[j] = self.targets[rows[j]]
if self.reverse:
return samples[:, ::-1, ...], targets
return samples, targets
def get_config(self):
'''Returns the TimeseriesGenerator configuration as Python dictionary.
# Returns
A Python dictionary with the TimeseriesGenerator configuration.
'''
data = self.data
if type(self.data).__module__ == np.__name__:
data = self.data.tolist()
try:
json_data = json.dumps(data)
        except TypeError:
raise TypeError('Data not JSON Serializable:', data)
targets = self.targets
if type(self.targets).__module__ == np.__name__:
targets = self.targets.tolist()
try:
json_targets = json.dumps(targets)
        except TypeError:
raise TypeError('Targets not JSON Serializable:', targets)
return {
'data': json_data,
'targets': json_targets,
'length': self.length,
'sampling_rate': self.sampling_rate,
'stride': self.stride,
'start_index': self.start_index,
'end_index': self.end_index,
'shuffle': self.shuffle,
'reverse': self.reverse,
'batch_size': self.batch_size
}
def to_json(self, **kwargs):
"""Returns a JSON string containing the timeseries generator
configuration. To load a generator from a JSON string, use
`keras.preprocessing.sequence.timeseries_generator_from_json(json_string)`.
# Arguments
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
# Returns
            A JSON string containing the timeseries generator configuration.
"""
config = self.get_config()
timeseries_generator_config = {
'class_name': self.__class__.__name__,
'config': config
}
return json.dumps(timeseries_generator_config, **kwargs)
def timeseries_generator_from_json(json_string):
"""Parses a JSON timeseries generator configuration file and
returns a timeseries generator instance.
# Arguments
json_string: JSON string encoding a timeseries
generator configuration.
# Returns
A Keras TimeseriesGenerator instance
"""
full_config = json.loads(json_string)
config = full_config.get('config')
data = json.loads(config.pop('data'))
config['data'] = data
targets = json.loads(config.pop('targets'))
config['targets'] = targets
return TimeseriesGenerator(**config)
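# Example (illustrative): a generator can be serialized to JSON and restored;
# `data` and `targets` are embedded in the configuration. Note that get_config()
# stores `start_index` with `length` already added, so the restored generator
# begins `length` timesteps later than the original.
#
#     >>> data = np.array([[i] for i in range(50)])
#     >>> gen = TimeseriesGenerator(data, data, length=10, batch_size=2)
#     >>> restored = timeseries_generator_from_json(gen.to_json())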
|
|
from . import cas
from .alarm import Severity, Alarm
import collections
import operator
import threading
import time
import sys
import logging
if sys.hexversion >= 0x02070000:
from logging import NullHandler
else:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger('pcaspy').addHandler(NullHandler())
class Manager(object):
pvs = {} #: PV dict using port name as key and {pv base name: pv instance} as value
pvf = {} #: PV dict using PV full name as key
driver = {} #: Driver dict
# Yes, this is a global instance
manager = Manager()
# decorator to register driver
def registerDriver(driver_init_func):
def wrap(*args, **kargs):
driver_instance = args[0]
port = driver_instance.port
driver_init_func(*args, **kargs)
manager.driver[port] = driver_instance
return wrap
# Driver metaclass to decorate subclass.__init__ to
# register subclass object
class DriverType(type):
def __init__(cls, name, bases, dct):
if name != 'Driver':
cls.__init__ = registerDriver(cls.__init__)
type.__init__(cls, name, bases, dct)
class Data(object):
def __init__(self):
self.value = 0
self.flag = False
self.severity = Severity.INVALID_ALARM
self.alarm = Alarm.UDF_ALARM
self.udf = True
self.mask = 0
self.time = cas.epicsTimeStamp()
def __repr__(self):
return "value=%s alarm=%s severity=%s flag=%s mask=%s time=%s" % \
(self.value, Alarm.nameOf(self.alarm), Severity.nameOf(self.severity), self.flag, self.mask, self.time)
# Define empty DriverBase using metaclass syntax compatible with both Python 2 and Python 3
DriverBase = DriverType(str('DriverBase'), (), {
'__doc__': 'Driver base class'
})
class Driver(DriverBase):
"""
    This class reacts to PV read/write requests. The default behavior is to accept any value from a write request
    and return it on subsequent read requests, much like an echo.
To specify the behavior, override methods :meth:`read` and :meth:`write` in a derived class.
"""
port = 'default'
def __init__(self):
"""
        Initialize the parameters database. Subclasses must call this method first in their own __init__.
"""
self.pvDB = {}
# init pvData with pv instance
for reason, pv in manager.pvs[self.port].items():
data = Data()
data.value = pv.info.value
self.pvDB[reason] = data
def read(self, reason):
"""
Read PV current value
:param str reason: PV base name
:return: PV current value
This method is invoked by server library when clients issue read access to a PV.
By default it returns the value stored in the parameter library by calling :meth:`getParam`.
The derived class might leave this method untouched and update the PV values from
a separate polling thread. See :ref:`shell-command-example`, :ref:`simscope-example`.
.. note:: This method is called by the server library main thread. Time consuming tasks
should not be performed here. It is suggested to work in an auxiliary thread.
"""
return self.getParam(reason)
def write(self, reason, value):
"""
Write PV new value
:param str reason: PV base name
:param value: PV new value
:return: True if the new value is accepted, False if rejected.
This method is invoked by server library when clients write to a PV.
By default it stores the value in the parameter library by calling :meth:`setParam`.
.. note:: This method is called by the server library main thread. Time consuming tasks
should not be performed here. It is suggested to work in an auxiliary thread.
"""
self.setParam(reason, value)
return True
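    # Illustrative sketch (not part of this module): a minimal derived driver,
    # assuming a SimpleServer has already created PVs for the 'default' port.
    # Only write() is overridden, as described in the class docstring; updatePVs()
    # (referenced throughout the docstrings below) pushes changes to clients.
    #
    #     class EchoDriver(Driver):
    #         def __init__(self):
    #             Driver.__init__(self)
    #
    #         def write(self, reason, value):
    #             self.setParam(reason, value)
    #             self.updatePVs()
    #             return True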
def setParam(self, reason, value):
"""set PV value and request update
:param str reason: PV base name
:param value: PV new value
Store the PV's new value if it is indeed different from the old.
For list and numpy array, a copy will be made.
        This new value will be pushed to registered clients the next time when :meth:`updatePVs` is called.
The timestamp will be updated to the current time anyway.
Alarm and severity status are updated as well. For numeric type, the alarm/severity is determined as the
following:
======================== ============ ============
value alarm severity
======================== ============ ============
value < *lolo* LOLO_ALARM MAJOR_ALARM
*lolo* < value < *low* LOW_ALARM MINOR_ALARM
*low* < value < *high* NO_ALARM NO_ALARM
*high* < value < *hihi* HIGH_ALARM MINOR_ALARM
value > *hihi* HIHI_ALARM MAJOR_ALARM
======================== ============ ============
For enumerate type, the alarm severity is defined by field *states*. And if severity is other than NO_ALARM,
the alarm status is STATE_ALARM.
"""
# make a copy of mutable objects, list, numpy.ndarray
if isinstance(value, list):
value = value[:]
elif 'numpy.ndarray' in str(type(value)):
value = value.copy()
# check whether value update is needed
pv = manager.pvs[self.port][reason]
self.pvDB[reason].mask |= pv.info.checkValue(value)
self.pvDB[reason].value = value
self.pvDB[reason].time = cas.epicsTimeStamp()
if self.pvDB[reason].mask:
self.pvDB[reason].flag = True
# check whether alarm/severity update is needed
alarm, severity = pv.info.checkAlarm(value)
self.setParamStatus(reason, alarm, severity)
logging.getLogger('pcaspy.Driver.setParam')\
.debug('%s: %s', reason, self.pvDB[reason])
def setParamStatus(self, reason, alarm=None, severity=None):
"""set PV status and severity and request update
:param str reason: PV base name
:param alarm: alarm state
:param severity: severity state
The PVs' alarm status and severity are automatically set in :meth:`setParam`.
If the status and severity need to be set explicitly to override the defaults, :meth:`setParamStatus` must
be called *after* :meth:`setParam`.
The new alarm status/severity will be pushed to registered clients the next time when :meth:`updatePVs` is called.
"""
if alarm is not None and self.pvDB[reason].alarm != alarm:
self.pvDB[reason].alarm = alarm
self.pvDB[reason].mask |= cas.DBE_ALARM
self.pvDB[reason].flag = True
if severity is not None and self.pvDB[reason].severity != severity:
self.pvDB[reason].severity = severity
self.pvDB[reason].mask |= cas.DBE_ALARM
self.pvDB[reason].flag = True
def setParamEnums(self, reason, enums, states=None):
""" set PV enumerate strings and severity states
:param str reason: PV base name
:param list enums: string representation of the enumerate states
        :param list states: alarm severities of the enumerated states.
        The number of elements in *states* must match that of *enums*.
        If *None* is given, the list is populated with *Severity.NO_ALARM*.
        The new enumerated strings will be pushed to registered clients the next time :meth:`updatePVs` is called.
.. note:: The monitoring client needs to use *DBR_GR_XXX* or *DBR_CTRL_XXX* request type and *DBE_PROPERTY*
event mask when issuing the subscription. This requires EPICS base 3.14.12.6+.
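        A minimal sketch; the PV name ``MODE`` and its states are illustrative::
            self.setParamEnums('MODE', ['OFF', 'ON', 'FAULT'],
                               states=[Severity.NO_ALARM,
                                       Severity.NO_ALARM,
                                       Severity.MAJOR_ALARM])
            self.updatePVs()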
"""
if states is None:
            states = [Severity.NO_ALARM] * len(enums)  # default each state to no-alarm severity, as documented
if len(enums) != len(states):
raise ValueError('enums and states must have the same length')
pv = manager.pvs[self.port][reason]
if pv.info.enums != enums:
pv.info.enums = enums
pv.info.states = states
self.pvDB[reason].mask |= cas.DBE_PROPERTY
self.pvDB[reason].flag = True
def setParamInfo(self, reason, info):
"""
        set PV meta info: limits, precision, units.
:param str reason: PV base name
:param dict info: information dictionary, same as used in :meth:`SimpleServer.createPV`.
        The new meta information will be pushed to registered clients the next time :meth:`updatePVs` is called.
.. note:: The monitoring client needs to use *DBR_GR_XXX* or *DBR_CTRL_XXX* request type and *DBE_PROPERTY*
event mask when issuing the subscription. This requires EPICS base 3.14.12.6+.
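        A minimal sketch; the PV name ``CURRENT`` and the field values are illustrative::
            self.setParamInfo('CURRENT', {'unit': 'mA', 'prec': 2, 'high': 90, 'hihi': 100})
            self.updatePVs()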
"""
# copy new information
pv = manager.pvs[self.port][reason]
for k, v in info.items():
if hasattr(pv.info, k):
setattr(pv.info, k, v)
pv.info.validateLimit()
# recheck alarm
alarm, severity = pv.info.checkAlarm(self.pvDB[reason].value)
self.setParamStatus(reason, alarm, severity)
# mark event mask and flag
self.pvDB[reason].mask |= cas.DBE_PROPERTY
self.pvDB[reason].flag = True
def getParam(self, reason):
"""retrieve PV value
:param str reason: PV base name
:return: PV current value
"""
return self.pvDB[reason].value
def getParamDB(self, reason):
"""
Return the PV data information
:param str reason: PV base name
:return: PV current data information
:rtype: :class:`Data`
"""
return self.pvDB[reason]
def getParamInfo(self, reason, info_keys=None):
"""
        Get PV info fields. This method returns a dictionary of info/value pairs, with one entry for
        each key in *info_keys* that exists as an attribute of the PV's info object; keys that do
        not exist are ignored. Valid keys are the same as those used in :meth:`SimpleServer.createPV`.
        If *info_keys* is not specified, all PV info keys are returned.
:param str reason: PV base name
:param list info_keys: List of keys for what information to obtain
:return: Dictionary with PV info fields and their current values
:rtype: dict
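        A minimal sketch; the PV name ``CURRENT`` is illustrative::
            info = self.getParamInfo('CURRENT', info_keys=['unit', 'prec'])
            # e.g. {'unit': 'mA', 'prec': 2}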
"""
pv = manager.pvs[self.port][reason]
if info_keys is None:
info_keys = ['states', 'prec', 'unit', 'lolim', 'hilim',
'hihi', 'lolo', 'high', 'low', 'scan', 'asyn', 'adel', 'mdel',
'asg', 'port', 'enums', 'count', 'type', 'value']
info_dict = {}
for key in info_keys:
if hasattr(pv.info, key):
info_dict[key] = getattr(pv.info, key)
return info_dict
def callbackPV(self, reason):
"""Inform asynchronous write completion
:param str reason: PV base name
"""
pv = manager.pvs[self.port][reason]
if pv.info.asyn:
pv.endAsyncWrite(cas.S_casApp_success)
def updatePVs(self):
"""Post update events on all PVs with value, alarm status or metadata changed"""
for reason in self.pvDB:
self.updatePV(reason)
def updatePV(self, reason):
"""Post update event on the PV if value, alarm status or metadata changes
:param str reason: PV base name
"""
pv = manager.pvs[self.port][reason]
if self.pvDB[reason].flag and pv.info.scan == 0:
self.pvDB[reason].flag = False
pv.updateValue(self.pvDB[reason])
self.pvDB[reason].mask = 0
# map aitType to string representation
_ait_d = {'enum': cas.aitEnumEnum16,
'str': cas.aitEnumString,
'string': cas.aitEnumString,
'float': cas.aitEnumFloat64,
'int': cas.aitEnumInt32,
'short': cas.aitEnumInt16,
'char': cas.aitEnumUint8,
}
# map aitType to gddAppType_dbr_ctrl_xxx
_dbr_d = {
cas.aitEnumUint8: 32,
cas.aitEnumInt16: 29,
cas.aitEnumInt32: 33,
cas.aitEnumFloat64: 34,
}
class PVInfo(object):
def __init__(self, info):
# initialize from info dict with defaults
self.count = info.get('count', 1)
self.type = _ait_d[info.get('type', 'float')]
# check the number of enum states and
# the state string do not exceed the maximum
enums = info.get('enums', [])
if len(enums) > cas.MAX_ENUM_STATES:
sys.stderr.write('enums exceeds the maximum allowed states %d\n' % cas.MAX_ENUM_STATES)
enums = enums[:cas.MAX_ENUM_STATES]
self.enums = []
for enum in enums:
if len(enum) >= cas.MAX_ENUM_STRING_SIZE:
sys.stderr.write('enums state "%s" exceeds the maximum length %d\n'
% (enum, cas.MAX_ENUM_STRING_SIZE-1))
enum = enum[:cas.MAX_ENUM_STRING_SIZE-1]
self.enums.append(enum)
self.states = info.get('states', [])
# initialize enum severity states if not specified
if not self.states:
self.states = len(self.enums) * [Severity.NO_ALARM]
self.prec = info.get('prec', 0.0)
self.unit = info.get('unit', '')
self.lolim = info.get('lolim', 0.0)
self.hilim = info.get('hilim', 0.0)
self.hihi = info.get('hihi', 0.0)
self.lolo = info.get('lolo', 0.0)
self.high = info.get('high', 0.0)
self.low = info.get('low', 0.0)
self.adel = info.get('adel', 0.0)
self.mdel = info.get('mdel', 0.0)
self.scan = info.get('scan', 0)
self.asyn = info.get('asyn', False)
self.asg = info.get('asg', '')
self.reason = ''
self.port = info.get('port', 'default')
# validate alarm limit
self.valid_low_high = False
self.valid_lolo_hihi = False
self.validateLimit()
# initialize value based on type and count
if self.type in [cas.aitEnumString, cas.aitEnumFixedString, cas.aitEnumUint8]:
value = ''
else:
value = 0
if self.count > 1 and self.type is not cas.aitEnumUint8:
value = [value] * self.count
self.value = info.get('value', value)
# initialize last monitor/archive value
self.mlst = self.value
self.alst = self.value
def validateLimit(self):
# validate alarm limit
if self.lolo >= self.hihi:
self.valid_lolo_hihi = False
else:
self.valid_lolo_hihi = True
if self.low >= self.high:
self.valid_low_high = False
else:
self.valid_low_high = True
def checkValue(self, newValue):
"""Check value change event"""
mask = 0
# array type always gets notified
if self.count > 1:
mask = (cas.DBE_LOG | cas.DBE_VALUE)
# string type's equality is checked
elif self.type in [cas.aitEnumString, cas.aitEnumFixedString]:
if self.mlst != newValue:
mask |= cas.DBE_VALUE
self.mlst = newValue
if self.alst != newValue:
mask |= cas.DBE_LOG
self.alst = newValue
# scalar numeric type is checked against archive and monitor deadband
else:
if abs(self.mlst - newValue) > self.mdel:
mask |= cas.DBE_VALUE
self.mlst = newValue
if abs(self.alst - newValue) > self.adel:
mask |= cas.DBE_LOG
self.alst = newValue
return mask
def checkAlarm(self, value):
if self.type == cas.aitEnumEnum16:
return self._checkEnumAlarm(value)
elif self.type in [cas.aitEnumFloat64, cas.aitEnumInt32, cas.aitEnumInt16]:
return self._checkNumericAlarm(value)
elif self.type in [cas.aitEnumString, cas.aitEnumFixedString, cas.aitEnumUint8]:
return Alarm.NO_ALARM, Severity.NO_ALARM
else:
return None, None
def _checkNumericAlarm(self, value):
severity = Severity.NO_ALARM
alarm = Alarm.NO_ALARM
if self.valid_low_high:
if self._compareNumeric(value, self.low, operator.le):
alarm = Alarm.LOW_ALARM
severity = Severity.MINOR_ALARM
elif self._compareNumeric(value, self.high, operator.ge):
alarm = Alarm.HIGH_ALARM
severity = Severity.MINOR_ALARM
if self.valid_lolo_hihi:
if self._compareNumeric(value, self.lolo, operator.le):
alarm = Alarm.LOLO_ALARM
severity = Severity.MAJOR_ALARM
elif self._compareNumeric(value, self.hihi, operator.ge):
alarm = Alarm.HIHI_ALARM
severity = Severity.MAJOR_ALARM
return alarm, severity
def _checkEnumAlarm(self, value):
if 0 <= value < len(self.states):
severity = self.states[value]
if severity == Severity.NO_ALARM:
alarm = Alarm.NO_ALARM
else:
alarm = Alarm.STATE_ALARM
else:
severity = Severity.MAJOR_ALARM
alarm = Alarm.STATE_ALARM
return alarm, severity
def _compareNumeric(self, value, limit, op):
"""
Compare value and limit with comparison operator.
:param value: numeric scalar or sequence
:param limit: numeric scalar
:param op: comparision operators, le, ge etc
"""
if isinstance(value, collections.Iterable):
return any(op(v, limit) for v in value)
else:
return op(value, limit)
class SimplePV(cas.casPV):
"""
This class represent the PV entity and its associated attributes.
It is to be created by server application on startup.
It derives from :cpp:class:`PV` and implements the virtual methods.
.. note:: This is considered an internal class and should not be referenced by module users.
"""
def __init__(self, name, info):
cas.casPV.__init__(self)
self.name = name
self.info = info
self.interest = False
if info.asg:
self.setAccessSecurityGroup(info.asg)
# scan thread
if self.info.scan > 0:
self.tid = threading.Thread(target=self.scan)
self.tid.setDaemon(True)
self.tid.start()
def scan(self):
while True:
driver = manager.driver.get(self.info.port)
if driver:
# read value from driver and write to driver's param database
newValue = driver.read(self.info.reason)
driver.setParam(self.info.reason, newValue)
# post update events if necessary
dbValue = driver.getParamDB(self.info.reason)
if dbValue.flag:
dbValue.flag = False
self.updateValue(dbValue)
dbValue.mask = 0
time.sleep(self.info.scan)
def interestRegister(self):
self.interest = True
return cas.S_casApp_success
def interestDelete(self):
self.interest = False
def writeValue(self, gddValue):
# get driver object
driver = manager.driver.get(self.info.port)
if not driver:
logging.getLogger('pcaspy.SimplePV.writeValue').\
warning('%s: No driver is registered for port %s', self.info.reason, self.info.port)
return cas.S_casApp_undefined
# call out driver support
success = driver.write(self.info.reason, gddValue.get())
if success is False:
logging.getLogger('pcaspy.SimplePV.writeValue').\
warning('%s: Driver rejects value %s', self.info.reason, gddValue.get())
driver.setParamStatus(self.info.reason, Alarm.WRITE_ALARM, Severity.INVALID_ALARM)
driver.updatePV(self.info.reason)
return success
def write(self, context, value):
# delegate asynchronous write to python writeNotify method
# only if writeNotify not present in C++ library
if not cas.EPICS_HAS_WRITENOTIFY and self.info.asyn:
return self.writeNotify(context, value)
else:
self.writeValue(value)
return cas.S_casApp_success
def writeNotify(self, context, value):
# postpone request if one already in process
if self.hasAsyncWrite():
return cas.S_casApp_postponeAsyncIO
# do asynchronous only if PV supports
if self.info.asyn:
# register async write io
self.startAsyncWrite(context)
# call out driver
success = self.writeValue(value)
# if not successful, clean the async write io
# pass status S_cas_success instead of cas.S_casApp_canceledAsyncIO
            # so that the client won't see an error message.
if not success:
self.endAsyncWrite(cas.S_cas_success)
            # server library expects status S_casApp_asyncCompletion if an async write io has been initiated.
return cas.S_casApp_asyncCompletion
else:
# call out driver
success = self.writeValue(value)
return cas.S_casApp_success
def updateValue(self, dbValue):
if not self.interest:
return
gddValue = cas.gdd(16, self.info.type) # gddAppType_value
if self.info.count > 1:
gddValue.setDimension(1)
gddValue.setBound(0, 0, self.info.count)
gddValue.put(dbValue.value)
gddValue.setTimeStamp(dbValue.time)
gddValue.setStatSevr(dbValue.alarm, dbValue.severity)
if self.info.type == cas.aitEnumEnum16:
gddCtrl = cas.gdd.createDD(31) # gddAppType_dbr_ctrl_enum
gddCtrl[1].put(gddValue)
gddCtrl[2].put(self.info.enums)
elif self.info.type == cas.aitEnumString: # string type has no control info
gddCtrl = gddValue
else:
gddCtrl = cas.gdd.createDD(_dbr_d[self.info.type]) # gddAppType_dbr_ctrl_xxx
gddCtrl[1].put(self.info.unit)
gddCtrl[2].put(self.info.low)
gddCtrl[3].put(self.info.high)
gddCtrl[4].put(self.info.lolo)
gddCtrl[5].put(self.info.hihi)
gddCtrl[6].put(self.info.lolim)
gddCtrl[7].put(self.info.hilim)
gddCtrl[8].put(self.info.lolim)
gddCtrl[9].put(self.info.hilim)
if self.info.type == cas.aitEnumFloat64:
gddCtrl[10].put(self.info.prec)
gddCtrl[11].put(gddValue)
else:
gddCtrl[10].put(gddValue)
self.postEvent(dbValue.mask, gddCtrl)
def getValue(self, value):
# get driver object
driver = manager.driver.get(self.info.port)
if not driver:
logging.getLogger('pcaspy.SimplePV.getValue')\
.warning('%s: No driver is registered for port %s', self.info.reason, self.info.port)
return cas.S_casApp_undefined
# set gdd type if necessary
if value.primitiveType() == cas.aitEnumInvalid:
value.setPrimType(self.info.type)
# set gdd value
if self.info.scan > 0:
newValue = driver.getParam(self.info.reason)
else:
newValue = driver.read(self.info.reason)
if newValue is None:
logging.getLogger('pcaspy.SimplePV.getValue')\
.warning('%s: Driver returns None', self.info.reason)
return cas.S_casApp_undefined
logging.getLogger('pcaspy.SimplePV.getValue')\
.debug('%s: Read value %s', self.info.reason, newValue)
value.put(newValue)
# set gdd info
dbValue = driver.getParamDB(self.info.reason)
value.setStatSevr(dbValue.alarm, dbValue.severity)
value.setTimeStamp(dbValue.time)
return cas.S_casApp_success
def getPrecision(self, prec):
prec.put(self.info.prec)
return cas.S_casApp_success
def getUnits(self, unit):
unit.put(self.info.unit)
return cas.S_casApp_success
def getEnums(self, enums):
if self.info.enums:
enums.put(self.info.enums)
return cas.S_casApp_success
def getHighLimit(self, hilim):
hilim.put(self.info.hilim)
return cas.S_casApp_success
def getLowLimit(self, lolim):
lolim.put(self.info.lolim)
return cas.S_casApp_success
def getHighAlarmLimit(self, hilim):
hilim.put(self.info.hihi)
return cas.S_casApp_success
def getLowAlarmLimit(self, lolim):
lolim.put(self.info.lolo)
return cas.S_casApp_success
def getHighWarnLimit(self, hilim):
hilim.put(self.info.high)
return cas.S_casApp_success
def getLowWarnLimit(self, lolim):
lolim.put(self.info.low)
return cas.S_casApp_success
def bestExternalType(self):
return self.info.type
def maxDimension(self):
if self.info.count > 1:
return 1
else:
return 0
def maxBound(self, dims):
if dims == 0:
return self.info.count
else:
return 0
def getName(self):
return self.name
class SimpleServer(cas.caServer):
"""
    This class encapsulates the transactions performed by the channel access server.
    It stands between the channel access client and the driver object.
    It answers the basic channel access discovery requests and forwards the
    read/write requests to the driver object.
    It derives from :cpp:class:`caServer`. In addition to implementing the virtual methods,
    it adds the method :meth:`createPV` to create PVs and :meth:`process` to process server requests.
::
server = SimpleServer()
server.createPV(prefix, pvdb)
while True:
server.process(0.1)
"""
def __init__(self):
cas.caServer.__init__(self)
def __del__(self):
cas.asCaStop()
def pvExistTest(self, context, addr, fullname):
if fullname in manager.pvf:
return cas.pverExistsHere
else:
return cas.pverDoesNotExistHere
def pvAttach(self, context, fullname):
return manager.pvf.get(fullname, cas.S_casApp_pvNotFound)
@staticmethod
def createPV(prefix, pvdb):
"""
Create PV based on prefix and database definition pvdb
:param str prefix: Name prefixing the *base_name* defined in *pvdb*
:param dict pvdb: PV database configuration
pvdb is a Python *dict* assuming the following format,
::
pvdb = {
'base_name' : {
'field_name' : value,
},
}
        The *base_name* is unique and is prefixed to create the PV's full name.
        Each PV's configuration is itself expressed as a dict, whose *field_name*
        keys configure the PV's properties.
.. _database-field-definition:
.. table:: Database Field Definition
======== ======= ===============================================
Field Default Description
======== ======= ===============================================
type 'float' PV data type. enum, string, char, float or int
count 1 Number of elements
enums [] String representations of the enumerate states
states [] Severity values of the enumerate states.
Any of the following, Severity.NO_ALARM, Severity.MINOR_ALARM,
Severity.MAJOR_ALARM, Severity.INVALID_ALARM
prec 0 Data precision
unit '' Physical meaning of data
lolim 0 Data low limit for graphics display
hilim 0 Data high limit for graphics display
low 0 Data low limit for alarm
high 0 Data high limit for alarm
lolo 0 Data low low limit for alarm
hihi 0 Data high high limit for alarm
adel 0 Archive deadband
mdel 0 Monitor, value change deadband
scan 0 Scan period in second. 0 means passive
asyn False Process finishes asynchronously if True
asg '' Access security group name
value 0 or '' Data initial value
======== ======= ===============================================
        The supported data types have been greatly reduced from C++ PCAS to match Python native types.
        Numeric types are 'float' and 'int', corresponding to DBF_DOUBLE and DBF_LONG of an EPICS IOC.
        The display limits are defined by *lolim* and *hilim*.
        The alarm limits are defined by *low*, *high*, *lolo*, *hihi*.
        A fixed-width string, 40 characters as of EPICS 3.14, is of type 'string'.
        A long string is supported by using the 'char' type and specifying a *count* field large enough.
        The 'enum' type defines a list of choices via the *enums* field, with optional associated severities via *states*.
        The *adel* and *mdel* fields specify the deadbands to trigger an archive and a value change event respectively.
        If *asyn* is set to True, any channel access client write with the callback option, i.e. calling `ca_put_callback`,
        completes only when :meth:`Driver.callbackPV` is called.
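        An illustrative *pvdb* exercising several field types (PV names, limits and the prefix are
        examples only; ``Severity`` is importable from pcaspy)::
            pvdb = {
                'STATUS': {'type': 'enum', 'enums': ['OK', 'ERROR'],
                           'states': [Severity.NO_ALARM, Severity.MAJOR_ALARM]},
                'WAVE'  : {'type': 'float', 'count': 100},
                'MSG'   : {'type': 'char', 'count': 256},
                'TEMP'  : {'prec': 2, 'unit': 'C',
                           'low': 5, 'high': 45, 'lolo': 0, 'hihi': 50},
            }
            server.createPV('MTEST:', pvdb)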
"""
for basename, pvinfo in pvdb.items():
pvinfo = PVInfo(pvinfo)
pvinfo.reason = basename
pvinfo.name = prefix + basename
pv = SimplePV(pvinfo.name, pvinfo)
manager.pvf[pvinfo.name] = pv
if pvinfo.port not in manager.pvs:
manager.pvs[pvinfo.port] = {}
manager.pvs[pvinfo.port][basename] = pv
@staticmethod
def initAccessSecurityFile(filename, **subst):
"""
Load access security configuration file
:param str filename: Name of the access security configuration file
:param subst: Substitute macros specified by keyword arguments
.. note::
This must be called before :meth:`createPV`.
"""
macro = ','.join(['%s=%s' % (k, v) for k, v in subst.items()])
cas.asInitFile(filename, macro)
cas.asCaStart()
@staticmethod
def process(delay):
"""
Process server transactions.
        :param float delay: Processing time in seconds
        This method should be called frequently enough that incoming channel access
        requests are answered in time. It is normally called in a loop::
server = SimpleServer()
...
while True:
server.process(0.1)
"""
cas.process(delay)
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import typing
from gapic.schema import metadata
from gapic.schema import naming
from gapic.schema import wrappers
from google.api import annotations_pb2, routing_pb2
from google.api import client_pb2
from google.api import http_pb2
from google.protobuf import descriptor_pb2 as desc
def make_service(
name: str = "Placeholder",
host: str = "",
methods: typing.Tuple[wrappers.Method] = (),
scopes: typing.Tuple[str] = (),
visible_resources: typing.Optional[
typing.Mapping[str, wrappers.CommonResource]
] = None,
) -> wrappers.Service:
visible_resources = visible_resources or {}
# Define a service descriptor, and set a host and oauth scopes if
# appropriate.
service_pb = desc.ServiceDescriptorProto(name=name)
if host:
service_pb.options.Extensions[client_pb2.default_host] = host
service_pb.options.Extensions[client_pb2.oauth_scopes] = ','.join(scopes)
# Return a service object to test.
return wrappers.Service(
service_pb=service_pb,
methods={m.name: m for m in methods},
visible_resources=visible_resources,
)
# FIXME (lukesneeringer): This test method is convoluted and it makes these
# tests difficult to understand and maintain.
def make_service_with_method_options(
*,
http_rule: http_pb2.HttpRule = None,
method_signature: str = '',
in_fields: typing.Tuple[desc.FieldDescriptorProto] = (),
visible_resources: typing.Optional[typing.Mapping[str, wrappers.CommonResource]] = None,
) -> wrappers.Service:
# Declare a method with options enabled for long-running operations and
# field headers.
method = get_method(
'DoBigThing',
'foo.bar.ThingRequest',
'google.longrunning.operations_pb2.Operation',
lro_response_type='foo.baz.ThingResponse',
lro_metadata_type='foo.qux.ThingMetadata',
in_fields=in_fields,
http_rule=http_rule,
method_signature=method_signature,
)
# Define a service descriptor.
service_pb = desc.ServiceDescriptorProto(name='ThingDoer')
# Return a service object to test.
return wrappers.Service(
service_pb=service_pb,
methods={method.name: method},
visible_resources=visible_resources or {},
)
def get_method(name: str,
in_type: str,
out_type: str,
lro_response_type: str = '',
lro_metadata_type: str = '', *,
in_fields: typing.Tuple[desc.FieldDescriptorProto] = (),
http_rule: http_pb2.HttpRule = None,
method_signature: str = '',
) -> wrappers.Method:
input_ = get_message(in_type, fields=in_fields)
output = get_message(out_type)
lro = None
# Define a method descriptor. Set the field headers if appropriate.
method_pb = desc.MethodDescriptorProto(
name=name,
input_type=input_.ident.proto,
output_type=output.ident.proto,
)
if lro_response_type:
lro = wrappers.OperationInfo(
response_type=get_message(lro_response_type),
metadata_type=get_message(lro_metadata_type),
)
if http_rule:
ext_key = annotations_pb2.http
method_pb.options.Extensions[ext_key].MergeFrom(http_rule)
if method_signature:
ext_key = client_pb2.method_signature
method_pb.options.Extensions[ext_key].append(method_signature)
return wrappers.Method(
method_pb=method_pb,
input=input_,
output=output,
lro=lro,
meta=input_.meta,
)
def get_message(dot_path: str, *,
fields: typing.Tuple[desc.FieldDescriptorProto] = (),
) -> wrappers.MessageType:
# Pass explicit None through (for lro_metadata).
if dot_path is None:
return None
# Note: The `dot_path` here is distinct from the canonical proto path
# because it includes the module, which the proto path does not.
#
# So, if trying to test the DescriptorProto message here, the path
# would be google.protobuf.descriptor.DescriptorProto (whereas the proto
# path is just google.protobuf.DescriptorProto).
pieces = dot_path.split('.')
pkg, module, name = pieces[:-2], pieces[-2], pieces[-1]
return wrappers.MessageType(
fields={i.name: wrappers.Field(
field_pb=i,
enum=get_enum(i.type_name) if i.type_name else None,
) for i in fields},
nested_messages={},
nested_enums={},
message_pb=desc.DescriptorProto(name=name, field=fields),
meta=metadata.Metadata(address=metadata.Address(
name=name,
package=tuple(pkg),
module=module,
)),
)
def make_method(
name: str,
input_message: wrappers.MessageType = None,
output_message: wrappers.MessageType = None,
package: typing.Union[typing.Tuple[str], str] = 'foo.bar.v1',
module: str = 'baz',
http_rule: http_pb2.HttpRule = None,
signatures: typing.Sequence[str] = (),
is_deprecated: bool = False,
routing_rule: routing_pb2.RoutingRule = None,
**kwargs
) -> wrappers.Method:
# Use default input and output messages if they are not provided.
input_message = input_message or make_message('MethodInput')
output_message = output_message or make_message('MethodOutput')
# Create the method pb2.
method_pb = desc.MethodDescriptorProto(
name=name,
input_type=str(input_message.meta.address),
output_type=str(output_message.meta.address),
**kwargs
)
if routing_rule:
ext_key = routing_pb2.routing
method_pb.options.Extensions[ext_key].MergeFrom(routing_rule)
# If there is an HTTP rule, process it.
if http_rule:
ext_key = annotations_pb2.http
method_pb.options.Extensions[ext_key].MergeFrom(http_rule)
# If there are signatures, include them.
for sig in signatures:
ext_key = client_pb2.method_signature
method_pb.options.Extensions[ext_key].append(sig)
if isinstance(package, str):
package = tuple(package.split('.'))
if is_deprecated:
method_pb.options.deprecated = True
# Instantiate the wrapper class.
return wrappers.Method(
method_pb=method_pb,
input=input_message,
output=output_message,
meta=metadata.Metadata(address=metadata.Address(
name=name,
package=package,
module=module,
parent=(f'{name}Service',),
)),
)
def make_field(
name: str = 'my_field',
number: int = 1,
repeated: bool = False,
message: wrappers.MessageType = None,
enum: wrappers.EnumType = None,
meta: metadata.Metadata = None,
oneof: str = None,
**kwargs
) -> wrappers.Field:
T = desc.FieldDescriptorProto.Type
if message:
kwargs.setdefault('type_name', str(message.meta.address))
kwargs['type'] = 'TYPE_MESSAGE'
elif enum:
kwargs.setdefault('type_name', str(enum.meta.address))
kwargs['type'] = 'TYPE_ENUM'
else:
kwargs.setdefault('type', T.Value('TYPE_BOOL'))
if isinstance(kwargs['type'], str):
kwargs['type'] = T.Value(kwargs['type'])
label = kwargs.pop('label', 3 if repeated else 1)
field_pb = desc.FieldDescriptorProto(
name=name,
label=label,
number=number,
**kwargs
)
return wrappers.Field(
field_pb=field_pb,
enum=enum,
message=message,
meta=meta or metadata.Metadata(),
oneof=oneof,
)
def make_message(
name: str,
package: str = 'foo.bar.v1',
module: str = 'baz',
fields: typing.Sequence[wrappers.Field] = (),
meta: metadata.Metadata = None,
options: desc.MethodOptions = None,
) -> wrappers.MessageType:
message_pb = desc.DescriptorProto(
name=name,
field=[i.field_pb for i in fields],
options=options,
)
return wrappers.MessageType(
message_pb=message_pb,
fields=collections.OrderedDict((i.name, i) for i in fields),
nested_messages={},
nested_enums={},
meta=meta or metadata.Metadata(address=metadata.Address(
name=name,
package=tuple(package.split('.')),
module=module,
)),
)
def get_enum(dot_path: str) -> wrappers.EnumType:
pieces = dot_path.split('.')
pkg, module, name = pieces[:-2], pieces[-2], pieces[-1]
return wrappers.EnumType(
enum_pb=desc.EnumDescriptorProto(name=name),
meta=metadata.Metadata(address=metadata.Address(
name=name,
package=tuple(pkg),
module=module,
)),
values=[],
)
def make_enum(
name: str,
package: str = 'foo.bar.v1',
module: str = 'baz',
values: typing.Sequence[typing.Tuple[str, int]] = (),
meta: metadata.Metadata = None,
options: desc.EnumOptions = None,
) -> wrappers.EnumType:
enum_value_pbs = [
desc.EnumValueDescriptorProto(name=i[0], number=i[1])
for i in values
]
enum_pb = desc.EnumDescriptorProto(
name=name,
value=enum_value_pbs,
options=options,
)
return wrappers.EnumType(
enum_pb=enum_pb,
values=[wrappers.EnumValueType(enum_value_pb=evpb)
for evpb in enum_value_pbs],
meta=meta or metadata.Metadata(address=metadata.Address(
name=name,
package=tuple(package.split('.')),
module=module,
)),
)
def make_naming(**kwargs) -> naming.Naming:
kwargs.setdefault('name', 'Hatstand')
kwargs.setdefault('namespace', ('Google', 'Cloud'))
kwargs.setdefault('version', 'v1')
kwargs.setdefault('product_name', 'Hatstand')
return naming.NewNaming(**kwargs)
def make_enum_pb2(
name: str,
*values: typing.Sequence[str],
**kwargs
) -> desc.EnumDescriptorProto:
enum_value_pbs = [
desc.EnumValueDescriptorProto(name=n, number=i)
for i, n in enumerate(values)
]
enum_pb = desc.EnumDescriptorProto(name=name, value=enum_value_pbs, **kwargs)
return enum_pb
def make_message_pb2(
name: str,
fields: tuple = (),
oneof_decl: tuple = (),
**kwargs
) -> desc.DescriptorProto:
return desc.DescriptorProto(name=name, field=fields, oneof_decl=oneof_decl, **kwargs)
def make_field_pb2(name: str, number: int,
type: int = 11, # 11 == message
type_name: str = None,
oneof_index: int = None
) -> desc.FieldDescriptorProto:
return desc.FieldDescriptorProto(
name=name,
number=number,
type=type,
type_name=type_name,
oneof_index=oneof_index,
)
def make_oneof_pb2(name: str) -> desc.OneofDescriptorProto:
return desc.OneofDescriptorProto(
name=name,
)
def make_file_pb2(name: str = 'my_proto.proto', package: str = 'example.v1', *,
messages: typing.Sequence[desc.DescriptorProto] = (),
enums: typing.Sequence[desc.EnumDescriptorProto] = (),
services: typing.Sequence[desc.ServiceDescriptorProto] = (),
locations: typing.Sequence[desc.SourceCodeInfo.Location] = (),
) -> desc.FileDescriptorProto:
return desc.FileDescriptorProto(
name=name,
package=package,
message_type=messages,
enum_type=enums,
service=services,
source_code_info=desc.SourceCodeInfo(location=locations),
)
def make_doc_meta(
*,
leading: str = '',
trailing: str = '',
detached: typing.List[str] = [],
) -> metadata.Metadata:
return metadata.Metadata(
documentation=desc.SourceCodeInfo.Location(
leading_comments=leading,
trailing_comments=trailing,
leading_detached_comments=detached,
),
)
|
|
"""The data_access module contains the DataAccess class and some methods for managing transactions.
"""
import logging
import time
from contextlib import contextmanager
from bidon.util import get_value
__all__ = ["DataAccess", "RollbackTransaction", "Upsert", "transaction", "autocommit"]
logger = logging.getLogger(__name__)
class DataAccess(object):
"""A thin wrapper over a DB API2 database connection.
Provides a context manager, direct access to the execute method, and wrappers over some common
methods to reduce the amount of boilerplate SQL required.
This can work with several database engines, such as MySQL and SQLite, but is primarily targeted
at Postgres, and includes functionality specific to Postgres features.
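  A minimal usage sketch, assuming a configured DataAccessCore instance named ``core`` and a
  ``users`` table (both illustrative)::
      with DataAccess(core) as da:
          user = da.find("users", dict(id=1))
          da.update("users", dict(active=True), dict(id=1))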
"""
def __init__(self, core, *, search_path=None):
"""Initialize a DataAccess object with a DataAccessCore
:param core: a DataAccessCore instance
    :param search_path: Postgres only. A list of schema names, in the order in
                        which to search them for unqualified objects.
"""
self.core = core
self.connection = None
self.last_query = None
self.row_count = -1
self._search_path = search_path
def __enter__(self):
"""Opens DataAccess and returns self as context manager."""
return self.open()
def __exit__(self, ex_type, ex_value, ex_traceback):
"""Closes DataAccess, requesting commit if no execption was thrown.
:param ex_type: the type of the exception
:param ex_value: the parameters passed to the exception
:param ex_traceback: the traceback object attached to the exception
"""
self.close(commit=ex_type is None)
def _update_cursor_stats(self, cursor):
"""Sets DataAccess members according to the given cursor.
:param cursor: a cursor object
"""
self.row_count = cursor.rowcount
@property
def sql_writer(self):
"""Returns the SqlWriter object associated with the core."""
return self.core.sql_writer
def commit(self):
"""Commit the pending statements."""
logger.debug("Commit")
self.connection.commit()
def rollback(self):
"""Rollback the pending statements."""
logger.debug("Rollback")
self.connection.rollback()
@property
def autocommit(self):
"""Returns the current autocommit state"""
return self.core.get_autocommit(self.connection)
@autocommit.setter
def autocommit(self, value):
"""Set the autocommit value.
:param value: the new autocommit value
"""
logger.debug("Setting autocommit from %s to %s", self.autocommit, value)
self.core.set_autocommit(self.connection, value)
def _configure_connection(self, name, value):
"""Sets a Postgres run-time connection configuration parameter.
:param name: the name of the parameter
    :param value: the new value to assign to the parameter
"""
self.update("pg_settings", dict(setting=value), dict(name=name))
def open(self, *, autocommit=False):
"""Sets the connection with the core's open method.
:param autocommit: the default autocommit state
:type autocommit: boolean
:return: self
"""
if self.connection is not None:
raise Exception("Connection already set")
self.connection = self.core.open()
self.autocommit = autocommit
if self._search_path:
self._configure_connection(
"search_path",
self._search_path)
return self
def close(self, *, commit=True):
"""Closes the connection via the core's close method.
:param commit: if true the current transaction is commited, otherwise it is rolled back
:type commit: boolean
:return: self
"""
self.core.close(self.connection, commit=commit)
self.connection = None
return self
def execute(self, query_string, params=None):
"""Executes a query. Returns the resulting cursor.
    :param query_string: the parameterized query string
    :param params: can be either a tuple or a dictionary, and must match the parameterization style
                   of the query
:return: a cursor object
"""
cr = self.connection.cursor()
logger.info("SQL: %s (%s)", query_string, params)
self.last_query = (query_string, params)
t0 = time.time()
cr.execute(query_string, params or self.core.empty_params)
ms = (time.time() - t0) * 1000
logger.info("RUNTIME: %.2f ms", ms)
self._update_cursor_stats(cr)
return cr
def callproc(self, name, params, param_types=None):
"""Calls a procedure.
:param name: the name of the procedure
:param params: a list or tuple of parameters to pass to the procedure.
:param param_types: a list or tuple of type names. If given, each param will be cast via
                      the sql_writer's typecast method. This is useful to disambiguate procedure calls
                      when several parameters are null and therefore cause overload resolution
                      issues.
:return: a 2-tuple of (cursor, params)
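    A minimal sketch, assuming an open DataAccess instance ``da``; the procedure name and type
    names are illustrative::
        cursor, params = da.callproc("order_totals", [42, None],
                                     param_types=["int", "date"])
        rows = cursor.fetchall()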
"""
if param_types:
placeholders = [self.sql_writer.typecast(self.sql_writer.to_placeholder(), t)
for t in param_types]
else:
placeholders = [self.sql_writer.to_placeholder() for p in params]
# TODO: This may be Postgres specific...
qs = "select * from {0}({1});".format(name, ", ".join(placeholders))
return self.execute(qs, params), params
def get_callproc_signature(self, name, param_types):
"""Returns a procedure's signature from the name and list of types.
    :param name: the name of the procedure
    :param param_types: can be either strings or 2-tuples. 2-tuples must be of the form (name, db_type).
:return: the procedure's signature
"""
if isinstance(param_types[0], (list, tuple)):
params = [self.sql_writer.to_placeholder(*pt) for pt in param_types]
else:
params = [self.sql_writer.to_placeholder(None, pt) for pt in param_types]
return name + self.sql_writer.to_tuple(params)
def find(self, table_name, constraints=None, *, columns=None, order_by=None):
"""Returns the first record that matches the given criteria.
    :param table_name: the name of the table to search on
    :param constraints: any construct that can be parsed by SqlWriter.parse_constraints.
    :param columns: either a string or a list of column names
    :param order_by: the order by clause
"""
query_string, params = self.sql_writer.get_find_all_query(
table_name, constraints, columns=columns, order_by=order_by)
query_string += " limit 1;"
return self.execute(query_string, params).fetchone()
def find_all(self, table_name, constraints=None, *, columns=None, order_by=None, limiting=None):
"""Returns all records that match a given criteria.
    :param table_name: the name of the table to search on
    :param constraints: any construct that can be parsed by SqlWriter.parse_constraints.
    :param columns: either a string or a list of column names
    :param order_by: the order by clause
    :param limiting: a 2-tuple of (limit, offset), or None for no limiting
"""
query_string, params = self.sql_writer.get_find_all_query(
table_name, constraints, columns=columns, order_by=order_by, limiting=limiting)
query_string += ";"
return self.execute(query_string, params)
def page(self, table_name, paging, constraints=None, *, columns=None, order_by=None,
get_count=True):
"""Performs a find_all method with paging.
:param table_name: the name of the table to search on
    :param paging: a 2-tuple of (page, page_size)
    :param constraints: any construct that can be parsed by SqlWriter.parse_constraints.
    :param columns: either a string or a list of column names
    :param order_by: the order by clause
    :param get_count: if True, the total number of records that would be included without paging is
                      returned. If False, None is returned for the count.
:return: a 2-tuple of (records, total_count)
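    A minimal sketch, assuming an open DataAccess instance ``da`` and a ``users`` table::
        records, total = da.page("users", (0, 25), dict(active=True),
                                 order_by="created_at desc")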
"""
if get_count:
count = self.count(table_name, constraints)
else:
count = None
page, page_size = paging
limiting = None
if page_size > 0:
limiting = (page_size, page * page_size)
records = list(self.find_all(
table_name, constraints, columns=columns, order_by=order_by, limiting=limiting))
return (records, count)
def update(self, table_name, values, constraints=None, *, returning=None):
"""Builds and executes and update statement.
:param table_name: the name of the table to update
    :param values: can be either a dict or an enumerable of 2-tuples in the form (column, value).
:param constraints: can be any construct that can be parsed by SqlWriter.parse_constraints.
However, you cannot mix tuples and dicts between values and constraints.
:param returning: the columns to return after updating. Only works for cores that support the
returning syntax
:return: a cursor object
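    A minimal sketch, assuming an open DataAccess instance ``da`` and a ``users`` table::
        da.update("users", dict(active=False), dict(id=42), returning="id")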
"""
if constraints is None:
constraints = "1=1"
assignments, assignment_params = self.sql_writer.parse_constraints(
values, ", ", is_assignment=True)
where, where_params = self.sql_writer.parse_constraints(constraints, " and ")
returns = ""
if returning and self.core.supports_returning_syntax:
returns = " returning {0}".format(returning)
sql = "update {0} set {1} where {2}{3};".format(table_name, assignments, where, returns)
params = assignment_params
if constraints is None or isinstance(constraints, str):
pass
elif isinstance(constraints, dict):
if isinstance(params, list):
raise ValueError("you cannot mix enumerable and dict values and constraints")
params = params or {}
params.update(where_params)
else:
if isinstance(params, dict):
raise ValueError("you cannot mix enumerable and dict values and constraints")
params = params or []
params.extend(where_params)
cr = self.execute(sql, params)
return cr
def insert(self, table_name, values, *, returning=None, upsert=None):
"""Builds and executes an insert statement.
:param table_name: the name of the table to insert into
:param values: can be either a dict or an enumerable of 2-tuples in the form (column, value).
:param returning: the columns to return after updating. Only works for cores that support the
returning syntax
    :param upsert: an Upsert instance, defining how to perform the upsert.
:return: a cursor object
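    A minimal sketch, assuming an open DataAccess instance ``da`` and a unique index on
    ``users.login`` (all illustrative)::
        da.insert("users", dict(login="ada", name="Ada"),
                  returning="id",
                  upsert=Upsert(Upsert.DO_NOTHING, target="(login)"))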
"""
if isinstance(values, dict):
names = values.keys()
placeholders = [self.sql_writer.to_placeholder(i) for i in names]
params = values
else:
names = [i[0] for i in values]
placeholders = [self.sql_writer.to_placeholder() for i in values]
params = [i[1] for i in values]
placeholders = self.sql_writer.to_tuple(placeholders)
names = self.sql_writer.to_tuple(names)
returns = ""
if returning and self.core.supports_returning_syntax:
returns = " returning {0}".format(returning)
if upsert:
if upsert.action == Upsert.DO_NOTHING:
u_action_fmt = " on conflict{} do nothing"
u_assignments = None
u_constraints = None
else:
u_action_fmt = " on conflict{} do update set {}{}"
u_assignments, u_assignment_params = self.sql_writer.parse_constraints(
values, ", ", is_assignment=True)
u_constraints = ""
if isinstance(values, dict):
if not upsert.force:
# Build constraints based on the full column name, and using !=. This will make it so
# that the record won't be updated if all the fields we care about are the same.
u_constraints, u_params = self.core.sql_writer.parse_constraints(
{"{}.{}".format(table_name, k): v for k, v in values.items()},
" or ",
comp="!=")
params.update(u_params)
else:
# Add assignment params
params.extend(u_assignment_params)
if not upsert.force:
# Build constraints based on the full column name, and using !=. This will make it so
# that the record won't be updated if all the fields we care about are the same.
u_constraints, u_params = self.core.sql_writer.parse_constraints(
[("{}.{}".format(table_name, col), val, "!=") for (col, val, *_) in values],
" or ")
params.extend(u_params)
if u_constraints:
u_constraints = " where {}".format(u_constraints)
u_action_str = u_action_fmt.format(upsert.target_str or "", u_assignments, u_constraints)
sql = "insert into {0} {1} values {2}{3}{4}".format(
table_name,
names,
placeholders,
u_action_str,
returns)
else:
sql = "insert into {0} {1} values {2}{3};".format(table_name, names, placeholders, returns)
cr = self.execute(sql, params)
return cr
def delete(self, table_name, constraints=None):
"""Builds and executes an delete statement.
:param table_name: the name of the table to delete from
:param constraints: can be any construct that can be parsed by SqlWriter.parse_constraints.
:return: a cursor object
"""
if constraints is None:
constraints = "1=1"
where, params = self.sql_writer.parse_constraints(constraints)
sql = "delete from {0} where {1};".format(table_name, where)
self.execute(sql, params)
def count(self, table_name, constraints=None, *, extract="index"):
"""Returns the count of records in a table.
If the default cursor is a tuple or named tuple, this method will work without specifying an
extract parameter. If it is a dict cursor, it is necessary to specify any value other than
'index' for extract. This method will not work with cursors that aren't like tuple, namedtuple
or dict cursors.
:param table_name: the name of the table to count records on
:param constraints: can be any construct that can be parsed by SqlWriter.parse_constraints.
:param extract: the property to pull the count value from the cursor
    :return: the number of records matching the constraints
"""
where, params = self.sql_writer.parse_constraints(constraints)
sql = "select count(*) as count from {0} where {1};".format(table_name, where or "1 = 1")
# NOTE: Won't work right with dict cursor
return self.get_scalar(self.execute(sql, params), 0 if extract == "index" else "count")
def get_scalar(self, cursor, index=0):
"""Returns a single value from the first returned record from a cursor.
    By default it will return cursor.fetchone()[0], which works with tuples and namedtuples. For a dict
    cursor it is necessary to specify *index*. This method will not work with cursors that aren't
indexable.
:param cursor: a cursor object
:param index: the index of the cursor to return the value from
"""
if isinstance(index, int):
return cursor.fetchone()[index]
else:
return get_value(cursor.fetchone(), index)
class Upsert(object):
"""This class holds configuration information for performing an upsert."""
DO_NOTHING = 0x01
DO_UPDATE = 0x02
def __init__(self, action=None, target=None, force=False):
"""Initialize the Upsert configuration class.
:param action: one of Upsert.DO_NOTHING, Upsert.DO_UPDATE. Defaults to DO_NOTHING
:param target: either a string of an index name, a string of a tuple e.g. '(col1, col2)' or a
tuple of column names
    :param force: if False, the update is skipped when none of the columns to be updated would
                  change. If True, no check is done and the update is performed anyway
:type force: boolean
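    A minimal sketch; the table and target column are illustrative::
        up = Upsert(Upsert.DO_UPDATE, target="(login)", force=True)
        data_access.insert("users", dict(login="ada", name="Ada B."), upsert=up)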
"""
self.action = action or self.DO_NOTHING
self.target = target
self.force = force
@property
def target_str(self):
"""Returns the string representation of the target property."""
if isinstance(self.target, tuple):
return "({})".format(", ".join(self.target))
else:
return self.target
class RollbackTransaction(Exception):
"""This Exception class is handled specially by transaction. It will cause the current transaction
to be rolled back, but the exception won't be reraised.
"""
pass
@contextmanager
def transaction(data_access):
"""Wrap statements in a transaction. If the statements succeed, commit, otherwise rollback.
:param data_access: a DataAccess instance
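  A minimal usage sketch, assuming an open DataAccess instance ``da`` and illustrative tables::
      with transaction(da):
          da.insert("accounts", dict(name="primary"))
          da.update("users", dict(account_id=1), dict(id=42))
      # raising RollbackTransaction inside the block rolls back without re-raising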
"""
old_autocommit = data_access.autocommit
data_access.autocommit = False
try:
yield data_access
except RollbackTransaction as ex:
data_access.rollback()
except Exception as ex:
data_access.rollback()
raise ex
else:
data_access.commit()
finally:
data_access.autocommit = old_autocommit
@contextmanager
def autocommit(data_access):
"""Make statements autocommit.
:param data_access: a DataAccess instance
"""
if not data_access.autocommit:
data_access.commit()
old_autocommit = data_access.autocommit
data_access.autocommit = True
try:
yield data_access
finally:
data_access.autocommit = old_autocommit
|
|
# coding: utf-8
# flake8: noqa
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "1.0.0"
# import apis into sdk package
from onshape_client.oas.api.accounts_api import AccountsApi
from onshape_client.oas.api.app_elements_api import AppElementsApi
from onshape_client.oas.api.applications_api import ApplicationsApi
from onshape_client.oas.api.assemblies_api import AssembliesApi
from onshape_client.oas.api.billing_api import BillingApi
from onshape_client.oas.api.blob_elements_api import BlobElementsApi
from onshape_client.oas.api.companies_api import CompaniesApi
from onshape_client.oas.api.documents_api import DocumentsApi
from onshape_client.oas.api.drawings_api import DrawingsApi
from onshape_client.oas.api.elements_api import ElementsApi
from onshape_client.oas.api.feature_studios_api import FeatureStudiosApi
from onshape_client.oas.api.folders_api import FoldersApi
from onshape_client.oas.api.metadata_api import MetadataApi
from onshape_client.oas.api.open_api_api import OpenAPIApi
from onshape_client.oas.api.part_studios_api import PartStudiosApi
from onshape_client.oas.api.parts_api import PartsApi
from onshape_client.oas.api.release_management_api import ReleaseManagementApi
from onshape_client.oas.api.revisions_api import RevisionsApi
from onshape_client.oas.api.teams_api import TeamsApi
from onshape_client.oas.api.thumbnails_api import ThumbnailsApi
from onshape_client.oas.api.translations_api import TranslationsApi
from onshape_client.oas.api.users_api import UsersApi
from onshape_client.oas.api.versions_api import VersionsApi
from onshape_client.oas.api.webhooks_api import WebhooksApi
from onshape_client.oas.api.workflow_api import WorkflowApi
from onshape_client.oas.api.default_api import DefaultApi
# import ApiClient
from onshape_client.oas.api_client import ApiClient
# import Configuration
from onshape_client.oas.configuration import Configuration
# import exceptions
from onshape_client.oas.exceptions import OpenApiException
from onshape_client.oas.exceptions import ApiTypeError
from onshape_client.oas.exceptions import ApiValueError
from onshape_client.oas.exceptions import ApiKeyError
from onshape_client.oas.exceptions import ApiException
# import models into sdk package
from onshape_client.oas.models.address import Address
from onshape_client.oas.models.body_part import BodyPart
from onshape_client.oas.models.body_part_media_type import BodyPartMediaType
from onshape_client.oas.models.bt_acl_entry_info import BTAclEntryInfo
from onshape_client.oas.models.bt_acl_info import BTAclInfo
from onshape_client.oas.models.bt_active_sheet_metal_filter2944 import (
BTActiveSheetMetalFilter2944,
)
from onshape_client.oas.models.bt_active_sheet_metal_filter2944_all_of import (
BTActiveSheetMetalFilter2944AllOf,
)
from onshape_client.oas.models.bt_active_workflow_info import BTActiveWorkflowInfo
from onshape_client.oas.models.bt_address_info import BTAddressInfo
from onshape_client.oas.models.bt_allow_edge_point_filter2371 import (
BTAllowEdgePointFilter2371,
)
from onshape_client.oas.models.bt_allow_edge_point_filter2371_all_of import (
BTAllowEdgePointFilter2371AllOf,
)
from onshape_client.oas.models.bt_allow_flattened_geometry_filter2140 import (
BTAllowFlattenedGeometryFilter2140,
)
from onshape_client.oas.models.bt_allow_flattened_geometry_filter2140_all_of import (
BTAllowFlattenedGeometryFilter2140AllOf,
)
from onshape_client.oas.models.bt_allow_mesh_geometry_filter1026 import (
BTAllowMeshGeometryFilter1026,
)
from onshape_client.oas.models.bt_allow_mesh_geometry_filter1026_all_of import (
BTAllowMeshGeometryFilter1026AllOf,
)
from onshape_client.oas.models.bt_allowed_mate_type_filter1511 import (
BTAllowedMateTypeFilter1511,
)
from onshape_client.oas.models.bt_allowed_mate_type_filter1511_all_of import (
BTAllowedMateTypeFilter1511AllOf,
)
from onshape_client.oas.models.bt_and_filter110 import BTAndFilter110
from onshape_client.oas.models.bt_and_filter110_all_of import BTAndFilter110AllOf
from onshape_client.oas.models.bt_app_associative_data_info_array import (
BTAppAssociativeDataInfoArray,
)
from onshape_client.oas.models.bt_app_element_basic_info import BTAppElementBasicInfo
from onshape_client.oas.models.bt_app_element_change_params import (
BTAppElementChangeParams,
)
from onshape_client.oas.models.bt_app_element_commit_transaction_params import (
BTAppElementCommitTransactionParams,
)
from onshape_client.oas.models.bt_app_element_content_delta_info import (
BTAppElementContentDeltaInfo,
)
from onshape_client.oas.models.bt_app_element_content_entry_info import (
BTAppElementContentEntryInfo,
)
from onshape_client.oas.models.bt_app_element_content_info import (
BTAppElementContentInfo,
)
from onshape_client.oas.models.bt_app_element_history_entry_info import (
BTAppElementHistoryEntryInfo,
)
from onshape_client.oas.models.bt_app_element_history_info import (
BTAppElementHistoryInfo,
)
from onshape_client.oas.models.bt_app_element_modify_info import BTAppElementModifyInfo
from onshape_client.oas.models.bt_app_element_params import BTAppElementParams
from onshape_client.oas.models.bt_app_element_reference_info import (
BTAppElementReferenceInfo,
)
from onshape_client.oas.models.bt_app_element_reference_params import (
BTAppElementReferenceParams,
)
from onshape_client.oas.models.bt_app_element_reference_resolve_info import (
BTAppElementReferenceResolveInfo,
)
from onshape_client.oas.models.bt_app_element_references_resolve_info import (
BTAppElementReferencesResolveInfo,
)
from onshape_client.oas.models.bt_app_element_start_transaction_params import (
BTAppElementStartTransactionParams,
)
from onshape_client.oas.models.bt_app_element_update_params import (
BTAppElementUpdateParams,
)
from onshape_client.oas.models.bt_application_element_thumbnail_params import (
BTApplicationElementThumbnailParams,
)
from onshape_client.oas.models.bt_application_element_thumbnail_params_array import (
BTApplicationElementThumbnailParamsArray,
)
from onshape_client.oas.models.bt_assembly_definition_info import (
BTAssemblyDefinitionInfo,
)
from onshape_client.oas.models.bt_assembly_feature_data_info import (
BTAssemblyFeatureDataInfo,
)
from onshape_client.oas.models.bt_assembly_feature_info import BTAssemblyFeatureInfo
from onshape_client.oas.models.bt_assembly_feature_list_response1174 import (
BTAssemblyFeatureListResponse1174,
)
from onshape_client.oas.models.bt_assembly_feature_list_response1174_all_of import (
BTAssemblyFeatureListResponse1174AllOf,
)
from onshape_client.oas.models.bt_assembly_insert_transformed_instances_response import (
BTAssemblyInsertTransformedInstancesResponse,
)
from onshape_client.oas.models.bt_assembly_instance_definition_params import (
BTAssemblyInstanceDefinitionParams,
)
from onshape_client.oas.models.bt_assembly_instance_info import BTAssemblyInstanceInfo
from onshape_client.oas.models.bt_assembly_mated_entity import BTAssemblyMatedEntity
from onshape_client.oas.models.bt_assembly_occurrence_info import (
BTAssemblyOccurrenceInfo,
)
from onshape_client.oas.models.bt_assembly_parts_info import BTAssemblyPartsInfo
from onshape_client.oas.models.bt_assembly_ps_feature_info import (
BTAssemblyPSFeatureInfo,
)
from onshape_client.oas.models.bt_assembly_transform_definition_params import (
BTAssemblyTransformDefinitionParams,
)
from onshape_client.oas.models.bt_assembly_transformed_instances_definition_params import (
BTAssemblyTransformedInstancesDefinitionParams,
)
from onshape_client.oas.models.bt_associative_data_info import BTAssociativeDataInfo
from onshape_client.oas.models.bt_base_info import BTBaseInfo
from onshape_client.oas.models.bt_base_sm_joint_table_row_metadata2232 import (
BTBaseSMJointTableRowMetadata2232,
)
from onshape_client.oas.models.bt_base_sm_joint_table_row_metadata2232_all_of import (
BTBaseSMJointTableRowMetadata2232AllOf,
)
from onshape_client.oas.models.bt_bill_of_materials_table1073 import (
BTBillOfMaterialsTable1073,
)
from onshape_client.oas.models.bt_bill_of_materials_table1073_all_of import (
BTBillOfMaterialsTable1073AllOf,
)
from onshape_client.oas.models.bt_bill_of_materials_table_row1425 import (
BTBillOfMaterialsTableRow1425,
)
from onshape_client.oas.models.bt_bill_of_materials_table_row1425_all_of import (
BTBillOfMaterialsTableRow1425AllOf,
)
from onshape_client.oas.models.bt_bill_of_materials_table_row_metadata1300 import (
BTBillOfMaterialsTableRowMetadata1300,
)
from onshape_client.oas.models.bt_bill_of_materials_table_row_metadata1300_all_of import (
BTBillOfMaterialsTableRowMetadata1300AllOf,
)
from onshape_client.oas.models.bt_bill_of_materials_unique_item_id2029 import (
BTBillOfMaterialsUniqueItemId2029,
)
from onshape_client.oas.models.bt_billing_plan_info import BTBillingPlanInfo
from onshape_client.oas.models.bt_body_type_filter112 import BTBodyTypeFilter112
from onshape_client.oas.models.bt_body_type_filter112_all_of import (
BTBodyTypeFilter112AllOf,
)
from onshape_client.oas.models.bt_bounding_box1052 import BTBoundingBox1052
from onshape_client.oas.models.bt_bounding_box_info import BTBoundingBoxInfo
from onshape_client.oas.models.bt_cache_data_path191 import BTCacheDataPath191
from onshape_client.oas.models.bt_card_info import BTCardInfo
from onshape_client.oas.models.bt_circle_description1145 import BTCircleDescription1145
from onshape_client.oas.models.bt_circle_description1145_all_of import (
BTCircleDescription1145AllOf,
)
from onshape_client.oas.models.bt_closed_curve_filter1206 import BTClosedCurveFilter1206
from onshape_client.oas.models.bt_closed_curve_filter1206_all_of import (
BTClosedCurveFilter1206AllOf,
)
from onshape_client.oas.models.bt_cloud_storage_account_info import (
BTCloudStorageAccountInfo,
)
from onshape_client.oas.models.bt_cloud_storage_account_info_all_of import (
BTCloudStorageAccountInfoAllOf,
)
from onshape_client.oas.models.bt_cloud_storage_object_info import (
BTCloudStorageObjectInfo,
)
from onshape_client.oas.models.bt_color_info import BTColorInfo
from onshape_client.oas.models.bt_color_params import BTColorParams
from onshape_client.oas.models.bt_comment_attachment_info import BTCommentAttachmentInfo
from onshape_client.oas.models.bt_comment_info import BTCommentInfo
from onshape_client.oas.models.bt_common_unit_info import BTCommonUnitInfo
from onshape_client.oas.models.bt_common_units_info import BTCommonUnitsInfo
from onshape_client.oas.models.bt_company_info import BTCompanyInfo
from onshape_client.oas.models.bt_company_summary_info import BTCompanySummaryInfo
from onshape_client.oas.models.bt_computed_configuration_input_spec2525 import (
BTComputedConfigurationInputSpec2525,
)
from onshape_client.oas.models.bt_cone_description860 import BTConeDescription860
from onshape_client.oas.models.bt_cone_description860_all_of import (
BTConeDescription860AllOf,
)
from onshape_client.oas.models.bt_configuration_info import BTConfigurationInfo
from onshape_client.oas.models.bt_configuration_params import BTConfigurationParams
from onshape_client.oas.models.bt_configuration_response2019 import (
BTConfigurationResponse2019,
)
from onshape_client.oas.models.bt_configuration_update_call2933 import (
BTConfigurationUpdateCall2933,
)
from onshape_client.oas.models.bt_configuration_update_call2933_all_of import (
BTConfigurationUpdateCall2933AllOf,
)
from onshape_client.oas.models.bt_configured_dimension_column_info2168 import (
BTConfiguredDimensionColumnInfo2168,
)
from onshape_client.oas.models.bt_configured_dimension_column_info2168_all_of import (
BTConfiguredDimensionColumnInfo2168AllOf,
)
from onshape_client.oas.models.bt_configured_feature_column_info1014 import (
BTConfiguredFeatureColumnInfo1014,
)
from onshape_client.oas.models.bt_configured_feature_column_info1014_all_of import (
BTConfiguredFeatureColumnInfo1014AllOf,
)
from onshape_client.oas.models.bt_configured_parameter_column_info2900 import (
BTConfiguredParameterColumnInfo2900,
)
from onshape_client.oas.models.bt_configured_parameter_column_info2900_all_of import (
BTConfiguredParameterColumnInfo2900AllOf,
)
from onshape_client.oas.models.bt_configured_part_properties_table2740 import (
BTConfiguredPartPropertiesTable2740,
)
from onshape_client.oas.models.bt_configured_part_properties_table2740_all_of import (
BTConfiguredPartPropertiesTable2740AllOf,
)
from onshape_client.oas.models.bt_configured_suppression_column_info2498 import (
BTConfiguredSuppressionColumnInfo2498,
)
from onshape_client.oas.models.bt_configured_values_column_info1025 import (
BTConfiguredValuesColumnInfo1025,
)
from onshape_client.oas.models.bt_configured_values_column_info1025_all_of import (
BTConfiguredValuesColumnInfo1025AllOf,
)
from onshape_client.oas.models.bt_construction_object_filter113 import (
BTConstructionObjectFilter113,
)
from onshape_client.oas.models.bt_construction_object_filter113_all_of import (
BTConstructionObjectFilter113AllOf,
)
from onshape_client.oas.models.bt_copy_document_info import BTCopyDocumentInfo
from onshape_client.oas.models.bt_copy_document_params import BTCopyDocumentParams
from onshape_client.oas.models.bt_copy_element_params import BTCopyElementParams
from onshape_client.oas.models.bt_curve_description1583 import BTCurveDescription1583
from onshape_client.oas.models.bt_curve_geometry114 import BTCurveGeometry114
from onshape_client.oas.models.bt_curve_geometry_circle115 import (
BTCurveGeometryCircle115,
)
from onshape_client.oas.models.bt_curve_geometry_circle115_all_of import (
BTCurveGeometryCircle115AllOf,
)
from onshape_client.oas.models.bt_curve_geometry_conic2284 import (
BTCurveGeometryConic2284,
)
from onshape_client.oas.models.bt_curve_geometry_conic2284_all_of import (
BTCurveGeometryConic2284AllOf,
)
from onshape_client.oas.models.bt_curve_geometry_ellipse1189 import (
BTCurveGeometryEllipse1189,
)
from onshape_client.oas.models.bt_curve_geometry_ellipse1189_all_of import (
BTCurveGeometryEllipse1189AllOf,
)
from onshape_client.oas.models.bt_curve_geometry_interpolated_spline116 import (
BTCurveGeometryInterpolatedSpline116,
)
from onshape_client.oas.models.bt_curve_geometry_interpolated_spline116_all_of import (
BTCurveGeometryInterpolatedSpline116AllOf,
)
from onshape_client.oas.models.bt_curve_geometry_line117 import BTCurveGeometryLine117
from onshape_client.oas.models.bt_curve_geometry_line117_all_of import (
BTCurveGeometryLine117AllOf,
)
from onshape_client.oas.models.bt_curve_geometry_spline118 import (
BTCurveGeometrySpline118,
)
from onshape_client.oas.models.bt_curve_geometry_spline118_all_of import (
BTCurveGeometrySpline118AllOf,
)
from onshape_client.oas.models.bt_custom_property_definition_info import (
BTCustomPropertyDefinitionInfo,
)
from onshape_client.oas.models.bt_custom_property_definition_params import (
BTCustomPropertyDefinitionParams,
)
from onshape_client.oas.models.bt_cylinder_description686 import (
BTCylinderDescription686,
)
from onshape_client.oas.models.bt_cylinder_description686_all_of import (
BTCylinderDescription686AllOf,
)
from onshape_client.oas.models.bt_default_unit_info import BTDefaultUnitInfo
from onshape_client.oas.models.bt_default_units_info import BTDefaultUnitsInfo
from onshape_client.oas.models.bt_diff_info import BTDiffInfo
from onshape_client.oas.models.bt_discount_info import BTDiscountInfo
from onshape_client.oas.models.bt_document_element_info import BTDocumentElementInfo
from onshape_client.oas.models.bt_document_element_processing_info import (
BTDocumentElementProcessingInfo,
)
from onshape_client.oas.models.bt_document_info import BTDocumentInfo
from onshape_client.oas.models.bt_document_label_info import BTDocumentLabelInfo
from onshape_client.oas.models.bt_document_merge_info import BTDocumentMergeInfo
from onshape_client.oas.models.bt_document_params import BTDocumentParams
from onshape_client.oas.models.bt_document_processing_info import (
BTDocumentProcessingInfo,
)
from onshape_client.oas.models.bt_document_processing_info_all_of import (
BTDocumentProcessingInfoAllOf,
)
from onshape_client.oas.models.bt_document_search_hit_info import (
BTDocumentSearchHitInfo,
)
from onshape_client.oas.models.bt_document_search_params import BTDocumentSearchParams
from onshape_client.oas.models.bt_document_summary_info import BTDocumentSummaryInfo
from onshape_client.oas.models.bt_document_summary_info_all_of import (
BTDocumentSummaryInfoAllOf,
)
from onshape_client.oas.models.bt_document_summary_search_info import (
BTDocumentSummarySearchInfo,
)
from onshape_client.oas.models.bt_document_summary_search_info_all_of import (
BTDocumentSummarySearchInfoAllOf,
)
from onshape_client.oas.models.bt_document_version_element_ids1897 import (
BTDocumentVersionElementIds1897,
)
from onshape_client.oas.models.bt_document_with_version_and_element_id import (
BTDocumentWithVersionAndElementId,
)
from onshape_client.oas.models.bt_document_with_version_id import (
BTDocumentWithVersionId,
)
from onshape_client.oas.models.bt_drawing_params import BTDrawingParams
from onshape_client.oas.models.bt_edge_topology_filter122 import BTEdgeTopologyFilter122
from onshape_client.oas.models.bt_edge_topology_filter122_all_of import (
BTEdgeTopologyFilter122AllOf,
)
from onshape_client.oas.models.bt_editing_logic2350 import BTEditingLogic2350
from onshape_client.oas.models.bt_element_location_params import BTElementLocationParams
from onshape_client.oas.models.bt_element_reference725 import BTElementReference725
from onshape_client.oas.models.bt_ellipse_description866 import BTEllipseDescription866
from onshape_client.oas.models.bt_ellipse_description866_all_of import (
BTEllipseDescription866AllOf,
)
from onshape_client.oas.models.bt_encoded_configuration_info import (
BTEncodedConfigurationInfo,
)
from onshape_client.oas.models.bt_entity_type_filter124 import BTEntityTypeFilter124
from onshape_client.oas.models.bt_entity_type_filter124_all_of import (
BTEntityTypeFilter124AllOf,
)
from onshape_client.oas.models.bt_explosion2754 import BTExplosion2754
from onshape_client.oas.models.bt_explosion2754_all_of import BTExplosion2754AllOf
from onshape_client.oas.models.bt_explosion_step_feature3008 import (
BTExplosionStepFeature3008,
)
from onshape_client.oas.models.bt_export_model_arc_edge_geometry1257 import (
BTExportModelArcEdgeGeometry1257,
)
from onshape_client.oas.models.bt_export_model_arc_edge_geometry1257_all_of import (
BTExportModelArcEdgeGeometry1257AllOf,
)
from onshape_client.oas.models.bt_export_model_bodies_response734 import (
BTExportModelBodiesResponse734,
)
from onshape_client.oas.models.bt_export_model_body1272 import BTExportModelBody1272
from onshape_client.oas.models.bt_export_model_coedge1342 import BTExportModelCoedge1342
from onshape_client.oas.models.bt_export_model_edge1782 import BTExportModelEdge1782
from onshape_client.oas.models.bt_export_model_edge_geometry1125 import (
BTExportModelEdgeGeometry1125,
)
from onshape_client.oas.models.bt_export_model_face1363 import BTExportModelFace1363
from onshape_client.oas.models.bt_export_model_loop1182 import BTExportModelLoop1182
from onshape_client.oas.models.bt_export_model_params import BTExportModelParams
from onshape_client.oas.models.bt_export_model_vertex858 import BTExportModelVertex858
from onshape_client.oas.models.bt_export_tessellated_body3398 import (
BTExportTessellatedBody3398,
)
from onshape_client.oas.models.bt_export_tessellated_edges_body890 import (
BTExportTessellatedEdgesBody890,
)
from onshape_client.oas.models.bt_export_tessellated_edges_body890_all_of import (
BTExportTessellatedEdgesBody890AllOf,
)
from onshape_client.oas.models.bt_export_tessellated_edges_edge1364 import (
BTExportTessellatedEdgesEdge1364,
)
from onshape_client.oas.models.bt_export_tessellated_edges_response327 import (
BTExportTessellatedEdgesResponse327,
)
from onshape_client.oas.models.bt_export_tessellated_faces_body1321 import (
BTExportTessellatedFacesBody1321,
)
from onshape_client.oas.models.bt_export_tessellated_faces_body1321_all_of import (
BTExportTessellatedFacesBody1321AllOf,
)
from onshape_client.oas.models.bt_export_tessellated_faces_face1192 import (
BTExportTessellatedFacesFace1192,
)
from onshape_client.oas.models.bt_export_tessellated_faces_facet1417 import (
BTExportTessellatedFacesFacet1417,
)
from onshape_client.oas.models.bt_export_tessellated_faces_response898 import (
BTExportTessellatedFacesResponse898,
)
from onshape_client.oas.models.bt_external_element_reference_info import (
BTExternalElementReferenceInfo,
)
from onshape_client.oas.models.bt_external_reference1936 import BTExternalReference1936
from onshape_client.oas.models.bt_external_reference1936_all_of import (
BTExternalReference1936AllOf,
)
from onshape_client.oas.models.bt_feature_api_base1430 import BTFeatureApiBase1430
from onshape_client.oas.models.bt_feature_definition_call1406 import (
BTFeatureDefinitionCall1406,
)
from onshape_client.oas.models.bt_feature_definition_call1406_all_of import (
BTFeatureDefinitionCall1406AllOf,
)
from onshape_client.oas.models.bt_feature_definition_response1617 import (
BTFeatureDefinitionResponse1617,
)
from onshape_client.oas.models.bt_feature_definition_response1617_all_of import (
BTFeatureDefinitionResponse1617AllOf,
)
from onshape_client.oas.models.bt_feature_filter127 import BTFeatureFilter127
from onshape_client.oas.models.bt_feature_filter127_all_of import (
BTFeatureFilter127AllOf,
)
from onshape_client.oas.models.bt_feature_list_response2457 import (
BTFeatureListResponse2457,
)
from onshape_client.oas.models.bt_feature_list_response2457_all_of import (
BTFeatureListResponse2457AllOf,
)
from onshape_client.oas.models.bt_feature_script_eval_call2377 import (
BTFeatureScriptEvalCall2377,
)
from onshape_client.oas.models.bt_feature_script_eval_response1859 import (
BTFeatureScriptEvalResponse1859,
)
from onshape_client.oas.models.bt_feature_spec129 import BTFeatureSpec129
from onshape_client.oas.models.bt_feature_specs_response664 import (
BTFeatureSpecsResponse664,
)
from onshape_client.oas.models.bt_feature_state1688 import BTFeatureState1688
from onshape_client.oas.models.bt_feature_studio_contents2239 import (
BTFeatureStudioContents2239,
)
from onshape_client.oas.models.bt_feature_studio_contents2239_all_of import (
BTFeatureStudioContents2239AllOf,
)
from onshape_client.oas.models.bt_feature_type_filter962 import BTFeatureTypeFilter962
from onshape_client.oas.models.bt_feature_type_filter962_all_of import (
BTFeatureTypeFilter962AllOf,
)
from onshape_client.oas.models.bt_flat_sheet_metal_filter3018 import (
BTFlatSheetMetalFilter3018,
)
from onshape_client.oas.models.bt_flat_sheet_metal_filter3018_all_of import (
BTFlatSheetMetalFilter3018AllOf,
)
from onshape_client.oas.models.bt_folder_info import BTFolderInfo
from onshape_client.oas.models.bt_folder_info_all_of import BTFolderInfoAllOf
from onshape_client.oas.models.bt_foreign_data_response1070 import (
BTForeignDataResponse1070,
)
from onshape_client.oas.models.bt_full_element_id756 import BTFullElementId756
from onshape_client.oas.models.bt_full_element_id_and_part_id643 import (
BTFullElementIdAndPartId643,
)
from onshape_client.oas.models.bt_full_element_id_and_part_id643_all_of import (
BTFullElementIdAndPartId643AllOf,
)
from onshape_client.oas.models.bt_full_element_id_with_document1729 import (
BTFullElementIdWithDocument1729,
)
from onshape_client.oas.models.bt_full_element_id_with_document1729_all_of import (
BTFullElementIdWithDocument1729AllOf,
)
from onshape_client.oas.models.bt_geometry_filter130 import BTGeometryFilter130
from onshape_client.oas.models.bt_geometry_filter130_all_of import (
BTGeometryFilter130AllOf,
)
from onshape_client.oas.models.bt_global_tree_magic_node_info import (
BTGlobalTreeMagicNodeInfo,
)
from onshape_client.oas.models.bt_global_tree_magic_node_info_all_of import (
BTGlobalTreeMagicNodeInfoAllOf,
)
from onshape_client.oas.models.bt_global_tree_node_info import BTGlobalTreeNodeInfo
from onshape_client.oas.models.bt_global_tree_node_list_response import (
BTGlobalTreeNodeListResponse,
)
from onshape_client.oas.models.bt_global_tree_node_list_response_bt_team_info import (
BTGlobalTreeNodeListResponseBTTeamInfo,
)
from onshape_client.oas.models.bt_graphics_appearance1152 import (
BTGraphicsAppearance1152,
)
from onshape_client.oas.models.bt_id_translation_info import BTIdTranslationInfo
from onshape_client.oas.models.bt_id_translation_params import BTIdTranslationParams
from onshape_client.oas.models.bt_id_translation_result_info import (
BTIdTranslationResultInfo,
)
from onshape_client.oas.models.bt_identity_info import BTIdentityInfo
from onshape_client.oas.models.bt_image_filter853 import BTImageFilter853
from onshape_client.oas.models.bt_image_filter853_all_of import BTImageFilter853AllOf
from onshape_client.oas.models.bt_import import BTImport
from onshape_client.oas.models.bt_inherited_acl_info import BTInheritedAclInfo
from onshape_client.oas.models.bt_inner_array_parameter_location2368 import (
BTInnerArrayParameterLocation2368,
)
from onshape_client.oas.models.bt_inner_array_parameter_location2368_all_of import (
BTInnerArrayParameterLocation2368AllOf,
)
from onshape_client.oas.models.bt_inner_derived_parameter_location591 import (
BTInnerDerivedParameterLocation591,
)
from onshape_client.oas.models.bt_inner_derived_parameter_location591_all_of import (
BTInnerDerivedParameterLocation591AllOf,
)
from onshape_client.oas.models.bt_inner_parameter_location1715 import (
BTInnerParameterLocation1715,
)
from onshape_client.oas.models.bt_insertable_info import BTInsertableInfo
from onshape_client.oas.models.bt_insertables_list_response import (
BTInsertablesListResponse,
)
from onshape_client.oas.models.bt_key_mouse_values_info import BTKeyMouseValuesInfo
from onshape_client.oas.models.bt_line_description1559 import BTLineDescription1559
from onshape_client.oas.models.bt_line_description1559_all_of import (
BTLineDescription1559AllOf,
)
from onshape_client.oas.models.bt_link_to_latest_document_info import (
BTLinkToLatestDocumentInfo,
)
from onshape_client.oas.models.bt_link_to_latest_document_params import (
BTLinkToLatestDocumentParams,
)
from onshape_client.oas.models.bt_list_response_bt_company_info import (
BTListResponseBTCompanyInfo,
)
from onshape_client.oas.models.bt_list_response_bt_insertable_info import (
BTListResponseBTInsertableInfo,
)
from onshape_client.oas.models.bt_list_response_bt_metadata_property_summary_info import (
BTListResponseBTMetadataPropertySummaryInfo,
)
from onshape_client.oas.models.bt_list_response_bt_purchase_info import (
BTListResponseBTPurchaseInfo,
)
from onshape_client.oas.models.bt_list_response_bt_revision_info import (
BTListResponseBTRevisionInfo,
)
from onshape_client.oas.models.bt_list_response_bt_translation_request_info import (
BTListResponseBTTranslationRequestInfo,
)
from onshape_client.oas.models.bt_list_response_bt_webhook_info import (
BTListResponseBTWebhookInfo,
)
from onshape_client.oas.models.bt_location_info226 import BTLocationInfo226
from onshape_client.oas.models.bt_mass_properties_bulk_info import (
BTMassPropertiesBulkInfo,
)
from onshape_client.oas.models.bt_mass_properties_info_null import (
BTMassPropertiesInfoNull,
)
from onshape_client.oas.models.bt_mate_connector_cs_info import BTMateConnectorCSInfo
from onshape_client.oas.models.bt_mate_connector_filter163 import (
BTMateConnectorFilter163,
)
from onshape_client.oas.models.bt_mate_connector_filter163_all_of import (
BTMateConnectorFilter163AllOf,
)
from onshape_client.oas.models.bt_mate_filter162 import BTMateFilter162
from onshape_client.oas.models.bt_material_library_metadata_info import (
BTMaterialLibraryMetadataInfo,
)
from onshape_client.oas.models.bt_material_library_settings_info import (
BTMaterialLibrarySettingsInfo,
)
from onshape_client.oas.models.bt_material_params import BTMaterialParams
from onshape_client.oas.models.bt_material_property_params import (
BTMaterialPropertyParams,
)
from onshape_client.oas.models.bt_metadata_category_summary_info import (
BTMetadataCategorySummaryInfo,
)
from onshape_client.oas.models.bt_metadata_enum_value_info import (
BTMetadataEnumValueInfo,
)
from onshape_client.oas.models.bt_metadata_property_config_summary_info import (
BTMetadataPropertyConfigSummaryInfo,
)
from onshape_client.oas.models.bt_metadata_property_info import BTMetadataPropertyInfo
from onshape_client.oas.models.bt_metadata_property_summary_info import (
BTMetadataPropertySummaryInfo,
)
from onshape_client.oas.models.bt_metadata_property_ui_hints_info import (
BTMetadataPropertyUiHintsInfo,
)
from onshape_client.oas.models.bt_metadata_property_validator_info import (
BTMetadataPropertyValidatorInfo,
)
from onshape_client.oas.models.bt_metadata_schema_info import BTMetadataSchemaInfo
from onshape_client.oas.models.bt_microversion_id366 import BTMicroversionId366
from onshape_client.oas.models.bt_microversion_id_and_configuration2338 import (
BTMicroversionIdAndConfiguration2338,
)
from onshape_client.oas.models.bt_microversion_info import BTMicroversionInfo
from onshape_client.oas.models.bt_model_element_params import BTModelElementParams
from onshape_client.oas.models.bt_model_format_full_info import BTModelFormatFullInfo
from onshape_client.oas.models.bt_model_format_info import BTModelFormatInfo
from onshape_client.oas.models.bt_modifiable_entity_only_filter1593 import (
BTModifiableEntityOnlyFilter1593,
)
from onshape_client.oas.models.bt_modifiable_entity_only_filter1593_all_of import (
BTModifiableEntityOnlyFilter1593AllOf,
)
from onshape_client.oas.models.bt_move_element_info import BTMoveElementInfo
from onshape_client.oas.models.bt_move_element_params import BTMoveElementParams
from onshape_client.oas.models.bt_name_value_pair import BTNameValuePair
from onshape_client.oas.models.bt_named_view_info import BTNamedViewInfo
from onshape_client.oas.models.bt_named_views_info import BTNamedViewsInfo
from onshape_client.oas.models.bt_node_reference21 import BTNodeReference21
from onshape_client.oas.models.bt_not_filter165 import BTNotFilter165
from onshape_client.oas.models.bt_not_filter165_all_of import BTNotFilter165AllOf
from onshape_client.oas.models.bt_notice227 import BTNotice227
from onshape_client.oas.models.bt_nullable_quantity_range1340 import (
BTNullableQuantityRange1340,
)
from onshape_client.oas.models.bt_nullable_quantity_range1340_all_of import (
BTNullableQuantityRange1340AllOf,
)
from onshape_client.oas.models.bt_object_id import BTObjectId
from onshape_client.oas.models.bt_occurrence74 import BTOccurrence74
from onshape_client.oas.models.bt_occurrence_filter166 import BTOccurrenceFilter166
from onshape_client.oas.models.bt_occurrence_filter166_all_of import (
BTOccurrenceFilter166AllOf,
)
from onshape_client.oas.models.bt_or_filter167 import BTOrFilter167
from onshape_client.oas.models.bt_owner3114 import BTOwner3114
from onshape_client.oas.models.bt_owner_info import BTOwnerInfo
from onshape_client.oas.models.bt_parameter_group_spec3469 import (
BTParameterGroupSpec3469,
)
from onshape_client.oas.models.bt_parameter_lookup_table_entry1667 import (
BTParameterLookupTableEntry1667,
)
from onshape_client.oas.models.bt_parameter_lookup_table_list_entry1916 import (
BTParameterLookupTableListEntry1916,
)
from onshape_client.oas.models.bt_parameter_spec6 import BTParameterSpec6
from onshape_client.oas.models.bt_parameter_spec_appearance1740 import (
BTParameterSpecAppearance1740,
)
from onshape_client.oas.models.bt_parameter_spec_array2600 import (
BTParameterSpecArray2600,
)
from onshape_client.oas.models.bt_parameter_spec_array2600_all_of import (
BTParameterSpecArray2600AllOf,
)
from onshape_client.oas.models.bt_parameter_spec_boolean170 import (
BTParameterSpecBoolean170,
)
from onshape_client.oas.models.bt_parameter_spec_database1071 import (
BTParameterSpecDatabase1071,
)
from onshape_client.oas.models.bt_parameter_spec_derived736 import (
BTParameterSpecDerived736,
)
from onshape_client.oas.models.bt_parameter_spec_enum171 import BTParameterSpecEnum171
from onshape_client.oas.models.bt_parameter_spec_enum171_all_of import (
BTParameterSpecEnum171AllOf,
)
from onshape_client.oas.models.bt_parameter_spec_feature_list703 import (
BTParameterSpecFeatureList703,
)
from onshape_client.oas.models.bt_parameter_spec_foreign_id172 import (
BTParameterSpecForeignId172,
)
from onshape_client.oas.models.bt_parameter_spec_lookup_table_path761 import (
BTParameterSpecLookupTablePath761,
)
from onshape_client.oas.models.bt_parameter_spec_lookup_table_path761_all_of import (
BTParameterSpecLookupTablePath761AllOf,
)
from onshape_client.oas.models.bt_parameter_spec_material2700 import (
BTParameterSpecMaterial2700,
)
from onshape_client.oas.models.bt_parameter_spec_nullable_quantity715 import (
BTParameterSpecNullableQuantity715,
)
from onshape_client.oas.models.bt_parameter_spec_nullable_quantity715_all_of import (
BTParameterSpecNullableQuantity715AllOf,
)
from onshape_client.oas.models.bt_parameter_spec_quantity173 import (
BTParameterSpecQuantity173,
)
from onshape_client.oas.models.bt_parameter_spec_quantity173_all_of import (
BTParameterSpecQuantity173AllOf,
)
from onshape_client.oas.models.bt_parameter_spec_query174 import BTParameterSpecQuery174
from onshape_client.oas.models.bt_parameter_spec_query174_all_of import (
BTParameterSpecQuery174AllOf,
)
from onshape_client.oas.models.bt_parameter_spec_reference2789 import (
BTParameterSpecReference2789,
)
from onshape_client.oas.models.bt_parameter_spec_reference_assembly2821 import (
BTParameterSpecReferenceAssembly2821,
)
from onshape_client.oas.models.bt_parameter_spec_reference_blob1367 import (
BTParameterSpecReferenceBlob1367,
)
from onshape_client.oas.models.bt_parameter_spec_reference_cad_import1792 import (
BTParameterSpecReferenceCADImport1792,
)
from onshape_client.oas.models.bt_parameter_spec_reference_image1722 import (
BTParameterSpecReferenceImage1722,
)
from onshape_client.oas.models.bt_parameter_spec_reference_json1816 import (
BTParameterSpecReferenceJSON1816,
)
from onshape_client.oas.models.bt_parameter_spec_reference_part_studio1256 import (
BTParameterSpecReferencePartStudio1256,
)
from onshape_client.oas.models.bt_parameter_spec_reference_part_studio1256_all_of import (
BTParameterSpecReferencePartStudio1256AllOf,
)
from onshape_client.oas.models.bt_parameter_spec_reference_table1520 import (
BTParameterSpecReferenceTable1520,
)
from onshape_client.oas.models.bt_parameter_spec_reference_with_configuration2950 import (
BTParameterSpecReferenceWithConfiguration2950,
)
from onshape_client.oas.models.bt_parameter_spec_reference_with_configuration2950_all_of import (
BTParameterSpecReferenceWithConfiguration2950AllOf,
)
from onshape_client.oas.models.bt_parameter_spec_string175 import (
BTParameterSpecString175,
)
from onshape_client.oas.models.bt_parameter_spec_string175_all_of import (
BTParameterSpecString175AllOf,
)
from onshape_client.oas.models.bt_parameter_visibility_always_hidden176 import (
BTParameterVisibilityAlwaysHidden176,
)
from onshape_client.oas.models.bt_parameter_visibility_condition177 import (
BTParameterVisibilityCondition177,
)
from onshape_client.oas.models.bt_parameter_visibility_logical178 import (
BTParameterVisibilityLogical178,
)
from onshape_client.oas.models.bt_parameter_visibility_logical178_all_of import (
BTParameterVisibilityLogical178AllOf,
)
from onshape_client.oas.models.bt_parameter_visibility_on_equal180 import (
BTParameterVisibilityOnEqual180,
)
from onshape_client.oas.models.bt_parameter_visibility_on_equal180_all_of import (
BTParameterVisibilityOnEqual180AllOf,
)
from onshape_client.oas.models.bt_parameter_visibility_on_mate_dof_type2114 import (
BTParameterVisibilityOnMateDOFType2114,
)
from onshape_client.oas.models.bt_part_appearance_info import BTPartAppearanceInfo
from onshape_client.oas.models.bt_part_appearance_params import BTPartAppearanceParams
from onshape_client.oas.models.bt_part_material1445 import BTPartMaterial1445
from onshape_client.oas.models.bt_part_material_info import BTPartMaterialInfo
from onshape_client.oas.models.bt_part_material_property1453 import (
BTPartMaterialProperty1453,
)
from onshape_client.oas.models.bt_part_material_property_info import (
BTPartMaterialPropertyInfo,
)
from onshape_client.oas.models.bt_part_metadata_info import BTPartMetadataInfo
from onshape_client.oas.models.bt_plan_subscriber_info import BTPlanSubscriberInfo
from onshape_client.oas.models.bt_plane_description692 import BTPlaneDescription692
from onshape_client.oas.models.bt_plane_description692_all_of import (
BTPlaneDescription692AllOf,
)
from onshape_client.oas.models.bt_plane_orientation_filter1700 import (
BTPlaneOrientationFilter1700,
)
from onshape_client.oas.models.bt_plane_orientation_filter1700_all_of import (
BTPlaneOrientationFilter1700AllOf,
)
from onshape_client.oas.models.bt_project_info import BTProjectInfo
from onshape_client.oas.models.bt_property_value_param import BTPropertyValueParam
from onshape_client.oas.models.bt_published_workflow_id import BTPublishedWorkflowId
from onshape_client.oas.models.bt_published_workflow_info import BTPublishedWorkflowInfo
from onshape_client.oas.models.bt_purchase_info import BTPurchaseInfo
from onshape_client.oas.models.bt_purchase_user_params import BTPurchaseUserParams
from onshape_client.oas.models.bt_quantity_range181 import BTQuantityRange181
from onshape_client.oas.models.bt_query_filter183 import BTQueryFilter183
from onshape_client.oas.models.bt_rbac_permission_scheme_info import (
BTRbacPermissionSchemeInfo,
)
from onshape_client.oas.models.bt_rbac_role_info import BTRbacRoleInfo
from onshape_client.oas.models.bt_release_comment_list_info import (
BTReleaseCommentListInfo,
)
from onshape_client.oas.models.bt_release_package_info import BTReleasePackageInfo
from onshape_client.oas.models.bt_release_package_item_error import (
BTReleasePackageItemError,
)
from onshape_client.oas.models.bt_release_package_item_info import (
BTReleasePackageItemInfo,
)
from onshape_client.oas.models.bt_release_package_item_params import (
BTReleasePackageItemParams,
)
from onshape_client.oas.models.bt_release_package_params import BTReleasePackageParams
from onshape_client.oas.models.bt_revision_approver_info import BTRevisionApproverInfo
from onshape_client.oas.models.bt_revision_info import BTRevisionInfo
from onshape_client.oas.models.bt_root_assembly_info import BTRootAssemblyInfo
from onshape_client.oas.models.bt_root_diff_info import BTRootDiffInfo
from onshape_client.oas.models.bt_section_plane_info import BTSectionPlaneInfo
from onshape_client.oas.models.bt_set_feature_rollback_call1899 import (
BTSetFeatureRollbackCall1899,
)
from onshape_client.oas.models.bt_set_feature_rollback_call1899_all_of import (
BTSetFeatureRollbackCall1899AllOf,
)
from onshape_client.oas.models.bt_set_feature_rollback_response1042 import (
BTSetFeatureRollbackResponse1042,
)
from onshape_client.oas.models.bt_setting_info import BTSettingInfo
from onshape_client.oas.models.bt_setting_param import BTSettingParam
from onshape_client.oas.models.bt_shaded_views_info import BTShadedViewsInfo
from onshape_client.oas.models.bt_share_entry_params import BTShareEntryParams
from onshape_client.oas.models.bt_share_params import BTShareParams
from onshape_client.oas.models.bt_sketch_object_filter184 import BTSketchObjectFilter184
from onshape_client.oas.models.bt_sketch_object_filter184_all_of import (
BTSketchObjectFilter184AllOf,
)
from onshape_client.oas.models.bt_sphere_description1263 import BTSphereDescription1263
from onshape_client.oas.models.bt_sphere_description1263_all_of import (
BTSphereDescription1263AllOf,
)
from onshape_client.oas.models.bt_spline_description2118 import BTSplineDescription2118
from onshape_client.oas.models.bt_spline_description2118_all_of import (
BTSplineDescription2118AllOf,
)
from onshape_client.oas.models.bt_spun_description657 import BTSpunDescription657
from onshape_client.oas.models.bt_spun_description657_all_of import (
BTSpunDescription657AllOf,
)
from onshape_client.oas.models.bt_string_format_block_pattern1755 import (
BTStringFormatBlockPattern1755,
)
from onshape_client.oas.models.bt_string_format_block_pattern1755_all_of import (
BTStringFormatBlockPattern1755AllOf,
)
from onshape_client.oas.models.bt_string_format_condition683 import (
BTStringFormatCondition683,
)
from onshape_client.oas.models.bt_string_format_match_pattern2446 import (
BTStringFormatMatchPattern2446,
)
from onshape_client.oas.models.bt_string_format_match_pattern2446_all_of import (
BTStringFormatMatchPattern2446AllOf,
)
from onshape_client.oas.models.bt_string_maximum_length_pattern2593 import (
BTStringMaximumLengthPattern2593,
)
from onshape_client.oas.models.bt_string_maximum_length_pattern2593_all_of import (
BTStringMaximumLengthPattern2593AllOf,
)
from onshape_client.oas.models.bt_string_minimum_length_pattern895 import (
BTStringMinimumLengthPattern895,
)
from onshape_client.oas.models.bt_string_minimum_length_pattern895_all_of import (
BTStringMinimumLengthPattern895AllOf,
)
from onshape_client.oas.models.bt_sub_assembly_info import BTSubAssemblyInfo
from onshape_client.oas.models.bt_substitute_approver_info import (
BTSubstituteApproverInfo,
)
from onshape_client.oas.models.bt_surface_description1564 import (
BTSurfaceDescription1564,
)
from onshape_client.oas.models.bt_sweep_description1473 import BTSweepDescription1473
from onshape_client.oas.models.bt_sweep_description1473_all_of import (
BTSweepDescription1473AllOf,
)
from onshape_client.oas.models.bt_table1825 import BTTable1825
from onshape_client.oas.models.bt_table_assembly_cross_highlight_data2675 import (
BTTableAssemblyCrossHighlightData2675,
)
from onshape_client.oas.models.bt_table_assembly_cross_highlight_data_item2659 import (
BTTableAssemblyCrossHighlightDataItem2659,
)
from onshape_client.oas.models.bt_table_base_cross_highlight_data2609 import (
BTTableBaseCrossHighlightData2609,
)
from onshape_client.oas.models.bt_table_base_row_metadata3181 import (
BTTableBaseRowMetadata3181,
)
from onshape_client.oas.models.bt_table_cell1114 import BTTableCell1114
from onshape_client.oas.models.bt_table_cell_parameter2399 import (
BTTableCellParameter2399,
)
from onshape_client.oas.models.bt_table_cell_parameter2399_all_of import (
BTTableCellParameter2399AllOf,
)
from onshape_client.oas.models.bt_table_cell_property_parameter2983 import (
BTTableCellPropertyParameter2983,
)
from onshape_client.oas.models.bt_table_cell_property_parameter2983_all_of import (
BTTableCellPropertyParameter2983AllOf,
)
from onshape_client.oas.models.bt_table_column_info1222 import BTTableColumnInfo1222
from onshape_client.oas.models.bt_table_column_spec1967 import BTTableColumnSpec1967
from onshape_client.oas.models.bt_table_cross_highlight_data1753 import (
BTTableCrossHighlightData1753,
)
from onshape_client.oas.models.bt_table_response1546 import BTTableResponse1546
from onshape_client.oas.models.bt_table_row1054 import BTTableRow1054
from onshape_client.oas.models.bt_table_spec915 import BTTableSpec915
from onshape_client.oas.models.bt_table_test_cell_double2509 import (
BTTableTestCellDouble2509,
)
from onshape_client.oas.models.bt_table_test_cell_double2509_all_of import (
BTTableTestCellDouble2509AllOf,
)
from onshape_client.oas.models.bt_table_test_cell_string2112 import (
BTTableTestCellString2112,
)
from onshape_client.oas.models.bt_table_test_cell_string2112_all_of import (
BTTableTestCellString2112AllOf,
)
from onshape_client.oas.models.bt_team_info import BTTeamInfo
from onshape_client.oas.models.bt_team_info_all_of import BTTeamInfoAllOf
from onshape_client.oas.models.bt_team_summary_info import BTTeamSummaryInfo
from onshape_client.oas.models.bt_text_object_filter1515 import BTTextObjectFilter1515
from onshape_client.oas.models.bt_text_object_filter1515_all_of import (
BTTextObjectFilter1515AllOf,
)
from onshape_client.oas.models.bt_text_stroke_filter461 import BTTextStrokeFilter461
from onshape_client.oas.models.bt_text_stroke_filter461_all_of import (
BTTextStrokeFilter461AllOf,
)
from onshape_client.oas.models.bt_thumbnail_info import BTThumbnailInfo
from onshape_client.oas.models.bt_thumbnail_size_info import BTThumbnailSizeInfo
from onshape_client.oas.models.bt_torus_description1834 import BTTorusDescription1834
from onshape_client.oas.models.bt_torus_description1834_all_of import (
BTTorusDescription1834AllOf,
)
from onshape_client.oas.models.bt_translate_format_params import BTTranslateFormatParams
from onshape_client.oas.models.bt_translation_request_info import (
BTTranslationRequestInfo,
)
from onshape_client.oas.models.bt_translation_request_params import (
BTTranslationRequestParams,
)
from onshape_client.oas.models.bt_tree_node20 import BTTreeNode20
from onshape_client.oas.models.bt_unique_document_item_params import (
BTUniqueDocumentItemParams,
)
from onshape_client.oas.models.bt_units_display_precision import BTUnitsDisplayPrecision
from onshape_client.oas.models.bt_units_maximum_display_precision_info import (
BTUnitsMaximumDisplayPrecisionInfo,
)
from onshape_client.oas.models.bt_update_features_call1748 import (
BTUpdateFeaturesCall1748,
)
from onshape_client.oas.models.bt_update_features_call1748_all_of import (
BTUpdateFeaturesCall1748AllOf,
)
from onshape_client.oas.models.bt_update_features_response1333 import (
BTUpdateFeaturesResponse1333,
)
from onshape_client.oas.models.bt_update_features_response1333_all_of import (
BTUpdateFeaturesResponse1333AllOf,
)
from onshape_client.oas.models.bt_update_mesh_units_params import (
BTUpdateMeshUnitsParams,
)
from onshape_client.oas.models.bt_update_reference_params import BTUpdateReferenceParams
from onshape_client.oas.models.bt_update_release_package_params import (
BTUpdateReleasePackageParams,
)
from onshape_client.oas.models.bt_update_workflowable_test_object_params import (
BTUpdateWorkflowableTestObjectParams,
)
from onshape_client.oas.models.bt_user_app_settings_info import BTUserAppSettingsInfo
from onshape_client.oas.models.bt_user_app_settings_params import (
BTUserAppSettingsParams,
)
from onshape_client.oas.models.bt_user_basic_summary_info import BTUserBasicSummaryInfo
from onshape_client.oas.models.bt_user_o_auth2_summary_info import (
BTUserOAuth2SummaryInfo,
)
from onshape_client.oas.models.bt_user_settings_info import BTUserSettingsInfo
from onshape_client.oas.models.bt_user_summary_info import BTUserSummaryInfo
from onshape_client.oas.models.bt_user_summary_info_all_of import BTUserSummaryInfoAllOf
from onshape_client.oas.models.bt_vector2d1812 import BTVector2d1812
from onshape_client.oas.models.bt_vector3d389 import BTVector3d389
from onshape_client.oas.models.bt_version_info import BTVersionInfo
from onshape_client.oas.models.bt_version_info_all_of import BTVersionInfoAllOf
from onshape_client.oas.models.bt_version_info_base import BTVersionInfoBase
from onshape_client.oas.models.bt_version_info_base_all_of import BTVersionInfoBaseAllOf
from onshape_client.oas.models.bt_version_or_workspace_info import (
BTVersionOrWorkspaceInfo,
)
from onshape_client.oas.models.bt_version_or_workspace_params import (
BTVersionOrWorkspaceParams,
)
from onshape_client.oas.models.bt_view_data_info import BTViewDataInfo
from onshape_client.oas.models.bt_view_manipulation_mouse_key_mapping_info import (
BTViewManipulationMouseKeyMappingInfo,
)
from onshape_client.oas.models.bt_webhook_info import BTWebhookInfo
from onshape_client.oas.models.bt_webhook_options import BTWebhookOptions
from onshape_client.oas.models.bt_webhook_params import BTWebhookParams
from onshape_client.oas.models.bt_workflow_action_info import BTWorkflowActionInfo
from onshape_client.oas.models.bt_workflow_property_info import BTWorkflowPropertyInfo
from onshape_client.oas.models.bt_workflow_snapshot_info import BTWorkflowSnapshotInfo
from onshape_client.oas.models.bt_workflow_state_info import BTWorkflowStateInfo
from onshape_client.oas.models.bt_workflowable_test_object_info import (
BTWorkflowableTestObjectInfo,
)
from onshape_client.oas.models.bt_workspace_info import BTWorkspaceInfo
from onshape_client.oas.models.bt_workspace_part_params import BTWorkspacePartParams
from onshape_client.oas.models.btapi_application_summary_info import (
BTAPIApplicationSummaryInfo,
)
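
# FeatureScript table and value models (BTFS*)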
from onshape_client.oas.models.btfs_table953 import BTFSTable953
from onshape_client.oas.models.btfs_table953_all_of import BTFSTable953AllOf
from onshape_client.oas.models.btfs_table_column_info623 import BTFSTableColumnInfo623
from onshape_client.oas.models.btfs_table_row_metadata2262 import (
BTFSTableRowMetadata2262,
)
from onshape_client.oas.models.btfs_table_row_metadata2262_all_of import (
BTFSTableRowMetadata2262AllOf,
)
from onshape_client.oas.models.btfs_value1888 import BTFSValue1888
from onshape_client.oas.models.btfs_value_array1499 import BTFSValueArray1499
from onshape_client.oas.models.btfs_value_array1499_all_of import (
BTFSValueArray1499AllOf,
)
from onshape_client.oas.models.btfs_value_boolean1195 import BTFSValueBoolean1195
from onshape_client.oas.models.btfs_value_boolean1195_all_of import (
BTFSValueBoolean1195AllOf,
)
from onshape_client.oas.models.btfs_value_map2062 import BTFSValueMap2062
from onshape_client.oas.models.btfs_value_map_entry2077 import BTFSValueMapEntry2077
from onshape_client.oas.models.btfs_value_number772 import BTFSValueNumber772
from onshape_client.oas.models.btfs_value_number772_all_of import (
BTFSValueNumber772AllOf,
)
from onshape_client.oas.models.btfs_value_other1124 import BTFSValueOther1124
from onshape_client.oas.models.btfs_value_other1124_all_of import (
BTFSValueOther1124AllOf,
)
from onshape_client.oas.models.btfs_value_string1422 import BTFSValueString1422
from onshape_client.oas.models.btfs_value_string1422_all_of import (
BTFSValueString1422AllOf,
)
from onshape_client.oas.models.btfs_value_too_big1247 import BTFSValueTooBig1247
from onshape_client.oas.models.btfs_value_undefined2003 import BTFSValueUndefined2003
from onshape_client.oas.models.btfs_value_with_units1817 import BTFSValueWithUnits1817
from onshape_client.oas.models.btfs_value_with_units1817_all_of import (
BTFSValueWithUnits1817AllOf,
)
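
# Document model entities such as features, sketches, and parameters (BTM*)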
from onshape_client.oas.models.btm_array_parameter_item1843 import (
BTMArrayParameterItem1843,
)
from onshape_client.oas.models.btm_assembly_feature12218 import BTMAssemblyFeature12218
from onshape_client.oas.models.btm_assembly_feature21022 import BTMAssemblyFeature21022
from onshape_client.oas.models.btm_assembly_feature887 import BTMAssemblyFeature887
from onshape_client.oas.models.btm_assembly_feature887_all_of import (
BTMAssemblyFeature887AllOf,
)
from onshape_client.oas.models.btm_assembly_pattern_feature2241 import (
BTMAssemblyPatternFeature2241,
)
from onshape_client.oas.models.btm_assembly_pattern_feature2241_all_of import (
BTMAssemblyPatternFeature2241AllOf,
)
from onshape_client.oas.models.btm_configuration_parameter819 import (
BTMConfigurationParameter819,
)
from onshape_client.oas.models.btm_configuration_parameter_boolean2550 import (
BTMConfigurationParameterBoolean2550,
)
from onshape_client.oas.models.btm_configuration_parameter_boolean2550_all_of import (
BTMConfigurationParameterBoolean2550AllOf,
)
from onshape_client.oas.models.btm_configuration_parameter_enum105 import (
BTMConfigurationParameterEnum105,
)
from onshape_client.oas.models.btm_configuration_parameter_enum105_all_of import (
BTMConfigurationParameterEnum105AllOf,
)
from onshape_client.oas.models.btm_configuration_parameter_quantity1826 import (
BTMConfigurationParameterQuantity1826,
)
from onshape_client.oas.models.btm_configuration_parameter_quantity1826_all_of import (
BTMConfigurationParameterQuantity1826AllOf,
)
from onshape_client.oas.models.btm_configuration_parameter_string872 import (
BTMConfigurationParameterString872,
)
from onshape_client.oas.models.btm_configuration_parameter_string872_all_of import (
BTMConfigurationParameterString872AllOf,
)
from onshape_client.oas.models.btm_configured_value1341 import BTMConfiguredValue1341
from onshape_client.oas.models.btm_configured_value_by_boolean1501 import (
BTMConfiguredValueByBoolean1501,
)
from onshape_client.oas.models.btm_configured_value_by_boolean1501_all_of import (
BTMConfiguredValueByBoolean1501AllOf,
)
from onshape_client.oas.models.btm_configured_value_by_enum1923 import (
BTMConfiguredValueByEnum1923,
)
from onshape_client.oas.models.btm_configured_value_by_enum1923_all_of import (
BTMConfiguredValueByEnum1923AllOf,
)
from onshape_client.oas.models.btm_database_parameter2229 import (
BTMDatabaseParameter2229,
)
from onshape_client.oas.models.btm_database_parameter2229_all_of import (
BTMDatabaseParameter2229AllOf,
)
from onshape_client.oas.models.btm_enum_option592 import BTMEnumOption592
from onshape_client.oas.models.btm_feature134 import BTMFeature134
from onshape_client.oas.models.btm_feature_invalid1031 import BTMFeatureInvalid1031
from onshape_client.oas.models.btm_feature_query_with_occurrence157 import (
BTMFeatureQueryWithOccurrence157,
)
from onshape_client.oas.models.btm_feature_query_with_occurrence157_all_of import (
BTMFeatureQueryWithOccurrence157AllOf,
)
from onshape_client.oas.models.btm_geometry_mate1260 import BTMGeometryMate1260
from onshape_client.oas.models.btm_import136 import BTMImport136
from onshape_client.oas.models.btm_in_context_query2254 import BTMInContextQuery2254
from onshape_client.oas.models.btm_in_context_query2254_all_of import (
BTMInContextQuery2254AllOf,
)
from onshape_client.oas.models.btm_individual_co_edge_query1332 import (
BTMIndividualCoEdgeQuery1332,
)
from onshape_client.oas.models.btm_individual_co_edge_query1332_all_of import (
BTMIndividualCoEdgeQuery1332AllOf,
)
from onshape_client.oas.models.btm_individual_created_by_query137 import (
BTMIndividualCreatedByQuery137,
)
from onshape_client.oas.models.btm_individual_created_by_query137_all_of import (
BTMIndividualCreatedByQuery137AllOf,
)
from onshape_client.oas.models.btm_individual_occurrence_query626 import (
BTMIndividualOccurrenceQuery626,
)
from onshape_client.oas.models.btm_individual_query138 import BTMIndividualQuery138
from onshape_client.oas.models.btm_individual_query138_all_of import (
BTMIndividualQuery138AllOf,
)
from onshape_client.oas.models.btm_individual_query_base139 import (
BTMIndividualQueryBase139,
)
from onshape_client.oas.models.btm_individual_query_with_occurrence811 import (
BTMIndividualQueryWithOccurrence811,
)
from onshape_client.oas.models.btm_individual_query_with_occurrence811_all_of import (
BTMIndividualQueryWithOccurrence811AllOf,
)
from onshape_client.oas.models.btm_individual_query_with_occurrence_base904 import (
BTMIndividualQueryWithOccurrenceBase904,
)
from onshape_client.oas.models.btm_individual_sketch_region_query140 import (
BTMIndividualSketchRegionQuery140,
)
from onshape_client.oas.models.btm_individual_sketch_region_query140_all_of import (
BTMIndividualSketchRegionQuery140AllOf,
)
from onshape_client.oas.models.btm_individual_sketch_unique_vertices_query1472 import (
BTMIndividualSketchUniqueVerticesQuery1472,
)
from onshape_client.oas.models.btm_individual_sketch_unique_vertices_query1472_all_of import (
BTMIndividualSketchUniqueVerticesQuery1472AllOf,
)
from onshape_client.oas.models.btm_inference_query_with_occurrence1083 import (
BTMInferenceQueryWithOccurrence1083,
)
from onshape_client.oas.models.btm_inference_query_with_occurrence1083_all_of import (
BTMInferenceQueryWithOccurrence1083AllOf,
)
from onshape_client.oas.models.btm_mate64 import BTMMate64
from onshape_client.oas.models.btm_mate64_all_of import BTMMate64AllOf
from onshape_client.oas.models.btm_mate_connector66 import BTMMateConnector66
from onshape_client.oas.models.btm_mate_connector66_all_of import (
BTMMateConnector66AllOf,
)
from onshape_client.oas.models.btm_mate_group65 import BTMMateGroup65
from onshape_client.oas.models.btm_mate_relation1412 import BTMMateRelation1412
from onshape_client.oas.models.btm_non_geometric_item1864 import BTMNonGeometricItem1864
from onshape_client.oas.models.btm_non_geometric_item1864_all_of import (
BTMNonGeometricItem1864AllOf,
)
from onshape_client.oas.models.btm_parameter1 import BTMParameter1
from onshape_client.oas.models.btm_parameter_appearance627 import (
BTMParameterAppearance627,
)
from onshape_client.oas.models.btm_parameter_appearance627_all_of import (
BTMParameterAppearance627AllOf,
)
from onshape_client.oas.models.btm_parameter_array2025 import BTMParameterArray2025
from onshape_client.oas.models.btm_parameter_array2025_all_of import (
BTMParameterArray2025AllOf,
)
from onshape_client.oas.models.btm_parameter_blob_reference1679 import (
BTMParameterBlobReference1679,
)
from onshape_client.oas.models.btm_parameter_blob_reference1679_all_of import (
BTMParameterBlobReference1679AllOf,
)
from onshape_client.oas.models.btm_parameter_boolean144 import BTMParameterBoolean144
from onshape_client.oas.models.btm_parameter_boolean144_all_of import (
BTMParameterBoolean144AllOf,
)
from onshape_client.oas.models.btm_parameter_configured2222 import (
BTMParameterConfigured2222,
)
from onshape_client.oas.models.btm_parameter_configured2222_all_of import (
BTMParameterConfigured2222AllOf,
)
from onshape_client.oas.models.btm_parameter_derived864 import BTMParameterDerived864
from onshape_client.oas.models.btm_parameter_derived864_all_of import (
BTMParameterDerived864AllOf,
)
from onshape_client.oas.models.btm_parameter_enum145 import BTMParameterEnum145
from onshape_client.oas.models.btm_parameter_enum145_all_of import (
BTMParameterEnum145AllOf,
)
from onshape_client.oas.models.btm_parameter_feature_list1749 import (
BTMParameterFeatureList1749,
)
from onshape_client.oas.models.btm_parameter_feature_list1749_all_of import (
BTMParameterFeatureList1749AllOf,
)
from onshape_client.oas.models.btm_parameter_foreign_id146 import (
BTMParameterForeignId146,
)
from onshape_client.oas.models.btm_parameter_foreign_id146_all_of import (
BTMParameterForeignId146AllOf,
)
from onshape_client.oas.models.btm_parameter_invalid1664 import BTMParameterInvalid1664
from onshape_client.oas.models.btm_parameter_lookup_table_path1419 import (
BTMParameterLookupTablePath1419,
)
from onshape_client.oas.models.btm_parameter_lookup_table_path1419_all_of import (
BTMParameterLookupTablePath1419AllOf,
)
from onshape_client.oas.models.btm_parameter_material1388 import (
BTMParameterMaterial1388,
)
from onshape_client.oas.models.btm_parameter_material1388_all_of import (
BTMParameterMaterial1388AllOf,
)
from onshape_client.oas.models.btm_parameter_nullable_quantity807 import (
BTMParameterNullableQuantity807,
)
from onshape_client.oas.models.btm_parameter_nullable_quantity807_all_of import (
BTMParameterNullableQuantity807AllOf,
)
from onshape_client.oas.models.btm_parameter_quantity147 import BTMParameterQuantity147
from onshape_client.oas.models.btm_parameter_quantity147_all_of import (
BTMParameterQuantity147AllOf,
)
from onshape_client.oas.models.btm_parameter_query_list148 import (
BTMParameterQueryList148,
)
from onshape_client.oas.models.btm_parameter_query_list148_all_of import (
BTMParameterQueryList148AllOf,
)
from onshape_client.oas.models.btm_parameter_query_with_occurrence_list67 import (
BTMParameterQueryWithOccurrenceList67,
)
from onshape_client.oas.models.btm_parameter_query_with_occurrence_list67_all_of import (
BTMParameterQueryWithOccurrenceList67AllOf,
)
from onshape_client.oas.models.btm_parameter_reference2434 import (
BTMParameterReference2434,
)
from onshape_client.oas.models.btm_parameter_reference2434_all_of import (
BTMParameterReference2434AllOf,
)
from onshape_client.oas.models.btm_parameter_reference_assembly938 import (
BTMParameterReferenceAssembly938,
)
from onshape_client.oas.models.btm_parameter_reference_blob3281 import (
BTMParameterReferenceBlob3281,
)
from onshape_client.oas.models.btm_parameter_reference_cad_import2016 import (
BTMParameterReferenceCADImport2016,
)
from onshape_client.oas.models.btm_parameter_reference_image2014 import (
BTMParameterReferenceImage2014,
)
from onshape_client.oas.models.btm_parameter_reference_json790 import (
BTMParameterReferenceJSON790,
)
from onshape_client.oas.models.btm_parameter_reference_part_studio3302 import (
BTMParameterReferencePartStudio3302,
)
from onshape_client.oas.models.btm_parameter_reference_part_studio3302_all_of import (
BTMParameterReferencePartStudio3302AllOf,
)
from onshape_client.oas.models.btm_parameter_reference_table917 import (
BTMParameterReferenceTable917,
)
from onshape_client.oas.models.btm_parameter_reference_with_configuration3028 import (
BTMParameterReferenceWithConfiguration3028,
)
from onshape_client.oas.models.btm_parameter_string149 import BTMParameterString149
from onshape_client.oas.models.btm_parameter_string149_all_of import (
BTMParameterString149AllOf,
)
from onshape_client.oas.models.btm_part_studio_mate_connector_query1324 import (
BTMPartStudioMateConnectorQuery1324,
)
from onshape_client.oas.models.btm_sketch151 import BTMSketch151
from onshape_client.oas.models.btm_sketch151_all_of import BTMSketch151AllOf
from onshape_client.oas.models.btm_sketch_constraint2 import BTMSketchConstraint2
from onshape_client.oas.models.btm_sketch_curve4 import BTMSketchCurve4
from onshape_client.oas.models.btm_sketch_curve4_all_of import BTMSketchCurve4AllOf
from onshape_client.oas.models.btm_sketch_curve_segment155 import (
BTMSketchCurveSegment155,
)
from onshape_client.oas.models.btm_sketch_curve_segment155_all_of import (
BTMSketchCurveSegment155AllOf,
)
from onshape_client.oas.models.btm_sketch_geom_entity5 import BTMSketchGeomEntity5
from onshape_client.oas.models.btm_sketch_image_entity763 import BTMSketchImageEntity763
from onshape_client.oas.models.btm_sketch_image_entity763_all_of import (
BTMSketchImageEntity763AllOf,
)
from onshape_client.oas.models.btm_sketch_point158 import BTMSketchPoint158
from onshape_client.oas.models.btm_sketch_point158_all_of import BTMSketchPoint158AllOf
from onshape_client.oas.models.btm_sketch_text_entity1761 import BTMSketchTextEntity1761
from onshape_client.oas.models.btm_sketch_text_entity1761_all_of import (
BTMSketchTextEntity1761AllOf,
)
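
# FeatureScript parse-tree node models (BTP*)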
from onshape_client.oas.models.btp_annotation231 import BTPAnnotation231
from onshape_client.oas.models.btp_argument_declaration232 import (
BTPArgumentDeclaration232,
)
from onshape_client.oas.models.btp_argument_declaration232_all_of import (
BTPArgumentDeclaration232AllOf,
)
from onshape_client.oas.models.btp_builtin_identifier233 import BTPBuiltinIdentifier233
from onshape_client.oas.models.btp_builtin_identifier233_all_of import (
BTPBuiltinIdentifier233AllOf,
)
from onshape_client.oas.models.btp_conversion_function1362 import (
BTPConversionFunction1362,
)
from onshape_client.oas.models.btp_conversion_function1362_all_of import (
BTPConversionFunction1362AllOf,
)
from onshape_client.oas.models.btp_expression9 import BTPExpression9
from onshape_client.oas.models.btp_expression_access237 import BTPExpressionAccess237
from onshape_client.oas.models.btp_expression_access237_all_of import (
BTPExpressionAccess237AllOf,
)
from onshape_client.oas.models.btp_expression_as238 import BTPExpressionAs238
from onshape_client.oas.models.btp_expression_as238_all_of import (
BTPExpressionAs238AllOf,
)
from onshape_client.oas.models.btp_expression_builtin_call239 import (
BTPExpressionBuiltinCall239,
)
from onshape_client.oas.models.btp_expression_builtin_call239_all_of import (
BTPExpressionBuiltinCall239AllOf,
)
from onshape_client.oas.models.btp_expression_call240 import BTPExpressionCall240
from onshape_client.oas.models.btp_expression_call240_all_of import (
BTPExpressionCall240AllOf,
)
from onshape_client.oas.models.btp_expression_function1325 import (
BTPExpressionFunction1325,
)
from onshape_client.oas.models.btp_expression_function1325_all_of import (
BTPExpressionFunction1325AllOf,
)
from onshape_client.oas.models.btp_expression_group241 import BTPExpressionGroup241
from onshape_client.oas.models.btp_expression_group241_all_of import (
BTPExpressionGroup241AllOf,
)
from onshape_client.oas.models.btp_expression_is242 import BTPExpressionIs242
from onshape_client.oas.models.btp_expression_new243 import BTPExpressionNew243
from onshape_client.oas.models.btp_expression_new243_all_of import (
BTPExpressionNew243AllOf,
)
from onshape_client.oas.models.btp_expression_operator244 import (
BTPExpressionOperator244,
)
from onshape_client.oas.models.btp_expression_operator244_all_of import (
BTPExpressionOperator244AllOf,
)
from onshape_client.oas.models.btp_expression_switch2632 import BTPExpressionSwitch2632
from onshape_client.oas.models.btp_expression_switch2632_all_of import (
BTPExpressionSwitch2632AllOf,
)
from onshape_client.oas.models.btp_expression_try1271 import BTPExpressionTry1271
from onshape_client.oas.models.btp_expression_try1271_all_of import (
BTPExpressionTry1271AllOf,
)
from onshape_client.oas.models.btp_expression_var_reference245 import (
BTPExpressionVarReference245,
)
from onshape_client.oas.models.btp_expression_var_reference245_all_of import (
BTPExpressionVarReference245AllOf,
)
from onshape_client.oas.models.btp_function_declaration246 import (
BTPFunctionDeclaration246,
)
from onshape_client.oas.models.btp_function_or_predicate_declaration247 import (
BTPFunctionOrPredicateDeclaration247,
)
from onshape_client.oas.models.btp_function_or_predicate_declaration247_all_of import (
BTPFunctionOrPredicateDeclaration247AllOf,
)
from onshape_client.oas.models.btp_identifier8 import BTPIdentifier8
from onshape_client.oas.models.btp_identifier8_all_of import BTPIdentifier8AllOf
from onshape_client.oas.models.btp_literal253 import BTPLiteral253
from onshape_client.oas.models.btp_literal_array254 import BTPLiteralArray254
from onshape_client.oas.models.btp_literal_array254_all_of import (
BTPLiteralArray254AllOf,
)
from onshape_client.oas.models.btp_literal_boolean255 import BTPLiteralBoolean255
from onshape_client.oas.models.btp_literal_map256 import BTPLiteralMap256
from onshape_client.oas.models.btp_literal_map_entry257 import BTPLiteralMapEntry257
from onshape_client.oas.models.btp_literal_map_entry257_all_of import (
BTPLiteralMapEntry257AllOf,
)
from onshape_client.oas.models.btp_literal_number258 import BTPLiteralNumber258
from onshape_client.oas.models.btp_literal_string259 import BTPLiteralString259
from onshape_client.oas.models.btp_literal_string259_all_of import (
BTPLiteralString259AllOf,
)
from onshape_client.oas.models.btp_literal_undefined260 import BTPLiteralUndefined260
from onshape_client.oas.models.btp_module234 import BTPModule234
from onshape_client.oas.models.btp_module234_all_of import BTPModule234AllOf
from onshape_client.oas.models.btp_module_id235 import BTPModuleId235
from onshape_client.oas.models.btp_module_id235_all_of import BTPModuleId235AllOf
from onshape_client.oas.models.btp_name261 import BTPName261
from onshape_client.oas.models.btp_name261_all_of import BTPName261AllOf
from onshape_client.oas.models.btp_node7 import BTPNode7
from onshape_client.oas.models.btp_operator_declaration264 import (
BTPOperatorDeclaration264,
)
from onshape_client.oas.models.btp_operator_declaration264_all_of import (
BTPOperatorDeclaration264AllOf,
)
from onshape_client.oas.models.btp_predicate_declaration265 import (
BTPPredicateDeclaration265,
)
from onshape_client.oas.models.btp_procedure_declaration_base266 import (
BTPProcedureDeclarationBase266,
)
from onshape_client.oas.models.btp_procedure_declaration_base266_all_of import (
BTPProcedureDeclarationBase266AllOf,
)
from onshape_client.oas.models.btp_property_accessor23 import BTPPropertyAccessor23
from onshape_client.oas.models.btp_space10 import BTPSpace10
from onshape_client.oas.models.btp_statement269 import BTPStatement269
from onshape_client.oas.models.btp_statement269_all_of import BTPStatement269AllOf
from onshape_client.oas.models.btp_statement_assignment270 import (
BTPStatementAssignment270,
)
from onshape_client.oas.models.btp_statement_assignment270_all_of import (
BTPStatementAssignment270AllOf,
)
from onshape_client.oas.models.btp_statement_block271 import BTPStatementBlock271
from onshape_client.oas.models.btp_statement_block271_all_of import (
BTPStatementBlock271AllOf,
)
from onshape_client.oas.models.btp_statement_break272 import BTPStatementBreak272
from onshape_client.oas.models.btp_statement_break272_all_of import (
BTPStatementBreak272AllOf,
)
from onshape_client.oas.models.btp_statement_compressed_query1237 import (
BTPStatementCompressedQuery1237,
)
from onshape_client.oas.models.btp_statement_compressed_query1237_all_of import (
BTPStatementCompressedQuery1237AllOf,
)
from onshape_client.oas.models.btp_statement_constant_declaration273 import (
BTPStatementConstantDeclaration273,
)
from onshape_client.oas.models.btp_statement_constant_declaration273_all_of import (
BTPStatementConstantDeclaration273AllOf,
)
from onshape_client.oas.models.btp_statement_continue274 import BTPStatementContinue274
from onshape_client.oas.models.btp_statement_continue274_all_of import (
BTPStatementContinue274AllOf,
)
from onshape_client.oas.models.btp_statement_expression275 import (
BTPStatementExpression275,
)
from onshape_client.oas.models.btp_statement_expression275_all_of import (
BTPStatementExpression275AllOf,
)
from onshape_client.oas.models.btp_statement_if276 import BTPStatementIf276
from onshape_client.oas.models.btp_statement_if276_all_of import BTPStatementIf276AllOf
from onshape_client.oas.models.btp_statement_loop277 import BTPStatementLoop277
from onshape_client.oas.models.btp_statement_loop277_all_of import (
BTPStatementLoop277AllOf,
)
from onshape_client.oas.models.btp_statement_loop_for3278 import BTPStatementLoopFor3278
from onshape_client.oas.models.btp_statement_loop_for3278_all_of import (
BTPStatementLoopFor3278AllOf,
)
from onshape_client.oas.models.btp_statement_loop_for_in279 import (
BTPStatementLoopForIn279,
)
from onshape_client.oas.models.btp_statement_loop_for_in279_all_of import (
BTPStatementLoopForIn279AllOf,
)
from onshape_client.oas.models.btp_statement_loop_while280 import (
BTPStatementLoopWhile280,
)
from onshape_client.oas.models.btp_statement_loop_while280_all_of import (
BTPStatementLoopWhile280AllOf,
)
from onshape_client.oas.models.btp_statement_return281 import BTPStatementReturn281
from onshape_client.oas.models.btp_statement_return281_all_of import (
BTPStatementReturn281AllOf,
)
from onshape_client.oas.models.btp_statement_throw1080 import BTPStatementThrow1080
from onshape_client.oas.models.btp_statement_throw1080_all_of import (
BTPStatementThrow1080AllOf,
)
from onshape_client.oas.models.btp_statement_try1523 import BTPStatementTry1523
from onshape_client.oas.models.btp_statement_try1523_all_of import (
BTPStatementTry1523AllOf,
)
from onshape_client.oas.models.btp_statement_var_declaration282 import (
BTPStatementVarDeclaration282,
)
from onshape_client.oas.models.btp_top_level_constant_declaration283 import (
BTPTopLevelConstantDeclaration283,
)
from onshape_client.oas.models.btp_top_level_constant_declaration283_all_of import (
BTPTopLevelConstantDeclaration283AllOf,
)
from onshape_client.oas.models.btp_top_level_enum_declaration284 import (
BTPTopLevelEnumDeclaration284,
)
from onshape_client.oas.models.btp_top_level_enum_declaration284_all_of import (
BTPTopLevelEnumDeclaration284AllOf,
)
from onshape_client.oas.models.btp_top_level_import285 import BTPTopLevelImport285
from onshape_client.oas.models.btp_top_level_import285_all_of import (
BTPTopLevelImport285AllOf,
)
from onshape_client.oas.models.btp_top_level_node286 import BTPTopLevelNode286
from onshape_client.oas.models.btp_top_level_node286_all_of import (
BTPTopLevelNode286AllOf,
)
from onshape_client.oas.models.btp_top_level_type_declaration287 import (
BTPTopLevelTypeDeclaration287,
)
from onshape_client.oas.models.btp_top_level_type_declaration287_all_of import (
BTPTopLevelTypeDeclaration287AllOf,
)
from onshape_client.oas.models.btp_top_level_user_type_declaration288 import (
BTPTopLevelUserTypeDeclaration288,
)
from onshape_client.oas.models.btp_top_level_user_type_declaration288_all_of import (
BTPTopLevelUserTypeDeclaration288AllOf,
)
from onshape_client.oas.models.btp_type_name290 import BTPTypeName290
from onshape_client.oas.models.btp_type_name_standard291 import BTPTypeNameStandard291
from onshape_client.oas.models.btp_type_name_user292 import BTPTypeNameUser292
from onshape_client.oas.models.btp_type_name_user292_all_of import (
BTPTypeNameUser292AllOf,
)
from onshape_client.oas.models.btpl_value249 import BTPLValue249
from onshape_client.oas.models.btpl_value_access250 import BTPLValueAccess250
from onshape_client.oas.models.btpl_value_access250_all_of import (
BTPLValueAccess250AllOf,
)
from onshape_client.oas.models.btpl_value_box_dereference251 import (
BTPLValueBoxDereference251,
)
from onshape_client.oas.models.btpl_value_box_dereference251_all_of import (
BTPLValueBoxDereference251AllOf,
)
from onshape_client.oas.models.btpl_value_var_reference252 import (
BTPLValueVarReference252,
)
from onshape_client.oas.models.btsm_bend_table_row_metadata1705 import (
BTSMBendTableRowMetadata1705,
)
from onshape_client.oas.models.btsm_bend_table_row_metadata1705_all_of import (
BTSMBendTableRowMetadata1705AllOf,
)
from onshape_client.oas.models.btsm_definition_entity_type_filter1651 import (
BTSMDefinitionEntityTypeFilter1651,
)
from onshape_client.oas.models.btsm_definition_entity_type_filter1651_all_of import (
BTSMDefinitionEntityTypeFilter1651AllOf,
)
from onshape_client.oas.models.btsm_other_joint_table_row_metadata2640 import (
BTSMOtherJointTableRowMetadata2640,
)
from onshape_client.oas.models.card import Card
from onshape_client.oas.models.configuration_entry import ConfigurationEntry
from onshape_client.oas.models.configuration_info_entry import ConfigurationInfoEntry
from onshape_client.oas.models.content_disposition import ContentDisposition
from onshape_client.oas.models.coupon import Coupon
from onshape_client.oas.models.customer import Customer
from onshape_client.oas.models.customer_card_collection import CustomerCardCollection
from onshape_client.oas.models.customer_subscription_collection import (
CustomerSubscriptionCollection,
)
from onshape_client.oas.models.discount import Discount
from onshape_client.oas.models.entry import Entry
from onshape_client.oas.models.external_account import ExternalAccount
from onshape_client.oas.models.external_account_collection import (
ExternalAccountCollection,
)
from onshape_client.oas.models.form_data_body_part import FormDataBodyPart
from onshape_client.oas.models.form_data_content_disposition import (
FormDataContentDisposition,
)
from onshape_client.oas.models.global_permission_info import GlobalPermissionInfo
from onshape_client.oas.models.json_node import JsonNode
from onshape_client.oas.models.multi_part import MultiPart
from onshape_client.oas.models.next_charge import NextCharge
from onshape_client.oas.models.next_recurring_charge import NextRecurringCharge
from onshape_client.oas.models.parameterized_header import ParameterizedHeader
from onshape_client.oas.models.plan import Plan
from onshape_client.oas.models.prorated_charges import ProratedCharges
from onshape_client.oas.models.request_options import RequestOptions
from onshape_client.oas.models.role_map_entry import RoleMapEntry
from onshape_client.oas.models.shipping_details import ShippingDetails
from onshape_client.oas.models.subscription import Subscription
from onshape_client.oas.models.subscription_item import SubscriptionItem
from onshape_client.oas.models.subscription_item_collection import (
SubscriptionItemCollection,
)
from onshape_client.oas.models.three_d_secure import ThreeDSecure
from onshape_client.oas.models.transform_group import TransformGroup
from onshape_client.oas.models.update_params import UpdateParams
|
|
import asyncio
import json
import os
import traceback
import re
from .exceptions import ExtractionError
from .utils import get_header, md5sum
class BasePlaylistEntry:
def __init__(self):
self.filename = None
self.filename_thumbnail = None
self._is_downloading = False
self._waiting_futures = []
@property
def is_downloaded(self):
if self._is_downloading:
return False
return bool(self.filename)
@classmethod
def from_json(cls, playlist, jsonstring):
raise NotImplementedError
def to_json(self):
raise NotImplementedError
async def _download(self):
raise NotImplementedError
def get_ready_future(self):
"""
        Returns a future that will fire when the song is ready to be played.
        The future resolves with the entry itself on success, or with the
        exception that explains why the download failed.
"""
future = asyncio.Future()
if self.is_downloaded:
# In the event that we're downloaded, we're already ready for playback.
future.set_result(self)
else:
# If we request a ready future, let's ensure that it'll actually resolve at one point.
asyncio.ensure_future(self._download())
self._waiting_futures.append(future)
return future
def _for_each_future(self, cb):
"""
Calls `cb` for each future that is not cancelled. Absorbs and logs any errors that may have occurred.
"""
futures = self._waiting_futures
self._waiting_futures = []
for future in futures:
if future.cancelled():
continue
try:
cb(future)
            except Exception:
traceback.print_exc()
def __eq__(self, other):
return self is other
def __hash__(self):
return id(self)
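

# --- Usage sketch (illustrative; not part of the original bot code) ----------
# A minimal, hypothetical subclass showing how get_ready_future() is meant to
# be awaited: the returned future resolves with the entry once _download()
# finishes, or carries the exception if the download fails. Run it with
# asyncio.run(_demo_get_ready_future()).
class _DemoPlaylistEntry(BasePlaylistEntry):
    async def _download(self):
        # Pretend the download produced a file, then notify any waiters.
        self.filename = '/tmp/demo.m4a'
        self._for_each_future(lambda future: future.set_result(self))


async def _demo_get_ready_future():
    entry = _DemoPlaylistEntry()
    ready_entry = await entry.get_ready_future()
    print('ready:', ready_entry.filename)
# ------------------------------------------------------------------------------
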
class URLPlaylistEntry(BasePlaylistEntry):
def __init__(self, playlist, url, title, duration=0, expected_filename=None, **meta):
super().__init__()
self.playlist = playlist
self.url = url
self.title = title
self.duration = duration
self.expected_filename = expected_filename
self.meta = meta
self.download_folder = self.playlist.downloader.download_folder
@classmethod
def from_json(cls, playlist, jsonstring):
data = json.loads(jsonstring)
print(data)
# TODO: version check
url = data['url']
title = data['title']
duration = data['duration']
downloaded = data['downloaded']
filename = data['filename'] if downloaded else None
filename_thumbnail = data['filename_thumbnail'] if downloaded else None
meta = {}
# TODO: Better [name] fallbacks
if 'channel' in data['meta']:
ch = playlist.bot.get_channel(data['meta']['channel']['id'])
meta['channel'] = ch or data['meta']['channel']['name']
if 'author' in data['meta']:
meta['author'] = meta['channel'].server.get_member(data['meta']['author']['id'])
return cls(playlist, url, title, duration, filename, **meta)
def to_json(self):
data = {
'version': 1,
'type': self.__class__.__name__,
'url': self.url,
'title': self.title,
'duration': self.duration,
'downloaded': self.is_downloaded,
'filename': self.filename,
'filename_thumbnail': self.filename_thumbnail,
'meta': {
i: {
'type': self.meta[i].__class__.__name__,
'id': self.meta[i].id,
'name': self.meta[i].name
} for i in self.meta
}
# Actually I think I can just getattr instead, getattr(discord, type)
}
return json.dumps(data, indent=2)
# noinspection PyTypeChecker
async def _download(self):
if self._is_downloading:
return
self._is_downloading = True
try:
# Ensure the folder that we're going to move into exists.
if not os.path.exists(self.download_folder):
os.makedirs(self.download_folder)
# self.expected_filename: audio_cache\youtube-9R8aSKwTEMg-NOMA_-_Brain_Power.m4a
extractor = os.path.basename(self.expected_filename).split('-')[0]
# the generic extractor requires special handling
if extractor == 'generic':
# print("Handling generic")
# remove thumbnail images from list
                imgPattern = re.compile(r'(\.(jpg|jpeg|png|gif|bmp))$', flags=re.IGNORECASE)
flistdir = [f.rsplit('-', 1)[0] for f in os.listdir(self.download_folder) if not imgPattern.search(f)]
expected_fname_noex, fname_ex = os.path.basename(self.expected_filename).rsplit('.', 1)
if expected_fname_noex in flistdir:
try:
rsize = int(await get_header(self.playlist.bot.aiosession, self.url, 'CONTENT-LENGTH'))
                    except Exception:
rsize = 0
lfile = os.path.join(
self.download_folder,
os.listdir(self.download_folder)[flistdir.index(expected_fname_noex)]
)
# print("Resolved %s to %s" % (self.expected_filename, lfile))
lsize = os.path.getsize(lfile)
# print("Remote size: %s Local size: %s" % (rsize, lsize))
if lsize != rsize:
await self._really_download(hash=True)
else:
# print("[Download] Cached:", self.url)
self.filename = lfile
else:
# print("File not found in cache (%s)" % expected_fname_noex)
await self._really_download(hash=True)
else:
                imgPattern = re.compile(r'(\.(jpg|jpeg|png|gif|bmp))$', flags=re.IGNORECASE)
ldir = [f for f in os.listdir(self.download_folder) if not imgPattern.search(f)]
flistdir = [f.rsplit('.', 1)[0] for f in ldir]
expected_fname_base = os.path.basename(self.expected_filename)
expected_fname_noex = expected_fname_base.rsplit('.', 1)[0]
                # Match by exact base name first, then by the name without its
                # extension; the extension can differ when the extractor's
                # output format changes between runs.
if expected_fname_base in ldir:
self.filename = os.path.join(self.download_folder, expected_fname_base)
print("[Download] Cached:", self.url)
elif expected_fname_noex in flistdir:
print("[Download] Cached (different extension):", self.url)
self.filename = os.path.join(self.download_folder, ldir[flistdir.index(expected_fname_noex)])
print("Expected %s, got %s" % (
self.expected_filename.rsplit('.', 1)[-1],
self.filename.rsplit('.', 1)[-1]
))
else:
await self._really_download()
# Trigger ready callbacks.
self._for_each_future(lambda future: future.set_result(self))
except Exception as e:
traceback.print_exc()
self._for_each_future(lambda future: future.set_exception(e))
finally:
self._is_downloading = False
# noinspection PyShadowingBuiltins
async def _really_download(self, *, hash=False):
print("[Download] Started:", self.url)
try:
result = await self.playlist.downloader.extract_info(self.playlist.loop, self.url, download=True)
except Exception as e:
raise ExtractionError(e)
print("[Download] Complete:", self.url)
if result is None:
raise ExtractionError("ytdl broke and hell if I know why")
        # Work out the final path ytdl used for the downloaded file.
self.filename = unhashed_fname = self.playlist.downloader.ytdl.prepare_filename(result)
        # Look for a thumbnail image sharing the audio file's base name.
        base_name = re.escape(os.path.basename(self.filename).rsplit('.', 1)[0])
        imgPattern = re.compile(base_name + r'(\.(jpg|jpeg|png|gif|bmp))$', re.IGNORECASE)
        self.filename_thumbnail = next((os.path.join(self.download_folder, f)
                                        for f in os.listdir(self.download_folder)
                                        if imgPattern.search(f)), None)
if hash:
            # Insert the last 8 characters of the file's md5 hash into the file
            # name ("name.ext" -> "name-<hash>.ext") so cached generic downloads
            # with the same expected name stay unique.
            self.filename = md5sum(unhashed_fname, 8).join('-.').join(unhashed_fname.rsplit('.', 1))
if os.path.isfile(self.filename):
                # A hashed copy already exists; discard the duplicate download.
os.unlink(unhashed_fname)
else:
                # Move the temporary file to its final location.
os.rename(unhashed_fname, self.filename)
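

# --- Filename-hashing sketch (illustrative only) ------------------------------
# _really_download(hash=True) embeds the last 8 characters of the file's md5
# digest into the cached file name. The helper below mirrors that naming rule
# with plain string operations; the digest value in the example is made up.
def _demo_hashed_name(unhashed_fname, digest_tail):
    name, ext = unhashed_fname.rsplit('.', 1)
    return '{0}-{1}.{2}'.format(name, digest_tail, ext)

# _demo_hashed_name('audio_cache/generic-track.mp3', 'a1b2c3d4')
# -> 'audio_cache/generic-track-a1b2c3d4.mp3'
# ------------------------------------------------------------------------------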
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=not-callable
# pylint: disable=redefined-builtin
"""Layers can merge several input tensors into a single output tensor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.engine.topology import Layer
from tensorflow.python.framework import tensor_shape
class _Merge(Layer):
"""Generic merge layer for elementwise merge functions.
Used to implement `Sum`, `Average`, etc.
Arguments:
**kwargs: standard layer keyword arguments.
"""
def __init__(self, **kwargs):
super(_Merge, self).__init__(**kwargs)
self.supports_masking = True
def _merge_function(self, inputs):
raise NotImplementedError
def _compute_elemwise_op_output_shape(self, shape1, shape2):
"""Computes the shape of the resultant of an elementwise operation.
Arguments:
shape1: tuple or None. Shape of the first tensor
shape2: tuple or None. Shape of the second tensor
Returns:
expected output shape when an element-wise operation is
carried out on 2 tensors with shapes shape1 and shape2.
tuple or None.
Raises:
ValueError: if shape1 and shape2 are not compatible for
element-wise operations.
"""
if None in [shape1, shape2]:
return None
elif len(shape1) < len(shape2):
return self._compute_elemwise_op_output_shape(shape2, shape1)
elif not shape2:
return shape1
output_shape = list(shape1[:-len(shape2)])
for i, j in zip(shape1[-len(shape2):], shape2):
if i is None or j is None:
output_shape.append(None)
elif i == 1:
output_shape.append(j)
elif j == 1:
output_shape.append(i)
else:
if i != j:
raise ValueError('Operands could not be broadcast '
'together with shapes ' + str(shape1) + ' ' +
str(shape2))
output_shape.append(i)
return tuple(output_shape)
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list):
      raise ValueError('A merge layer should be called on a list of inputs.')
if len(input_shape) < 2:
raise ValueError('A merge layer should be called '
'on a list of at least 2 inputs. '
'Got ' + str(len(input_shape)) + ' inputs.')
input_shape = [tensor_shape.TensorShape(s).as_list() for s in input_shape]
batch_sizes = [s[0] for s in input_shape if s is not None]
batch_sizes = set(batch_sizes)
batch_sizes -= set([None])
if len(batch_sizes) > 1:
raise ValueError('Can not merge tensors with different '
'batch sizes. Got tensors with shapes : ' +
str(input_shape))
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
# If the inputs have different ranks, we have to reshape them
# to make them broadcastable.
if None not in input_shape and len(set(map(len, input_shape))) == 1:
self._reshape_required = False
else:
self._reshape_required = True
self.built = True
def call(self, inputs):
if self._reshape_required:
reshaped_inputs = []
input_ndims = list(map(K.ndim, inputs))
if None not in input_ndims:
# If ranks of all inputs are available,
# we simply expand each of them at axis=1
# until all of them have the same rank.
max_ndim = max(input_ndims)
for x in inputs:
x_ndim = K.ndim(x)
for _ in range(max_ndim - x_ndim):
x = K.expand_dims(x, 1)
reshaped_inputs.append(x)
return self._merge_function(reshaped_inputs)
else:
# Transpose all inputs so that batch size is the last dimension.
# (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
transposed = False
for x in inputs:
x_ndim = K.ndim(x)
if x_ndim is None:
x_shape = K.shape(x)
batch_size = x_shape[0]
new_shape = K.concatenate([x_shape[1:], K.expand_dims(batch_size)])
x_transposed = K.reshape(x,
K.stack([batch_size, K.prod(x_shape[1:])]))
x_transposed = K.permute_dimensions(x_transposed, (1, 0))
x_transposed = K.reshape(x_transposed, new_shape)
reshaped_inputs.append(x_transposed)
transposed = True
elif x_ndim > 1:
dims = list(range(1, x_ndim)) + [0]
reshaped_inputs.append(K.permute_dimensions(x, dims))
transposed = True
else:
# We don't transpose inputs if they are 1D vectors or scalars.
reshaped_inputs.append(x)
y = self._merge_function(reshaped_inputs)
y_ndim = K.ndim(y)
if transposed:
# If inputs have been transposed, we have to transpose the output too.
if y_ndim is None:
y_shape = K.shape(y)
y_ndim = K.shape(y_shape)[0]
batch_size = y_shape[y_ndim - 1]
new_shape = K.concatenate(
[K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
y = K.reshape(y, (-1, batch_size))
y = K.permute_dimensions(y, (1, 0))
y = K.reshape(y, new_shape)
elif y_ndim > 1:
dims = [y_ndim - 1] + list(range(y_ndim - 1))
y = K.permute_dimensions(y, dims)
return y
else:
return self._merge_function(inputs)
def compute_output_shape(self, input_shape):
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
batch_sizes = [s[0] for s in input_shape if s is not None]
batch_sizes = set(batch_sizes)
batch_sizes -= set([None])
if len(batch_sizes) == 1:
output_shape = (list(batch_sizes)[0],) + output_shape
else:
output_shape = (None,) + output_shape
return output_shape
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, list):
raise ValueError('`mask` should be a list.')
if not isinstance(inputs, list):
raise ValueError('`inputs` should be a list.')
if len(mask) != len(inputs):
raise ValueError('The lists `inputs` and `mask` '
'should have the same length.')
if all([m is None for m in mask]):
return None
masks = [K.expand_dims(m, 0) for m in mask if m is not None]
return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
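

# --- Shape-broadcast sketch (illustrative plain Python, not the Keras API) ----
# _compute_elemwise_op_output_shape above follows the usual broadcasting rule:
# aligned trailing dimensions must be equal, 1, or None (unknown). The helper
# below reproduces that rule with plain tuples so the expected results are easy
# to check by hand; it is a sketch, not part of the layer implementation.
def _demo_broadcast_shape(shape1, shape2):
  if len(shape1) < len(shape2):
    shape1, shape2 = shape2, shape1
  output = list(shape1[:len(shape1) - len(shape2)])
  for i, j in zip(shape1[len(shape1) - len(shape2):], shape2):
    if i is None or j is None:
      output.append(None)
    elif i == 1 or j == 1 or i == j:
      output.append(j if i == 1 else i)
    else:
      raise ValueError('Operands could not be broadcast together with '
                       'shapes %s %s' % (shape1, shape2))
  return tuple(output)

# _demo_broadcast_shape((4, 5), (3, 4, 5))  -> (3, 4, 5)
# _demo_broadcast_shape((None, 1), (6, 8))  -> (None, 8)
# ------------------------------------------------------------------------------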
class Add(_Merge):
"""Layer that adds a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output += inputs[i]
return output
class Multiply(_Merge):
"""Layer that multiplies (element-wise) a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output *= inputs[i]
return output
class Average(_Merge):
"""Layer that averages a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output += inputs[i]
return output / len(inputs)
class Maximum(_Merge):
"""Layer that computes the maximum (element-wise) a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = K.maximum(output, inputs[i])
return output
class Concatenate(_Merge):
"""Layer that concatenates a list of inputs.
It takes as input a list of tensors,
  all of the same shape except for the concatenation axis,
and returns a single tensor, the concatenation of all inputs.
Arguments:
axis: Axis along which to concatenate.
**kwargs: standard layer keyword arguments.
"""
def __init__(self, axis=-1, **kwargs):
super(Concatenate, self).__init__(**kwargs)
self.axis = axis
self.supports_masking = True
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list):
raise ValueError('`Concatenate` layer should be called '
'on a list of inputs')
if all([shape is None for shape in input_shape]):
return
reduced_inputs_shapes = [
tensor_shape.TensorShape(shape).as_list() for shape in input_shape
]
shape_set = set()
for i in range(len(reduced_inputs_shapes)):
del reduced_inputs_shapes[i][self.axis]
shape_set.add(tuple(reduced_inputs_shapes[i]))
if len(shape_set) > 1:
raise ValueError('`Concatenate` layer requires '
'inputs with matching shapes '
'except for the concat axis. '
'Got inputs shapes: %s' % (input_shape))
self.built = True
def call(self, inputs):
if not isinstance(inputs, list):
raise ValueError('A `Concatenate` layer should be called '
'on a list of inputs.')
return K.concatenate(inputs, axis=self.axis)
def _compute_output_shape(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('A `Concatenate` layer should be called '
'on a list of inputs.')
input_shapes = input_shape
output_shape = tensor_shape.TensorShape(input_shapes[0]).as_list()
for shape in input_shapes[1:]:
shape = tensor_shape.TensorShape(shape).as_list()
if output_shape[self.axis] is None or shape[self.axis] is None:
output_shape[self.axis] = None
break
output_shape[self.axis] += shape[self.axis]
return tensor_shape.TensorShape(output_shape)
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, list):
raise ValueError('`mask` should be a list.')
if not isinstance(inputs, list):
raise ValueError('`inputs` should be a list.')
if len(mask) != len(inputs):
raise ValueError('The lists `inputs` and `mask` '
'should have the same length.')
if all([m is None for m in mask]):
return None
# Make a list of masks while making sure
# the dimensionality of each mask
# is the same as the corresponding input.
masks = []
for input_i, mask_i in zip(inputs, mask):
if mask_i is None:
# Input is unmasked. Append all 1s to masks,
# but cast it to bool first
masks.append(K.cast(K.ones_like(input_i), 'bool'))
elif K.ndim(mask_i) < K.ndim(input_i):
# Mask is smaller than the input, expand it
masks.append(K.expand_dims(mask_i))
else:
masks.append(mask_i)
concatenated = K.concatenate(masks, axis=self.axis)
return K.all(concatenated, axis=-1, keepdims=False)
def get_config(self):
config = {
'axis': self.axis,
}
base_config = super(Concatenate, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Dot(_Merge):
"""Layer that computes a dot product between samples in two tensors.
E.g. if applied to two tensors `a` and `b` of shape `(batch_size, n)`,
the output will be a tensor of shape `(batch_size, 1)`
where each entry `i` will be the dot product between
`a[i]` and `b[i]`.
Arguments:
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
"""
def __init__(self, axes, normalize=False, **kwargs):
super(Dot, self).__init__(**kwargs)
if not isinstance(axes, int):
if not isinstance(axes, (list, tuple)):
raise TypeError('Invalid type for `axes` - '
'should be a list or an int.')
if len(axes) != 2:
raise ValueError('Invalid format for `axes` - '
'should contain two elements.')
if not isinstance(axes[0], int) or not isinstance(axes[1], int):
raise ValueError('Invalid format for `axes` - '
'list elements should be "int".')
self.axes = axes
self.normalize = normalize
self.supports_masking = True
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `Dot` layer should be called '
'on a list of 2 inputs.')
shape1 = tensor_shape.TensorShape(input_shape[0]).as_list()
shape2 = tensor_shape.TensorShape(input_shape[1]).as_list()
if shape1 is None or shape2 is None:
return
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
if shape1[axes[0]] != shape2[axes[1]]:
raise ValueError('Dimension incompatibility '
'%s != %s. ' % (shape1[axes[0]], shape2[axes[1]]) +
'Layer shapes: %s, %s' % (shape1, shape2))
self.built = True
def call(self, inputs):
x1 = inputs[0]
x2 = inputs[1]
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
else:
axes = [self.axes] * 2
else:
axes = []
for i in range(len(self.axes)):
if self.axes[i] < 0:
axes.append(self.axes[i] % K.ndim(inputs[i]))
else:
axes.append(self.axes[i])
if self.normalize:
x1 = K.l2_normalize(x1, axis=axes[0])
x2 = K.l2_normalize(x2, axis=axes[1])
output = K.batch_dot(x1, x2, axes)
return output
def _compute_output_shape(self, input_shape):
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `Dot` layer should be called '
'on a list of 2 inputs.')
shape1 = tensor_shape.TensorShape(input_shape[0]).as_list()
shape2 = tensor_shape.TensorShape(input_shape[1]).as_list()
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
shape1.pop(axes[0])
shape2.pop(axes[1])
shape2.pop(0)
output_shape = shape1 + shape2
if len(output_shape) == 1:
output_shape += [1]
return tensor_shape.TensorShape(output_shape)
def compute_mask(self, inputs, mask=None):
return None
def get_config(self):
config = {
'axes': self.axes,
'normalize': self.normalize,
}
base_config = super(Dot, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def add(inputs, **kwargs):
"""Functional interface to the `Add` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the sum of the inputs.
"""
return Add(**kwargs)(inputs)
def multiply(inputs, **kwargs):
"""Functional interface to the `Multiply` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the element-wise product of the inputs.
"""
return Multiply(**kwargs)(inputs)
def average(inputs, **kwargs):
"""Functional interface to the `Average` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the average of the inputs.
"""
return Average(**kwargs)(inputs)
def maximum(inputs, **kwargs):
"""Functional interface to the `Maximum` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the element-wise maximum of the inputs.
"""
return Maximum(**kwargs)(inputs)
def concatenate(inputs, axis=-1, **kwargs):
"""Functional interface to the `Concatenate` layer.
Arguments:
inputs: A list of input tensors (at least 2).
axis: Concatenation axis.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the concatenation of the inputs alongside axis `axis`.
"""
return Concatenate(axis=axis, **kwargs)(inputs)
def dot(inputs, axes, normalize=False, **kwargs):
"""Functional interface to the `Dot` layer.
Arguments:
inputs: A list of input tensors (at least 2).
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the dot product of the samples from the inputs.
"""
return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)
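

# --- Semantics sketch (NumPy stand-in, not the Keras backend API) -------------
# For two batches of vectors of shape (batch_size, n), `Dot(axes=1)` pairs
# sample i of the first input with sample i of the second and returns the dot
# products with shape (batch_size, 1); with `normalize=True` the samples are
# L2-normalized first, so each entry is the cosine proximity of the pair. The
# NumPy code below mirrors that contract for the axes=1 case only.
import numpy as np


def _demo_batch_dot(a, b, normalize=False):
  a = np.asarray(a, dtype=float)
  b = np.asarray(b, dtype=float)
  if normalize:
    a = a / np.linalg.norm(a, axis=1, keepdims=True)
    b = b / np.linalg.norm(b, axis=1, keepdims=True)
  return np.sum(a * b, axis=1, keepdims=True)  # shape: (batch_size, 1)

# _demo_batch_dot([[1., 0.], [0., 2.]], [[1., 0.], [0., 1.]]) -> [[1.], [2.]]
# ------------------------------------------------------------------------------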
|
|
# -*- coding: utf-8 -*-
"""The parser mediator."""
import copy
import datetime
import time
from dfvfs.lib import definitions as dfvfs_definitions
from plaso.containers import warnings
from plaso.engine import path_helper
from plaso.engine import profilers
from plaso.lib import errors
from plaso.parsers import logger
class ParserMediator(object):
"""Parser mediator.
Attributes:
collection_filters_helper (CollectionFiltersHelper): collection filters
helper.
last_activity_timestamp (int): timestamp received that indicates the last
time activity was observed. The last activity timestamp is updated
when the mediator produces an attribute container, such as an event
source. This timestamp is used by the multi processing worker process
to indicate the last time the worker was known to be active. This
information is then used by the foreman to detect workers that are
not responding (stalled).
"""
_INT64_MIN = -1 << 63
_INT64_MAX = (1 << 63) - 1
def __init__(
self, storage_writer, knowledge_base, collection_filters_helper=None,
preferred_year=None, resolver_context=None, temporary_directory=None):
"""Initializes a parser mediator.
Args:
storage_writer (StorageWriter): storage writer.
knowledge_base (KnowledgeBase): contains information from the source
data needed for parsing.
collection_filters_helper (Optional[CollectionFiltersHelper]): collection
filters helper.
preferred_year (Optional[int]): preferred year.
resolver_context (Optional[dfvfs.Context]): resolver context.
temporary_directory (Optional[str]): path of the directory for temporary
files.
"""
super(ParserMediator, self).__init__()
self._abort = False
self._cpu_time_profiler = None
self._event_data_stream_identifier = None
self._extra_event_attributes = {}
self._file_entry = None
self._knowledge_base = knowledge_base
self._last_event_data_hash = None
self._last_event_data_identifier = None
self._memory_profiler = None
self._number_of_event_sources = 0
self._number_of_events = 0
self._number_of_warnings = 0
self._parser_chain_components = []
self._preferred_year = preferred_year
self._process_information = None
self._resolver_context = resolver_context
self._storage_writer = storage_writer
self._temporary_directory = temporary_directory
self.collection_filters_helper = collection_filters_helper
self.last_activity_timestamp = 0.0
@property
def abort(self):
"""bool: True if parsing should be aborted."""
return self._abort
@property
def codepage(self):
"""str: codepage."""
return self._knowledge_base.codepage
@property
def hostname(self):
"""str: hostname."""
return self._knowledge_base.hostname
@property
def knowledge_base(self):
"""KnowledgeBase: knowledge base."""
return self._knowledge_base
@property
def number_of_produced_event_sources(self):
"""int: number of produced event sources."""
return self._number_of_event_sources
@property
def number_of_produced_events(self):
"""int: number of produced events."""
return self._number_of_events
@property
def number_of_produced_warnings(self):
"""int: number of produced warnings."""
return self._number_of_warnings
@property
def operating_system(self):
"""str: operating system or None if not set."""
return self._knowledge_base.GetValue('operating_system')
@property
def resolver_context(self):
"""dfvfs.Context: resolver context."""
return self._resolver_context
@property
def temporary_directory(self):
"""str: path of the directory for temporary files."""
return self._temporary_directory
@property
def timezone(self):
"""datetime.tzinfo: timezone."""
return self._knowledge_base.timezone
@property
def year(self):
"""int: year."""
return self._knowledge_base.year
def _GetEarliestYearFromFileEntry(self):
"""Retrieves the year from the file entry date and time values.
This function uses the creation time if available otherwise the change
time (metadata last modification time) is used.
Returns:
int: year of the file entry or None.
"""
file_entry = self.GetFileEntry()
if not file_entry:
return None
date_time = file_entry.creation_time
if not date_time:
date_time = file_entry.change_time
    # Gzip files do not store a creation or change time, but their modification
    # time is a good alternative.
if file_entry.TYPE_INDICATOR == dfvfs_definitions.TYPE_INDICATOR_GZIP:
date_time = file_entry.modification_time
if date_time is None:
logger.warning('File entry has no creation or change time.')
return None
year, _, _ = date_time.GetDate()
return year
def _GetLatestYearFromFileEntry(self):
"""Retrieves the maximum (highest value) year from the file entry.
This function uses the modification time if available otherwise the change
time (metadata last modification time) is used.
Returns:
int: year of the file entry or None if the year cannot be retrieved.
"""
file_entry = self.GetFileEntry()
if not file_entry:
return None
date_time = file_entry.modification_time
if not date_time:
date_time = file_entry.change_time
if date_time is None:
logger.warning('File entry has no modification or change time.')
return None
year, _, _ = date_time.GetDate()
return year
def AddEventAttribute(self, attribute_name, attribute_value):
"""Adds an attribute that will be set on all events produced.
Setting attributes using this method will cause events produced via this
mediator to have an attribute with the provided name set with the
provided value.
Args:
attribute_name (str): name of the attribute to add.
attribute_value (str): value of the attribute to add.
Raises:
KeyError: if the event attribute is already set.
"""
if attribute_name in self._extra_event_attributes:
raise KeyError('Event attribute {0:s} already set'.format(
attribute_name))
self._extra_event_attributes[attribute_name] = attribute_value
def AppendToParserChain(self, plugin_or_parser):
"""Adds a parser or parser plugin to the parser chain.
Args:
plugin_or_parser (BaseParser): parser or parser plugin.
"""
self._parser_chain_components.append(plugin_or_parser.NAME)
def ClearEventAttributes(self):
"""Clears the extra event attributes."""
self._extra_event_attributes = {}
def ClearParserChain(self):
"""Clears the parser chain."""
self._parser_chain_components = []
def GetDisplayName(self, file_entry=None):
"""Retrieves the display name for a file entry.
Args:
file_entry (Optional[dfvfs.FileEntry]): file entry object, where None
will return the display name of self._file_entry.
Returns:
str: human readable string that describes the path to the file entry.
Raises:
ValueError: if the file entry is missing.
"""
if file_entry is None:
file_entry = self._file_entry
if file_entry is None:
raise ValueError('Missing file entry')
path_spec = getattr(file_entry, 'path_spec', None)
mount_path = self._knowledge_base.GetMountPath()
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(
path_spec, mount_path=mount_path)
if not relative_path:
return file_entry.name
text_prepend = self._knowledge_base.GetTextPrepend()
return path_helper.PathHelper.GetDisplayNameForPathSpec(
path_spec, mount_path=mount_path, text_prepend=text_prepend)
def GetDisplayNameForPathSpec(self, path_spec):
"""Retrieves the display name for a path specification.
Args:
path_spec (dfvfs.PathSpec): path specification.
Returns:
str: human readable version of the path specification.
"""
mount_path = self._knowledge_base.GetMountPath()
text_prepend = self._knowledge_base.GetTextPrepend()
return path_helper.PathHelper.GetDisplayNameForPathSpec(
path_spec, mount_path=mount_path, text_prepend=text_prepend)
def GetEstimatedYear(self):
"""Retrieves an estimate of the year.
This function determines the year in the following manner:
* determine if the user provided a preferred year;
* determine if knowledge base defines a year derived from preprocessing;
* determine the year based on the file entry metadata;
* default to the current year;
Returns:
int: estimated year.
"""
# TODO: improve this method to get a more reliable estimate.
# Preserve the year-less date and sort this out in the psort phase.
if self._preferred_year:
return self._preferred_year
if self._knowledge_base.year:
return self._knowledge_base.year
# TODO: Find a decent way to actually calculate the correct year
# instead of relying on file entry timestamps.
year = self._GetEarliestYearFromFileEntry()
if not year:
year = self._GetLatestYearFromFileEntry()
if not year:
year = self.GetCurrentYear()
return year
def GetFileEntry(self):
"""Retrieves the active file entry.
Returns:
dfvfs.FileEntry: file entry.
"""
return self._file_entry
def GetFilename(self):
"""Retrieves the name of the active file entry.
Returns:
str: name of the active file entry or None.
"""
if not self._file_entry:
return None
data_stream = getattr(self._file_entry.path_spec, 'data_stream', None)
if data_stream:
return '{0:s}:{1:s}'.format(self._file_entry.name, data_stream)
return self._file_entry.name
def GetCurrentYear(self):
"""Retrieves current year.
Returns:
int: the current year.
"""
datetime_object = datetime.datetime.now()
return datetime_object.year
def GetLatestYear(self):
"""Retrieves the latest (newest) year for an event from a file.
This function tries to determine the year based on the file entry metadata,
if that fails the current year is used.
Returns:
int: year of the file entry or the current year.
"""
year = self._GetLatestYearFromFileEntry()
if not year:
year = self.GetCurrentYear()
return year
def GetParserChain(self):
"""Retrieves the current parser chain.
Returns:
str: parser chain.
"""
return '/'.join(self._parser_chain_components)
def GetRelativePath(self):
"""Retrieves the relative path of the current file entry.
Returns:
      str: relative path of the current file entry or None if there is no
          current file entry.
"""
if self._file_entry is None:
return None
mount_path = self._knowledge_base.GetMountPath()
return path_helper.PathHelper.GetRelativePathForPathSpec(
self._file_entry.path_spec, mount_path=mount_path)
def GetRelativePathForPathSpec(self, path_spec):
"""Retrieves the relative path for a path specification.
Args:
path_spec (dfvfs.PathSpec): path specification.
Returns:
      str: relative path of the path specification.
"""
mount_path = self._knowledge_base.GetMountPath()
return path_helper.PathHelper.GetRelativePathForPathSpec(
path_spec, mount_path=mount_path)
def PopFromParserChain(self):
"""Removes the last added parser or parser plugin from the parser chain."""
self._parser_chain_components.pop()
def ProcessEventData(
self, event_data, parser_chain=None, file_entry=None, query=None):
"""Processes event data before it written to the storage.
Args:
event_data (EventData): event data.
parser_chain (Optional[str]): parsing chain up to this point.
file_entry (Optional[dfvfs.FileEntry]): file entry, where None will
use the current file entry set in the mediator.
query (Optional[str]): query that was used to obtain the event data.
Raises:
KeyError: if there's an attempt to add a duplicate attribute value to the
event data.
"""
# TODO: rename this to event_data.parser_chain or equivalent.
if not getattr(event_data, 'parser', None) and parser_chain:
event_data.parser = parser_chain
if file_entry is None:
file_entry = self._file_entry
if not getattr(event_data, 'hostname', None) and self.hostname:
event_data.hostname = self.hostname
if not getattr(event_data, 'username', None):
user_sid = getattr(event_data, 'user_sid', None)
username = self._knowledge_base.GetUsernameByIdentifier(user_sid)
if username:
event_data.username = username
if not getattr(event_data, 'query', None) and query:
event_data.query = query
for attribute, value in self._extra_event_attributes.items():
if hasattr(event_data, attribute):
raise KeyError('Event already has a value for {0:s}'.format(attribute))
setattr(event_data, attribute, value)
def ProduceEventDataStream(self, event_data_stream):
"""Produces an event data stream.
Args:
event_data_stream (EventDataStream): an event data stream or None if no
event data stream is needed.
Raises:
RuntimeError: when storage writer is not set.
"""
if not self._storage_writer:
raise RuntimeError('Storage writer not set.')
if not event_data_stream:
self._event_data_stream_identifier = None
else:
if not event_data_stream.path_spec:
event_data_stream.path_spec = getattr(
self._file_entry, 'path_spec', None)
self._storage_writer.AddEventDataStream(event_data_stream)
self._event_data_stream_identifier = event_data_stream.GetIdentifier()
self.last_activity_timestamp = time.time()
def ProduceEventSource(self, event_source):
"""Produces an event source.
Args:
event_source (EventSource): an event source.
Raises:
RuntimeError: when storage writer is not set.
"""
if not self._storage_writer:
raise RuntimeError('Storage writer not set.')
self._storage_writer.AddEventSource(event_source)
self._number_of_event_sources += 1
self.last_activity_timestamp = time.time()
def ProduceEventWithEventData(self, event, event_data):
"""Produces an event.
Args:
event (EventObject): event.
event_data (EventData): event data.
Raises:
InvalidEvent: if the event timestamp value is not set or out of bounds or
if the event data (attribute container) values cannot be hashed.
"""
if event.timestamp is None:
raise errors.InvalidEvent('Event timestamp value not set.')
if event.timestamp < self._INT64_MIN or event.timestamp > self._INT64_MAX:
raise errors.InvalidEvent('Event timestamp value out of bounds.')
try:
event_data_hash = event_data.GetAttributeValuesHash()
except TypeError as exception:
raise errors.InvalidEvent(
'Unable to hash event data values with error: {0!s}'.format(
exception))
if event_data_hash != self._last_event_data_hash:
# Make a copy of the event data before adding additional values.
event_data = copy.deepcopy(event_data)
self.ProcessEventData(
event_data, parser_chain=self.GetParserChain(),
file_entry=self._file_entry)
if self._event_data_stream_identifier:
event_data.SetEventDataStreamIdentifier(
self._event_data_stream_identifier)
self._storage_writer.AddEventData(event_data)
self._last_event_data_hash = event_data_hash
self._last_event_data_identifier = event_data.GetIdentifier()
if self._last_event_data_identifier:
event.SetEventDataIdentifier(self._last_event_data_identifier)
# TODO: remove this after structural fix is in place
# https://github.com/log2timeline/plaso/issues/1691
event.parser = self.GetParserChain()
self._storage_writer.AddEvent(event)
self._number_of_events += 1
self.last_activity_timestamp = time.time()
def ProduceExtractionWarning(self, message, path_spec=None):
"""Produces an extraction warning.
Args:
message (str): message of the warning.
path_spec (Optional[dfvfs.PathSpec]): path specification, where None
will use the path specification of current file entry set in
the mediator.
Raises:
RuntimeError: when storage writer is not set.
"""
if not self._storage_writer:
raise RuntimeError('Storage writer not set.')
if not path_spec and self._file_entry:
path_spec = self._file_entry.path_spec
parser_chain = self.GetParserChain()
warning = warnings.ExtractionWarning(
message=message, parser_chain=parser_chain, path_spec=path_spec)
self._storage_writer.AddExtractionWarning(warning)
self._number_of_warnings += 1
self.last_activity_timestamp = time.time()
def RemoveEventAttribute(self, attribute_name):
"""Removes an attribute from being set on all events produced.
Args:
attribute_name (str): name of the attribute to remove.
Raises:
KeyError: if the event attribute is not set.
"""
if attribute_name not in self._extra_event_attributes:
raise KeyError('Event attribute: {0:s} not set'.format(attribute_name))
del self._extra_event_attributes[attribute_name]
def ResetFileEntry(self):
"""Resets the active file entry."""
self._file_entry = None
def SampleMemoryUsage(self, parser_name):
"""Takes a sample of the memory usage for profiling.
Args:
parser_name (str): name of the parser.
"""
if self._memory_profiler:
used_memory = self._process_information.GetUsedMemory() or 0
self._memory_profiler.Sample(parser_name, used_memory)
def SampleStartTiming(self, parser_name):
"""Starts timing a CPU time sample for profiling.
Args:
parser_name (str): name of the parser.
"""
if self._cpu_time_profiler:
self._cpu_time_profiler.StartTiming(parser_name)
def SampleStopTiming(self, parser_name):
"""Stops timing a CPU time sample for profiling.
Args:
parser_name (str): name of the parser.
"""
if self._cpu_time_profiler:
self._cpu_time_profiler.StopTiming(parser_name)
def SetFileEntry(self, file_entry):
"""Sets the active file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
"""
self._file_entry = file_entry
self._event_data_stream_identifier = None
def SetStorageWriter(self, storage_writer):
"""Sets the storage writer.
Args:
storage_writer (StorageWriter): storage writer.
"""
self._storage_writer = storage_writer
    # Reset the last event data information. Each storage file should
    # contain the event data for its own events.
self._last_event_data_hash = None
self._last_event_data_identifier = None
def SignalAbort(self):
"""Signals the parsers to abort."""
self._abort = True
def StartProfiling(self, configuration, identifier, process_information):
"""Starts profiling.
Args:
configuration (ProfilingConfiguration): profiling configuration.
identifier (str): identifier of the profiling session used to create
the sample filename.
process_information (ProcessInfo): process information.
"""
if not configuration:
return
if configuration.HaveProfileParsers():
identifier = '{0:s}-parsers'.format(identifier)
self._cpu_time_profiler = profilers.CPUTimeProfiler(
identifier, configuration)
self._cpu_time_profiler.Start()
self._memory_profiler = profilers.MemoryProfiler(
identifier, configuration)
self._memory_profiler.Start()
self._process_information = process_information
def StopProfiling(self):
"""Stops profiling."""
if self._cpu_time_profiler:
self._cpu_time_profiler.Stop()
self._cpu_time_profiler = None
if self._memory_profiler:
self._memory_profiler.Stop()
self._memory_profiler = None
self._process_information = None
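

# --- Usage sketch (illustrative; the parser class below is hypothetical) ------
# Shows the parser-chain and extra-event-attribute mechanics in isolation. A
# real parser receives a fully configured mediator from the engine; here the
# storage writer and knowledge base are left as None because none of the
# methods exercised below touch them.
def _DemoParserMediatorUsage():
  class _FakeParser(object):
    NAME = 'demo_parser'

  mediator = ParserMediator(None, None)
  mediator.AppendToParserChain(_FakeParser())
  mediator.AddEventAttribute('workstation', 'demo-host')
  parser_chain = mediator.GetParserChain()  # 'demo_parser'
  mediator.RemoveEventAttribute('workstation')
  mediator.PopFromParserChain()
  return parser_chain
# ------------------------------------------------------------------------------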
|
|
"""
Standard tag definitions.
"""
from ..utils import make_string, make_string_uc
# Interoperability tags
INTEROP_TAGS = {
0x0001: ('InteroperabilityIndex', ),
0x0002: ('InteroperabilityVersion', ),
0x1000: ('RelatedImageFileFormat', ),
0x1001: ('RelatedImageWidth', ),
0x1002: ('RelatedImageLength', ),
}
INTEROP_INFO = (
'Interoperability',
INTEROP_TAGS
)
# GPS tags
GPS_TAGS = {
0x0000: ('GPSVersionID', ),
0x0001: ('GPSLatitudeRef', ),
0x0002: ('GPSLatitude', ),
0x0003: ('GPSLongitudeRef', ),
0x0004: ('GPSLongitude', ),
0x0005: ('GPSAltitudeRef', ),
0x0006: ('GPSAltitude', ),
0x0007: ('GPSTimeStamp', ),
0x0008: ('GPSSatellites', ),
0x0009: ('GPSStatus', ),
0x000A: ('GPSMeasureMode', ),
0x000B: ('GPSDOP', ),
0x000C: ('GPSSpeedRef', ),
0x000D: ('GPSSpeed', ),
0x000E: ('GPSTrackRef', ),
0x000F: ('GPSTrack', ),
0x0010: ('GPSImgDirectionRef', ),
0x0011: ('GPSImgDirection', ),
0x0012: ('GPSMapDatum', ),
0x0013: ('GPSDestLatitudeRef', ),
0x0014: ('GPSDestLatitude', ),
0x0015: ('GPSDestLongitudeRef', ),
0x0016: ('GPSDestLongitude', ),
0x0017: ('GPSDestBearingRef', ),
0x0018: ('GPSDestBearing', ),
0x0019: ('GPSDestDistanceRef', ),
0x001A: ('GPSDestDistance', ),
0x001B: ('GPSProcessingMethod', ),
0x001C: ('GPSAreaInformation', ),
0x001D: ('GPSDate', ),
0x001E: ('GPSDifferential', ),
}
GPS_INFO = (
'GPS',
GPS_TAGS
)
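
# --- Conversion sketch (illustrative; not part of the tag tables) -------------
# GPSLatitude/GPSLongitude are conventionally stored as a degrees/minutes/
# seconds triplet, with GPSLatitudeRef/GPSLongitudeRef giving the hemisphere.
# This hypothetical helper shows the usual decimal-degree conversion for values
# already unpacked into plain numbers.
def _demo_gps_to_decimal(degrees, minutes, seconds, ref):
    value = degrees + minutes / 60.0 + seconds / 3600.0
    return -value if ref in ('S', 'W') else value

# _demo_gps_to_decimal(48, 51, 29.6, 'N') -> approximately 48.8582
# ------------------------------------------------------------------------------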
# Main Exif tag names
EXIF_TAGS = {
0x00FE: ('SubfileType', {
0x0: 'Full-resolution Image',
0x1: 'Reduced-resolution image',
0x2: 'Single page of multi-page image',
0x3: 'Single page of multi-page reduced-resolution image',
0x4: 'Transparency mask',
0x5: 'Transparency mask of reduced-resolution image',
0x6: 'Transparency mask of multi-page image',
0x7: 'Transparency mask of reduced-resolution multi-page image',
0x10001: 'Alternate reduced-resolution image',
0xffffffff: 'invalid ',
}),
0x00FF: ('OldSubfileType', {
1: 'Full-resolution image',
2: 'Reduced-resolution image',
3: 'Single page of multi-page image',
}),
0x0100: ('ImageWidth', ),
0x0101: ('ImageLength', ),
0x0102: ('BitsPerSample', ),
0x0103: ('Compression', {
1: 'Uncompressed',
2: 'CCITT 1D',
3: 'T4/Group 3 Fax',
4: 'T6/Group 4 Fax',
5: 'LZW',
6: 'JPEG (old-style)',
7: 'JPEG',
8: 'Adobe Deflate',
9: 'JBIG B&W',
10: 'JBIG Color',
32766: 'Next',
32769: 'Epson ERF Compressed',
32771: 'CCIRLEW',
32773: 'PackBits',
32809: 'Thunderscan',
32895: 'IT8CTPAD',
32896: 'IT8LW',
32897: 'IT8MP',
32898: 'IT8BL',
32908: 'PixarFilm',
32909: 'PixarLog',
32946: 'Deflate',
32947: 'DCS',
34661: 'JBIG',
34676: 'SGILog',
34677: 'SGILog24',
34712: 'JPEG 2000',
34713: 'Nikon NEF Compressed',
65000: 'Kodak DCR Compressed',
65535: 'Pentax PEF Compressed'
}),
0x0106: ('PhotometricInterpretation', ),
0x0107: ('Thresholding', ),
0x0108: ('CellWidth', ),
0x0109: ('CellLength', ),
0x010A: ('FillOrder', ),
0x010D: ('DocumentName', ),
0x010E: ('ImageDescription', ),
0x010F: ('Make', ),
0x0110: ('Model', ),
0x0111: ('StripOffsets', ),
0x0112: ('Orientation', {
1: 'Horizontal (normal)',
2: 'Mirrored horizontal',
3: 'Rotated 180',
4: 'Mirrored vertical',
5: 'Mirrored horizontal then rotated 90 CCW',
6: 'Rotated 90 CCW',
7: 'Mirrored horizontal then rotated 90 CW',
8: 'Rotated 90 CW'
}),
0x0115: ('SamplesPerPixel', ),
0x0116: ('RowsPerStrip', ),
0x0117: ('StripByteCounts', ),
0x0118: ('MinSampleValue', ),
0x0119: ('MaxSampleValue', ),
0x011A: ('XResolution', ),
0x011B: ('YResolution', ),
0x011C: ('PlanarConfiguration', ),
0x011D: ('PageName', make_string),
0x011E: ('XPosition', ),
0x011F: ('YPosition', ),
0x0122: ('GrayResponseUnit', {
1: '0.1',
2: '0.001',
3: '0.0001',
4: '1e-05',
5: '1e-06',
}),
0x0123: ('GrayResponseCurve', ),
0x0124: ('T4Options', ),
0x0125: ('T6Options', ),
0x0128: ('ResolutionUnit', {
1: 'Not Absolute',
2: 'Pixels/Inch',
3: 'Pixels/Centimeter'
}),
0x0129: ('PageNumber', ),
0x012C: ('ColorResponseUnit', ),
0x012D: ('TransferFunction', ),
0x0131: ('Software', ),
0x0132: ('DateTime', ),
0x013B: ('Artist', ),
0x013C: ('HostComputer', ),
0x013D: ('Predictor', {
1: 'None',
2: 'Horizontal differencing'
}),
0x013E: ('WhitePoint', ),
0x013F: ('PrimaryChromaticities', ),
0x0140: ('ColorMap', ),
0x0141: ('HalftoneHints', ),
0x0142: ('TileWidth', ),
0x0143: ('TileLength', ),
0x0144: ('TileOffsets', ),
0x0145: ('TileByteCounts', ),
0x0146: ('BadFaxLines', ),
0x0147: ('CleanFaxData', {
0: 'Clean',
1: 'Regenerated',
2: 'Unclean'
}),
0x0148: ('ConsecutiveBadFaxLines', ),
0x014C: ('InkSet', {
1: 'CMYK',
2: 'Not CMYK'
}),
0x014D: ('InkNames', ),
0x014E: ('NumberofInks', ),
0x0150: ('DotRange', ),
0x0151: ('TargetPrinter', ),
0x0152: ('ExtraSamples', {
0: 'Unspecified',
1: 'Associated Alpha',
2: 'Unassociated Alpha'
}),
0x0153: ('SampleFormat', {
1: 'Unsigned',
2: 'Signed',
3: 'Float',
4: 'Undefined',
5: 'Complex int',
6: 'Complex float'
}),
0x0154: ('SMinSampleValue', ),
0x0155: ('SMaxSampleValue', ),
0x0156: ('TransferRange', ),
0x0157: ('ClipPath', ),
0x0200: ('JPEGProc', ),
0x0201: ('JPEGInterchangeFormat', ),
0x0202: ('JPEGInterchangeFormatLength', ),
0x0211: ('YCbCrCoefficients', ),
0x0212: ('YCbCrSubSampling', ),
0x0213: ('YCbCrPositioning', {
1: 'Centered',
2: 'Co-sited'
}),
0x0214: ('ReferenceBlackWhite', ),
0x02BC: ('ApplicationNotes', ), # XPM Info
0x4746: ('Rating', ),
0x828D: ('CFARepeatPatternDim', ),
0x828E: ('CFAPattern', ),
0x828F: ('BatteryLevel', ),
0x8298: ('Copyright', ),
0x829A: ('ExposureTime', ),
0x829D: ('FNumber', ),
0x83BB: ('IPTC/NAA', ),
0x8769: ('ExifOffset', ), # Exif Tags
0x8773: ('InterColorProfile', ),
0x8822: ('ExposureProgram', {
0: 'Unidentified',
1: 'Manual',
2: 'Program Normal',
3: 'Aperture Priority',
4: 'Shutter Priority',
5: 'Program Creative',
6: 'Program Action',
7: 'Portrait Mode',
8: 'Landscape Mode'
}),
0x8824: ('SpectralSensitivity', ),
0x8825: ('GPSInfo', GPS_INFO), # GPS tags
0x8827: ('ISOSpeedRatings', ),
0x8828: ('OECF', ),
0x8830: ('SensitivityType', {
0: 'Unknown',
1: 'Standard Output Sensitivity',
2: 'Recommended Exposure Index',
3: 'ISO Speed',
4: 'Standard Output Sensitivity and Recommended Exposure Index',
5: 'Standard Output Sensitivity and ISO Speed',
6: 'Recommended Exposure Index and ISO Speed',
7: 'Standard Output Sensitivity, Recommended Exposure Index and ISO Speed'
}),
0x8832: ('RecommendedExposureIndex', ),
0x8833: ('ISOSpeed', ),
0x9000: ('ExifVersion', make_string),
0x9003: ('DateTimeOriginal', ),
0x9004: ('DateTimeDigitized', ),
0x9101: ('ComponentsConfiguration', {
0: '',
1: 'Y',
2: 'Cb',
3: 'Cr',
4: 'Red',
5: 'Green',
6: 'Blue'
}),
0x9102: ('CompressedBitsPerPixel', ),
0x9201: ('ShutterSpeedValue', ),
0x9202: ('ApertureValue', ),
0x9203: ('BrightnessValue', ),
0x9204: ('ExposureBiasValue', ),
0x9205: ('MaxApertureValue', ),
0x9206: ('SubjectDistance', ),
0x9207: ('MeteringMode', {
0: 'Unidentified',
1: 'Average',
2: 'CenterWeightedAverage',
3: 'Spot',
4: 'MultiSpot',
5: 'Pattern',
6: 'Partial',
255: 'other'
}),
0x9208: ('LightSource', {
0: 'Unknown',
1: 'Daylight',
2: 'Fluorescent',
3: 'Tungsten (incandescent light)',
4: 'Flash',
9: 'Fine weather',
10: 'Cloudy weather',
11: 'Shade',
12: 'Daylight fluorescent (D 5700 - 7100K)',
13: 'Day white fluorescent (N 4600 - 5400K)',
14: 'Cool white fluorescent (W 3900 - 4500K)',
15: 'White fluorescent (WW 3200 - 3700K)',
17: 'Standard light A',
18: 'Standard light B',
19: 'Standard light C',
20: 'D55',
21: 'D65',
22: 'D75',
23: 'D50',
24: 'ISO studio tungsten',
255: 'other light source'
}),
0x9209: ('Flash', {
0: 'Flash did not fire',
1: 'Flash fired',
5: 'Strobe return light not detected',
7: 'Strobe return light detected',
9: 'Flash fired, compulsory flash mode',
13: 'Flash fired, compulsory flash mode, return light not detected',
15: 'Flash fired, compulsory flash mode, return light detected',
16: 'Flash did not fire, compulsory flash mode',
24: 'Flash did not fire, auto mode',
25: 'Flash fired, auto mode',
29: 'Flash fired, auto mode, return light not detected',
31: 'Flash fired, auto mode, return light detected',
32: 'No flash function',
65: 'Flash fired, red-eye reduction mode',
69: 'Flash fired, red-eye reduction mode, return light not detected',
71: 'Flash fired, red-eye reduction mode, return light detected',
73: 'Flash fired, compulsory flash mode, red-eye reduction mode',
77: 'Flash fired, compulsory flash mode, red-eye reduction mode, return light not detected',
79: 'Flash fired, compulsory flash mode, red-eye reduction mode, return light detected',
89: 'Flash fired, auto mode, red-eye reduction mode',
93: 'Flash fired, auto mode, return light not detected, red-eye reduction mode',
95: 'Flash fired, auto mode, return light detected, red-eye reduction mode'
}),
0x920A: ('FocalLength', ),
0x9214: ('SubjectArea', ),
0x927C: ('MakerNote', ),
0x9286: ('UserComment', make_string_uc),
0x9290: ('SubSecTime', ),
0x9291: ('SubSecTimeOriginal', ),
0x9292: ('SubSecTimeDigitized', ),
# used by Windows Explorer
0x9C9B: ('XPTitle', ),
0x9C9C: ('XPComment', ),
0x9C9D: ('XPAuthor', make_string), # (ignored by Windows Explorer if Artist exists)
0x9C9E: ('XPKeywords', ),
0x9C9F: ('XPSubject', ),
0xA000: ('FlashPixVersion', make_string),
0xA001: ('ColorSpace', {
1: 'sRGB',
2: 'Adobe RGB',
65535: 'Uncalibrated'
}),
0xA002: ('ExifImageWidth', ),
0xA003: ('ExifImageLength', ),
0xA004: ('RelatedSoundFile', ),
0xA005: ('InteroperabilityOffset', INTEROP_INFO),
0xA20B: ('FlashEnergy', ), # 0x920B in TIFF/EP
0xA20C: ('SpatialFrequencyResponse', ), # 0x920C
0xA20E: ('FocalPlaneXResolution', ), # 0x920E
0xA20F: ('FocalPlaneYResolution', ), # 0x920F
0xA210: ('FocalPlaneResolutionUnit', ), # 0x9210
0xA214: ('SubjectLocation', ), # 0x9214
0xA215: ('ExposureIndex', ), # 0x9215
0xA217: ('SensingMethod', { # 0x9217
1: 'Not defined',
2: 'One-chip color area',
3: 'Two-chip color area',
4: 'Three-chip color area',
5: 'Color sequential area',
7: 'Trilinear',
8: 'Color sequential linear'
}),
0xA300: ('FileSource', {
1: 'Film Scanner',
2: 'Reflection Print Scanner',
3: 'Digital Camera'
}),
0xA301: ('SceneType', {
1: 'Directly Photographed'
}),
0xA302: ('CVAPattern', ),
0xA401: ('CustomRendered', {
0: 'Normal',
1: 'Custom'
}),
0xA402: ('ExposureMode', {
0: 'Auto Exposure',
1: 'Manual Exposure',
2: 'Auto Bracket'
}),
0xA403: ('WhiteBalance', {
0: 'Auto',
1: 'Manual'
}),
0xA404: ('DigitalZoomRatio', ),
0xA405: ('FocalLengthIn35mmFilm', ),
0xA406: ('SceneCaptureType', {
0: 'Standard',
1: 'Landscape',
2: 'Portrait',
        3: 'Night'
}),
0xA407: ('GainControl', {
0: 'None',
1: 'Low gain up',
2: 'High gain up',
3: 'Low gain down',
4: 'High gain down'
}),
0xA408: ('Contrast', {
0: 'Normal',
1: 'Soft',
2: 'Hard'
}),
0xA409: ('Saturation', {
0: 'Normal',
1: 'Soft',
2: 'Hard'
}),
0xA40A: ('Sharpness', {
0: 'Normal',
1: 'Soft',
2: 'Hard'
}),
0xA40B: ('DeviceSettingDescription', ),
0xA40C: ('SubjectDistanceRange', ),
0xA420: ('ImageUniqueID', ),
0xA430: ('CameraOwnerName', ),
0xA431: ('BodySerialNumber', ),
0xA432: ('LensSpecification', ),
0xA433: ('LensMake', ),
0xA434: ('LensModel', ),
0xA435: ('LensSerialNumber', ),
0xA500: ('Gamma', ),
0xC4A5: ('PrintIM', ),
0xEA1C: ('Padding', ),
0xEA1D: ('OffsetSchema', ),
0xFDE8: ('OwnerName', ),
0xFDE9: ('SerialNumber', ),
}
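# Hedged illustration (not part of the original tag table): a minimal sketch of
# how a tag table shaped like the dictionary above can be used to turn a raw
# tag id/value pair into a printable name and description. The table is passed
# in explicitly because the dictionary's actual name is not shown here; the
# optional second tuple element is either a value->string mapping or a
# formatting callable such as make_string above.
def describe_tag(tag_table, tag_id, raw_value):
    entry = tag_table.get(tag_id)
    if entry is None:
        return 'Tag 0x%04X' % tag_id, raw_value
    name = entry[0]
    if len(entry) > 1:
        translator = entry[1]
        if callable(translator):
            return name, translator(raw_value)
        if isinstance(translator, dict):
            return name, translator.get(raw_value, raw_value)
    return name, raw_value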
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class AssertZeroImagPartTest(test.TestCase):
def test_real_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([0., 2, 3])
# Should not raise.
self.evaluate(
linear_operator_util.assert_zero_imag_part(x, message="ABC123"))
def test_complex_tensor_with_imag_zero_doesnt_raise(self):
x = ops.convert_to_tensor([1., 0, 3])
y = ops.convert_to_tensor([0., 0, 0])
z = math_ops.complex(x, y)
# Should not raise.
self.evaluate(
linear_operator_util.assert_zero_imag_part(z, message="ABC123"))
def test_complex_tensor_with_nonzero_imag_raises(self):
x = ops.convert_to_tensor([1., 2, 0])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.assertRaisesOpError("ABC123"):
self.evaluate(
linear_operator_util.assert_zero_imag_part(z, message="ABC123"))
class AssertNoEntriesWithModulusZeroTest(test.TestCase):
def test_nonzero_real_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([1., 2, 3])
# Should not raise.
self.evaluate(
linear_operator_util.assert_no_entries_with_modulus_zero(
x, message="ABC123"))
def test_nonzero_complex_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([1., 0, 3])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
# Should not raise.
self.evaluate(
linear_operator_util.assert_no_entries_with_modulus_zero(
z, message="ABC123"))
def test_zero_real_tensor_raises(self):
x = ops.convert_to_tensor([1., 0, 3])
with self.assertRaisesOpError("ABC123"):
self.evaluate(
linear_operator_util.assert_no_entries_with_modulus_zero(
x, message="ABC123"))
def test_zero_complex_tensor_raises(self):
x = ops.convert_to_tensor([1., 2, 0])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.assertRaisesOpError("ABC123"):
self.evaluate(
linear_operator_util.assert_no_entries_with_modulus_zero(
z, message="ABC123"))
class BroadcastMatrixBatchDimsTest(test.TestCase):
def test_zero_batch_matrices_returned_as_empty_list(self):
self.assertAllEqual([],
linear_operator_util.broadcast_matrix_batch_dims([]))
def test_one_batch_matrix_returned_after_tensor_conversion(self):
arr = rng.rand(2, 3, 4)
tensor, = linear_operator_util.broadcast_matrix_batch_dims([arr])
self.assertTrue(isinstance(tensor, ops.Tensor))
self.assertAllClose(arr, self.evaluate(tensor))
def test_static_dims_broadcast(self):
# x.batch_shape = [3, 1, 2]
# y.batch_shape = [4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(3, 1, 2, 1, 5)
y = rng.rand(4, 1, 3, 7)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1))
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
self.assertAllEqual(x_bc_expected.shape, x_bc.shape)
self.assertAllEqual(y_bc_expected.shape, y_bc.shape)
x_bc_, y_bc_ = self.evaluate([x_bc, y_bc])
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_static_dims_broadcast_second_arg_higher_rank(self):
# x.batch_shape = [1, 2]
# y.batch_shape = [1, 3, 1]
# broadcast batch shape = [1, 3, 2]
x = rng.rand(1, 2, 1, 5)
y = rng.rand(1, 3, 2, 3, 7)
batch_of_zeros = np.zeros((1, 3, 2, 1, 1))
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
self.assertAllEqual(x_bc_expected.shape, x_bc.shape)
self.assertAllEqual(y_bc_expected.shape, y_bc.shape)
x_bc_, y_bc_ = self.evaluate([x_bc, y_bc])
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_dynamic_dims_broadcast_32bit(self):
# x.batch_shape = [3, 1, 2]
# y.batch_shape = [4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(3, 1, 2, 1, 5).astype(np.float32)
y = rng.rand(4, 1, 3, 7).astype(np.float32)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1)).astype(np.float32)
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_ph = array_ops.placeholder_with_default(x, shape=None)
y_ph = array_ops.placeholder_with_default(y, shape=None)
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x_ph, y_ph])
x_bc_, y_bc_ = self.evaluate([x_bc, y_bc])
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_dynamic_dims_broadcast_32bit_second_arg_higher_rank(self):
# x.batch_shape = [1, 2]
# y.batch_shape = [3, 4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(1, 2, 1, 5).astype(np.float32)
y = rng.rand(3, 4, 1, 3, 7).astype(np.float32)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1)).astype(np.float32)
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_ph = array_ops.placeholder_with_default(x, shape=None)
y_ph = array_ops.placeholder_with_default(y, shape=None)
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x_ph, y_ph])
x_bc_, y_bc_ = self.evaluate([x_bc, y_bc])
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_less_than_two_dims_raises_static(self):
x = rng.rand(3)
y = rng.rand(1, 1)
with self.assertRaisesRegex(ValueError, "at least two dimensions"):
linear_operator_util.broadcast_matrix_batch_dims([x, y])
with self.assertRaisesRegex(ValueError, "at least two dimensions"):
linear_operator_util.broadcast_matrix_batch_dims([y, x])
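# Hedged illustration (not one of the tests above): a minimal sketch of the
# broadcast_matrix_batch_dims behaviour exercised by BroadcastMatrixBatchDimsTest,
# using the TF-internal linear_operator_util module imported at the top of this
# file. The shapes are arbitrary example values.
def _example_broadcast_matrix_batch_dims():
  x = rng.rand(3, 1, 2, 1, 5)  # batch shape [3, 1, 2], matrix shape [1, 5]
  y = rng.rand(4, 1, 3, 7)     # batch shape [4, 1],    matrix shape [3, 7]
  x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
  # Both tensors now share the broadcast batch shape [3, 4, 2]; the trailing
  # two (matrix) dimensions are left untouched.
  return x_bc, y_bc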
class MatrixSolveWithBroadcastTest(test.TestCase):
def test_static_dims_broadcast_matrix_has_extra_dims(self):
# batch_shape = [2]
matrix = rng.rand(2, 3, 3)
rhs = rng.rand(3, 7)
rhs_broadcast = rhs + np.zeros((2, 1, 1))
result = linear_operator_util.matrix_solve_with_broadcast(matrix, rhs)
self.assertAllEqual((2, 3, 7), result.shape)
expected = linalg_ops.matrix_solve(matrix, rhs_broadcast)
self.assertAllClose(*self.evaluate([expected, result]))
def test_static_dims_broadcast_rhs_has_extra_dims(self):
# Since the second arg has extra dims, and the domain dim of the first arg
# is larger than the number of linear equations, code will "flip" the extra
# dims of the first arg to the far right, making extra linear equations
# (then call the matrix function, then flip back).
# We have verified that this optimization indeed happens. How? We stepped
# through with a debugger.
# batch_shape = [2]
matrix = rng.rand(3, 3)
rhs = rng.rand(2, 3, 2)
matrix_broadcast = matrix + np.zeros((2, 1, 1))
result = linear_operator_util.matrix_solve_with_broadcast(matrix, rhs)
self.assertAllEqual((2, 3, 2), result.shape)
expected = linalg_ops.matrix_solve(matrix_broadcast, rhs)
self.assertAllClose(*self.evaluate([expected, result]))
def test_static_dims_broadcast_rhs_has_extra_dims_dynamic(self):
# Since the second arg has extra dims, and the domain dim of the first arg
# is larger than the number of linear equations, code will "flip" the extra
# dims of the first arg to the far right, making extra linear equations
# (then call the matrix function, then flip back).
# We have verified that this optimization indeed happens. How? We stepped
# through with a debugger.
# batch_shape = [2]
matrix = rng.rand(3, 3)
rhs = rng.rand(2, 3, 2)
matrix_broadcast = matrix + np.zeros((2, 1, 1))
matrix_ph = array_ops.placeholder_with_default(matrix, shape=[None, None])
rhs_ph = array_ops.placeholder_with_default(rhs, shape=[None, None, None])
result = linear_operator_util.matrix_solve_with_broadcast(matrix_ph, rhs_ph)
self.assertAllEqual(3, result.shape.ndims)
expected = linalg_ops.matrix_solve(matrix_broadcast, rhs)
self.assertAllClose(*self.evaluate([expected, result]))
def test_static_dims_broadcast_rhs_has_extra_dims_and_adjoint(self):
# Since the second arg has extra dims, and the domain dim of the first arg
# is larger than the number of linear equations, code will "flip" the extra
# dims of the first arg to the far right, making extra linear equations
# (then call the matrix function, then flip back).
# We have verified that this optimization indeed happens. How? We stepped
# through with a debugger.
# batch_shape = [2]
matrix = rng.rand(3, 3)
rhs = rng.rand(2, 3, 2)
matrix_broadcast = matrix + np.zeros((2, 1, 1))
result = linear_operator_util.matrix_solve_with_broadcast(
matrix, rhs, adjoint=True)
self.assertAllEqual((2, 3, 2), result.shape)
expected = linalg_ops.matrix_solve(matrix_broadcast, rhs, adjoint=True)
self.assertAllClose(*self.evaluate([expected, result]))
def test_dynamic_dims_broadcast_64bit(self):
# batch_shape = [2, 2]
matrix = rng.rand(2, 3, 3)
rhs = rng.rand(2, 1, 3, 7)
matrix_broadcast = matrix + np.zeros((2, 2, 1, 1))
rhs_broadcast = rhs + np.zeros((2, 2, 1, 1))
matrix_ph = array_ops.placeholder_with_default(matrix, shape=None)
rhs_ph = array_ops.placeholder_with_default(rhs, shape=None)
result, expected = self.evaluate([
linear_operator_util.matrix_solve_with_broadcast(matrix_ph, rhs_ph),
linalg_ops.matrix_solve(matrix_broadcast, rhs_broadcast)
])
self.assertAllClose(expected, result)
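# Hedged illustration (not one of the tests above): a minimal sketch of
# matrix_solve_with_broadcast, which MatrixSolveWithBroadcastTest exercises.
# A single [3, 3] matrix is implicitly broadcast against a batch of
# right-hand sides; shapes are arbitrary example values.
def _example_matrix_solve_with_broadcast():
  matrix = rng.rand(3, 3)  # no batch dimensions
  rhs = rng.rand(2, 3, 7)  # batch shape [2]
  # Result has shape [2, 3, 7], as if the matrix had been tiled to [2, 3, 3].
  return linear_operator_util.matrix_solve_with_broadcast(matrix, rhs)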
class DomainDimensionStubOperator(object):
def __init__(self, domain_dimension):
self._domain_dimension = ops.convert_to_tensor(domain_dimension)
def domain_dimension_tensor(self):
return self._domain_dimension
class AssertCompatibleMatrixDimensionsTest(test.TestCase):
def test_compatible_dimensions_do_not_raise(self):
x = ops.convert_to_tensor(rng.rand(2, 3, 4))
operator = DomainDimensionStubOperator(3)
# Should not raise
self.evaluate(
linear_operator_util.assert_compatible_matrix_dimensions(operator, x))
def test_incompatible_dimensions_raise(self):
x = ops.convert_to_tensor(rng.rand(2, 4, 4))
operator = DomainDimensionStubOperator(3)
# pylint: disable=g-error-prone-assert-raises
with self.assertRaisesOpError("Dimensions are not compatible"):
self.evaluate(
linear_operator_util.assert_compatible_matrix_dimensions(operator, x))
# pylint: enable=g-error-prone-assert-raises
class DummyOperatorWithHint(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class UseOperatorOrProvidedHintUnlessContradictingTest(test.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(
("none_none", None, None, None),
("none_true", None, True, True),
("true_none", True, None, True),
("true_true", True, True, True),
("none_false", None, False, False),
("false_none", False, None, False),
("false_false", False, False, False),
)
def test_computes_an_or_if_non_contradicting(self, operator_hint_value,
provided_hint_value,
expected_result):
self.assertEqual(
expected_result,
linear_operator_util.use_operator_or_provided_hint_unless_contradicting(
operator=DummyOperatorWithHint(my_hint=operator_hint_value),
hint_attr_name="my_hint",
provided_hint_value=provided_hint_value,
message="should not be needed here"))
@parameterized.named_parameters(
("true_false", True, False),
("false_true", False, True),
)
def test_raises_if_contradicting(self, operator_hint_value,
provided_hint_value):
with self.assertRaisesRegex(ValueError, "my error message"):
linear_operator_util.use_operator_or_provided_hint_unless_contradicting(
operator=DummyOperatorWithHint(my_hint=operator_hint_value),
hint_attr_name="my_hint",
provided_hint_value=provided_hint_value,
message="my error message")
class BlockwiseTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("split_dim_1", [3, 3, 4], -1),
("split_dim_2", [2, 5], -2),
)
def test_blockwise_input(self, op_dimension_values, split_dim):
op_dimensions = [
tensor_shape.Dimension(v) for v in op_dimension_values]
unknown_op_dimensions = [
tensor_shape.Dimension(None) for _ in op_dimension_values]
batch_shape = [2, 1]
arg_dim = 5
if split_dim == -1:
blockwise_arrays = [np.zeros(batch_shape + [arg_dim, d])
for d in op_dimension_values]
else:
blockwise_arrays = [np.zeros(batch_shape + [d, arg_dim])
for d in op_dimension_values]
blockwise_list = [block.tolist() for block in blockwise_arrays]
blockwise_tensors = [ops.convert_to_tensor(block)
for block in blockwise_arrays]
blockwise_placeholders = [
array_ops.placeholder_with_default(block, shape=None)
for block in blockwise_arrays]
# Iterables of non-nested structures are always interpreted as blockwise.
# The list of lists is interpreted as blockwise as well, regardless of
# whether the operator dimensions are known, since the sizes of its elements
# along `split_dim` are non-identical.
for op_dims in [op_dimensions, unknown_op_dimensions]:
for blockwise_inputs in [
blockwise_arrays, blockwise_list,
blockwise_tensors, blockwise_placeholders]:
self.assertTrue(linear_operator_util.arg_is_blockwise(
op_dims, blockwise_inputs, split_dim))
def test_non_blockwise_input(self):
x = np.zeros((2, 3, 4, 6))
x_tensor = ops.convert_to_tensor(x)
x_placeholder = array_ops.placeholder_with_default(x, shape=None)
x_list = x.tolist()
# For known and matching operator dimensions, interpret all as non-blockwise
op_dimension_values = [2, 1, 3]
op_dimensions = [tensor_shape.Dimension(d) for d in op_dimension_values]
for inputs in [x, x_tensor, x_placeholder, x_list]:
self.assertFalse(linear_operator_util.arg_is_blockwise(
op_dimensions, inputs, -1))
# The input is still interpreted as non-blockwise for unknown operator
    # dimensions (`x_list` has an outermost dimension that does not match the
# number of blocks, and the other inputs are not iterables).
unknown_op_dimensions = [
tensor_shape.Dimension(None) for _ in op_dimension_values]
for inputs in [x, x_tensor, x_placeholder, x_list]:
self.assertFalse(linear_operator_util.arg_is_blockwise(
unknown_op_dimensions, inputs, -1))
def test_ambiguous_input_raises(self):
x = np.zeros((3, 4, 2)).tolist()
op_dimensions = [tensor_shape.Dimension(None) for _ in range(3)]
# Since the leftmost dimension of `x` is equal to the number of blocks, and
# the operators have unknown dimension, the input is ambiguous.
with self.assertRaisesRegex(ValueError, "structure is ambiguous"):
linear_operator_util.arg_is_blockwise(op_dimensions, x, -2)
def test_mismatched_input_raises(self):
x = np.zeros((2, 3, 4, 6)).tolist()
op_dimension_values = [4, 3]
op_dimensions = [tensor_shape.Dimension(v) for v in op_dimension_values]
# The dimensions of the two operator-blocks sum to 7. `x` is a
# two-element list; if interpreted blockwise, its corresponding dimensions
# sum to 12 (=6*2). If not interpreted blockwise, its corresponding
# dimension is 6. This is a mismatch.
with self.assertRaisesRegex(ValueError, "dimension does not match"):
linear_operator_util.arg_is_blockwise(op_dimensions, x, -1)
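# Hedged illustration (not one of the tests above): a minimal sketch of
# arg_is_blockwise, mirroring the blockwise inputs built in BlockwiseTest.
# The operator dimensions and shapes are arbitrary example values.
def _example_arg_is_blockwise():
  op_dims = [tensor_shape.Dimension(d) for d in [3, 3, 4]]
  # A list of blocks whose sizes along the split dimension (-1) match the
  # operator dimensions is interpreted as blockwise input.
  blocks = [np.zeros([2, 1, 5, d]) for d in [3, 3, 4]]
  return linear_operator_util.arg_is_blockwise(op_dims, blocks, -1)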
if __name__ == "__main__":
test.main()
|
|
from __future__ import unicode_literals
from datetime import date
from decimal import Decimal
from django.db.models.query_utils import InvalidQuery
from django.test import TestCase, skipUnlessDBFeature
from .models import Author, Book, BookFkAsPk, Coffee, FriendlyAuthor, Reviewer
class RawQueryTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(first_name='Joe', last_name='Smith', dob=date(1950, 9, 20))
cls.a2 = Author.objects.create(first_name='Jill', last_name='Doe', dob=date(1920, 4, 2))
cls.a3 = Author.objects.create(first_name='Bob', last_name='Smith', dob=date(1986, 1, 25))
cls.a4 = Author.objects.create(first_name='Bill', last_name='Jones', dob=date(1932, 5, 10))
cls.b1 = Book.objects.create(
title='The awesome book', author=cls.a1, paperback=False,
opening_line='It was a bright cold day in April and the clocks were striking thirteen.',
)
cls.b2 = Book.objects.create(
title='The horrible book', author=cls.a1, paperback=True,
opening_line=(
'On an evening in the latter part of May a middle-aged man '
'was walking homeward from Shaston to the village of Marlott, '
'in the adjoining Vale of Blakemore, or Blackmoor.'
),
)
cls.b3 = Book.objects.create(
title='Another awesome book', author=cls.a1, paperback=False,
opening_line='A squat grey building of only thirty-four stories.',
)
cls.b4 = Book.objects.create(
title='Some other book', author=cls.a3, paperback=True,
opening_line='It was the day my grandmother exploded.',
)
cls.c1 = Coffee.objects.create(brand='dunkin doughnuts')
cls.c2 = Coffee.objects.create(brand='starbucks')
cls.r1 = Reviewer.objects.create()
cls.r2 = Reviewer.objects.create()
cls.r1.reviewed.add(cls.b2, cls.b3, cls.b4)
def assertSuccessfulRawQuery(self, model, query, expected_results,
expected_annotations=(), params=[], translations=None):
"""
Execute the passed query against the passed model and check the output
"""
results = list(model.objects.raw(query, params=params, translations=translations))
self.assertProcessed(model, results, expected_results, expected_annotations)
self.assertAnnotations(results, expected_annotations)
def assertProcessed(self, model, results, orig, expected_annotations=()):
"""
Compare the results of a raw query against expected results
"""
self.assertEqual(len(results), len(orig))
for index, item in enumerate(results):
orig_item = orig[index]
for annotation in expected_annotations:
setattr(orig_item, *annotation)
for field in model._meta.fields:
# Check that all values on the model are equal
self.assertEqual(
getattr(item, field.attname),
getattr(orig_item, field.attname)
)
# This includes checking that they are the same type
self.assertEqual(
type(getattr(item, field.attname)),
type(getattr(orig_item, field.attname))
)
def assertNoAnnotations(self, results):
"""
Check that the results of a raw query contain no annotations
"""
self.assertAnnotations(results, ())
def assertAnnotations(self, results, expected_annotations):
"""
Check that the passed raw query results contain the expected
annotations
"""
if expected_annotations:
for index, result in enumerate(results):
annotation, value = expected_annotations[index]
self.assertTrue(hasattr(result, annotation))
self.assertEqual(getattr(result, annotation), value)
def test_simple_raw_query(self):
"""
Basic test of raw query with a simple database query
"""
query = "SELECT * FROM raw_query_author"
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors)
def test_raw_query_lazy(self):
"""
Raw queries are lazy: they aren't actually executed until they're
iterated over.
"""
q = Author.objects.raw('SELECT * FROM raw_query_author')
self.assertIsNone(q.query.cursor)
list(q)
self.assertIsNotNone(q.query.cursor)
def test_FK_raw_query(self):
"""
Test of a simple raw query against a model containing a foreign key
"""
query = "SELECT * FROM raw_query_book"
books = Book.objects.all()
self.assertSuccessfulRawQuery(Book, query, books)
def test_db_column_handler(self):
"""
Test of a simple raw query against a model containing a field with
db_column defined.
"""
query = "SELECT * FROM raw_query_coffee"
coffees = Coffee.objects.all()
self.assertSuccessfulRawQuery(Coffee, query, coffees)
def test_order_handler(self):
"""
        Test of raw query's tolerance for columns being returned in any
order
"""
selects = (
('dob, last_name, first_name, id'),
('last_name, dob, first_name, id'),
('first_name, last_name, dob, id'),
)
for select in selects:
query = "SELECT %s FROM raw_query_author" % select
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors)
def test_translations(self):
"""
Test of raw query's optional ability to translate unexpected result
column names to specific model fields
"""
query = "SELECT first_name AS first, last_name AS last, dob, id FROM raw_query_author"
translations = {'first': 'first_name', 'last': 'last_name'}
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)
def test_params(self):
"""
Test passing optional query parameters
"""
query = "SELECT * FROM raw_query_author WHERE first_name = %s"
author = Author.objects.all()[2]
params = [author.first_name]
qset = Author.objects.raw(query, params=params)
results = list(qset)
self.assertProcessed(Author, results, [author])
self.assertNoAnnotations(results)
self.assertEqual(len(results), 1)
self.assertIsInstance(repr(qset), str)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_pyformat_params(self):
"""
Test passing optional query parameters
"""
query = "SELECT * FROM raw_query_author WHERE first_name = %(first)s"
author = Author.objects.all()[2]
params = {'first': author.first_name}
qset = Author.objects.raw(query, params=params)
results = list(qset)
self.assertProcessed(Author, results, [author])
self.assertNoAnnotations(results)
self.assertEqual(len(results), 1)
self.assertIsInstance(repr(qset), str)
def test_query_representation(self):
"""
Test representation of raw query with parameters
"""
query = "SELECT * FROM raw_query_author WHERE last_name = %(last)s"
qset = Author.objects.raw(query, {'last': 'foo'})
self.assertEqual(repr(qset), "<RawQuerySet: SELECT * FROM raw_query_author WHERE last_name = foo>")
self.assertEqual(repr(qset.query), "<RawQuery: SELECT * FROM raw_query_author WHERE last_name = foo>")
query = "SELECT * FROM raw_query_author WHERE last_name = %s"
qset = Author.objects.raw(query, {'foo'})
self.assertEqual(repr(qset), "<RawQuerySet: SELECT * FROM raw_query_author WHERE last_name = foo>")
self.assertEqual(repr(qset.query), "<RawQuery: SELECT * FROM raw_query_author WHERE last_name = foo>")
def test_many_to_many(self):
"""
Test of a simple raw query against a model containing a m2m field
"""
query = "SELECT * FROM raw_query_reviewer"
reviewers = Reviewer.objects.all()
self.assertSuccessfulRawQuery(Reviewer, query, reviewers)
def test_extra_conversions(self):
"""
        Test to ensure that extra translations are ignored.
"""
query = "SELECT * FROM raw_query_author"
translations = {'something': 'else'}
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)
def test_missing_fields(self):
query = "SELECT id, first_name, dob FROM raw_query_author"
for author in Author.objects.raw(query):
self.assertNotEqual(author.first_name, None)
# last_name isn't given, but it will be retrieved on demand
self.assertNotEqual(author.last_name, None)
def test_missing_fields_without_PK(self):
query = "SELECT first_name, dob FROM raw_query_author"
try:
list(Author.objects.raw(query))
self.fail('Query without primary key should fail')
except InvalidQuery:
pass
def test_annotations(self):
query = (
"SELECT a.*, count(b.id) as book_count "
"FROM raw_query_author a "
"LEFT JOIN raw_query_book b ON a.id = b.author_id "
"GROUP BY a.id, a.first_name, a.last_name, a.dob ORDER BY a.id"
)
expected_annotations = (
('book_count', 3),
('book_count', 0),
('book_count', 1),
('book_count', 0),
)
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, expected_annotations)
def test_white_space_query(self):
query = " SELECT * FROM raw_query_author"
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors)
def test_multiple_iterations(self):
query = "SELECT * FROM raw_query_author"
normal_authors = Author.objects.all()
raw_authors = Author.objects.raw(query)
# First Iteration
first_iterations = 0
for index, raw_author in enumerate(raw_authors):
self.assertEqual(normal_authors[index], raw_author)
first_iterations += 1
# Second Iteration
second_iterations = 0
for index, raw_author in enumerate(raw_authors):
self.assertEqual(normal_authors[index], raw_author)
second_iterations += 1
self.assertEqual(first_iterations, second_iterations)
def test_get_item(self):
# Indexing on RawQuerySets
query = "SELECT * FROM raw_query_author ORDER BY id ASC"
third_author = Author.objects.raw(query)[2]
self.assertEqual(third_author.first_name, 'Bob')
first_two = Author.objects.raw(query)[0:2]
self.assertEqual(len(first_two), 2)
with self.assertRaises(TypeError):
Author.objects.raw(query)['test']
def test_inheritance(self):
# date is the end of the Cuban Missile Crisis, I have no idea when
# Wesley was born
f = FriendlyAuthor.objects.create(first_name="Wesley", last_name="Chun",
dob=date(1962, 10, 28))
query = "SELECT * FROM raw_query_friendlyauthor"
self.assertEqual(
[o.pk for o in FriendlyAuthor.objects.raw(query)], [f.pk]
)
def test_query_count(self):
self.assertNumQueries(1, list, Author.objects.raw("SELECT * FROM raw_query_author"))
def test_subquery_in_raw_sql(self):
try:
list(Book.objects.raw('SELECT id FROM (SELECT * FROM raw_query_book WHERE paperback IS NOT NULL) sq'))
except InvalidQuery:
self.fail("Using a subquery in a RawQuerySet raised InvalidQuery")
def test_db_column_name_is_used_in_raw_query(self):
"""
Regression test that ensures the `column` attribute on the field is
used to generate the list of fields included in the query, as opposed
to the `attname`. This is important when the primary key is a
ForeignKey field because `attname` and `column` are not necessarily the
same.
"""
b = BookFkAsPk.objects.create(book=self.b1)
self.assertEqual(list(BookFkAsPk.objects.raw('SELECT not_the_default FROM raw_query_bookfkaspk')), [b])
def test_decimal_parameter(self):
c = Coffee.objects.create(brand='starbucks', price=20.5)
qs = Coffee.objects.raw("SELECT * FROM raw_query_coffee WHERE price >= %s", params=[Decimal(20)])
self.assertEqual(list(qs), [c])
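# Hedged illustration (not one of the tests above): a minimal sketch of the
# Manager.raw() API these tests exercise, using the Author model imported at
# the top of this module. The literal parameter value is an arbitrary example.
def _example_raw_usage():
    query = (
        "SELECT first_name AS first, last_name, dob, id "
        "FROM raw_query_author WHERE first_name = %s"
    )
    # params fills the %s placeholder; translations maps the unexpected
    # column alias back onto the model field.
    return list(Author.objects.raw(
        query, params=['Joe'], translations={'first': 'first_name'}))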
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Stats Server:
Default Bind IP: localhost
Default Bind Port: 5090
Metric data should be a JSON string.
Mandatory keys:
'metric_name'
'metric_type'
For metric type counter:
mandatory key: 'count'
"""
import sys
import os
import time
import signal
import socket
import json
from threading import Thread
import multiprocessing
import hashlib
import pystat_config
import pystats_log
class TimerMonitor(Thread):
DEFAULT_INTERVAL = 10
def __init__(self, duration, metricsmgr):
super(TimerMonitor, self).__init__()
self.sleep_interval = duration
self.metricsmgr = metricsmgr
def run(self):
pystats_log.print_msg("Timer Monitor start")
while True:
time.sleep(self.sleep_interval)
pystats_log.print_msg("TimerMonitor Wokeup")
self.metricsmgr.forward_metrics()
class StatsForwarder(object):
TIMEOUT = 3
FORWARDERS = {
'kafka': {'module': 'kafka_publisher', 'classname': 'KafkaPublisher'}
}
def __init__(self, common_queue):
self.queue = common_queue
self.forwarders = {}
self.cfg = pystat_config.PyStatConfig()
self.debug_mode = self.cfg.parsedyaml.get('debug_mode', True)
pystats_log.print_msg("StatsForwarder Initialized!")
for forwarder in self.cfg.parsedyaml['forwarders'].keys():
fwobj = self.cfg.parsedyaml['forwarders'][forwarder]
mod = __import__(StatsForwarder.FORWARDERS[forwarder]['module'])
classobj = getattr(
mod, StatsForwarder.FORWARDERS[forwarder]['classname'])
if forwarder == "kafka":
kafka_broker = fwobj['kafka_broker']
kafka_apikey = fwobj['kafka_apikey']
kafka_tenant_id = fwobj['kafka_tenant_id']
kafka_topic = fwobj['kafka_topic']
self.forwarders[forwarder] = classobj(kafka_broker,
kafka_apikey,
kafka_tenant_id,
kafka_topic)
def start(self):
while True:
objdata = None
try:
objdata = self.queue.get(timeout=StatsForwarder.TIMEOUT)
except multiprocessing.queues.Empty:
continue
if objdata is None:
print "No data. Continue"
continue
# Forward metrics.
if objdata['metric_type'] == "trace":
self.forward_trace_metrics(objdata)
elif objdata['metric_type'] == "guage":
self.forward_guage_metrics(objdata)
def forward_trace_metrics(self, objdata):
metric_name = objdata['metric_name']
tags = {}
value = objdata['trace_info']['count']
for tag in objdata['trace_info'].keys():
if tag == "count":
continue
tags[tag] = objdata['trace_info'][tag]
self.forwarders['kafka'].forward_metrics(
metric_name, value, tags, debug=self.debug_mode)
def forward_guage_metrics(self, objdata):
metric_name = objdata['metric_name']
tags = {}
value = objdata['guage_info']['value']
for tag in objdata['guage_info'].keys():
if tag == "value":
continue
tags[tag] = objdata['guage_info'][tag]
self.forwarders['kafka'].forward_metrics(
metric_name, value, tags, debug=self.debug_mode)
class TraceMetric(object):
"""
Manage metric_type: TraceMetric
"""
def __init__(self, jdata):
self.metric_name = jdata['metric_name']
self.metric_type = jdata['metric_type']
self.trace_info = {}
def update_metric(self, jdata):
hashstr = ""
traceobj = {}
for key in jdata:
# skip metric_name and type, copy rest.
if key in ['metric_name', 'metric_type']:
continue
traceobj[key] = jdata[key]
try:
hashstr += key + jdata[key]
except TypeError:
hashstr += key + str(jdata[key])
objhash = hashlib.md5(hashstr)
hash_key = objhash.hexdigest()
if self.trace_info.get(hash_key, None) is None:
self.trace_info[hash_key] = traceobj
self.trace_info[hash_key]['count'] = 1
else:
self.trace_info[hash_key]['count'] += 1
def display_metric_info(self):
print "Name: %s Traceinfo: %s " % (self.metric_name, self.trace_info)
def get_metric_info(self):
metricobjs = []
for key in self.trace_info.keys():
metricobj = {}
metricobj['metric_name'] = self.metric_name
metricobj['metric_type'] = self.metric_type
metricobj['trace_info'] = self.trace_info[key]
metricobjs.append(metricobj)
return metricobjs
class GuageMetric(object):
def __init__(self, jdata):
self.metric_name = jdata['metric_name']
self.metric_type = jdata['metric_type']
self.guage_info = {}
def update_metric(self, jdata):
self.metric_value = int(jdata['value'])
hashstr = ""
traceobj = {}
for key in jdata:
# skip metric_name and type, copy rest.
if key in ['metric_name', 'metric_type', 'value']:
continue
traceobj[key] = jdata[key]
try:
hashstr += key + jdata[key]
except TypeError:
hashstr += key + str(jdata[key])
objhash = hashlib.md5(hashstr)
hash_key = objhash.hexdigest()
if self.guage_info.get(hash_key, None) is None:
self.guage_info[hash_key] = traceobj
self.guage_info[hash_key]['value'] = jdata['value']
else:
self.guage_info[hash_key]['value'] = jdata['value']
def display_metric_info(self):
print "Name: %s Traceinfo: %s " % (self.metric_name, self.guage_info)
def get_metric_info(self):
metricobjs = []
for key in self.guage_info.keys():
metricobj = {}
metricobj['metric_name'] = self.metric_name
metricobj['metric_type'] = self.metric_type
metricobj['guage_info'] = self.guage_info[key]
metricobjs.append(metricobj)
return metricobjs
class CounterMetric(object):
def __init__(self, jdata):
self.metric_name = jdata['metric_name']
self.metric_type = jdata['metric_type']
self.metric_counter = 1
def update_metric(self, jdata):
self.metric_counter += int(jdata['count'])
def display_metric_info(self):
print "Name: %s, Type: %s, Counter: %d " % \
(self.metric_name, self.metric_type, self.metric_counter)
def get_metric_info(self):
metricobj = {}
metricobj['metric_name'] = self.metric_name
metricobj['metric_type'] = self.metric_type
metricobj['metric_counter'] = self.metric_counter
return [metricobj]
class MetricsManager(object):
METRIC_TYPES = {
'counter': 'CounterMetric',
'trace': 'TraceMetric',
'guage': 'GuageMetric'
}
def __init__(self, common_queue):
self.metrics = {}
self.queue = common_queue
self.last_sent_trace = []
def init_metric(self, jdata):
metric_name = jdata['metric_name']
metric_type = jdata['metric_type']
metric_cls = getattr(sys.modules['__main__'],
MetricsManager.METRIC_TYPES[metric_type])
self.metrics[metric_name] = metric_cls(jdata)
def add_metric(self, jdata):
metric_name = jdata['metric_name']
if self.metrics.get(metric_name, None) is None:
self.init_metric(jdata)
self.metrics[metric_name].update_metric(jdata)
else:
self.metrics[metric_name].update_metric(jdata)
def display_metric_info(self, jdata):
metric_name = jdata['metric_name']
if self.metrics.get(metric_name, None) is not None:
self.metrics[metric_name].display_metric_info()
def put_metric_data_on_queue(self, jdata):
metric_name = jdata['metric_name']
metricobj = self.metrics[metric_name].get_metric_info()
self.queue.put(metricobj)
def forward_metrics(self):
for metric_name in self.metrics.keys():
metricobjs = self.metrics[metric_name].get_metric_info()
for metricobj in metricobjs:
hashstr = str(metricobj)
objhash = hashlib.md5(hashstr)
hash_key = objhash.hexdigest()
if hash_key in self.last_sent_trace:
print "(%d) skip sending: %s" % \
(len(self.last_sent_trace), hash_key)
continue
self.queue.put(metricobj)
self.last_sent_trace.append(hash_key)
# Keep only the last n(10) elements in this list.
# as it is unlikely anything more than that has not been sent.
length = len(self.last_sent_trace)
if length > 10:
self.last_sent_trace.pop(0)
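# Hedged illustration (not used by the server itself): a minimal sketch of the
# MetricsManager flow, following the JSON protocol described in the module
# docstring. Note that init_metric looks the metric classes up on the
# __main__ module, so this sketch assumes the module is run as a script.
def example_metrics_manager():
    queue = multiprocessing.Queue()
    mgr = MetricsManager(queue)
    mgr.add_metric({'metric_name': 'requests', 'metric_type': 'counter', 'count': 1})
    mgr.add_metric({'metric_name': 'requests', 'metric_type': 'counter', 'count': 4})
    # Aggregated metric objects are pushed onto the shared queue for the forwarder.
    mgr.forward_metrics()
    return queue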
class UDPServer(object):
"""
Stats UDP Server.
"""
BUFSIZE = 1024
def __init__(self, bind_ip, bind_port, common_queue):
"""Initalize UDPServer"""
self.bind_ip = bind_ip
self.bind_port = bind_port
self.sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
self.sock.bind((self.bind_ip, self.bind_port))
self.metricsmgr = MetricsManager(common_queue)
self.timermonitor = None
def start_timer(self):
pystats_log.print_msg("Start Timer Monitor")
self.timermonitor = TimerMonitor(10, self.metricsmgr)
self.timermonitor.start()
def start_listener(self):
"""Listener"""
self.start_timer()
while True:
try:
data, addr = self.sock.recvfrom(UDPServer.BUFSIZE)
except KeyboardInterrupt:
pystats_log.print_msg("KeyboardInterrupt: Terminate Server!")
sys.exit()
try:
jdata = json.loads(data)
except ValueError:
pystats_log.print_msg("Data is not correct json string")
continue
self.metricsmgr.add_metric(jdata)
#self.metricsmgr.display_metric_info(jdata)
class StatsServer(object):
def __init__(self):
self.cfg = pystat_config.PyStatConfig()
self.bind_ip = self.cfg.parsedyaml.get(
'bind_address',
os.environ.get('STATSD_BIND_ADDRESS', 'localhost'))
# TODO: catch exception
self.bind_port = int(self.cfg.parsedyaml.get(
'bind_port',
os.environ.get('STATSD_BIND_PORT', 5090)))
self.udpserver = None
self.udpworker = None
self.forwarder = None
self.fwdworker = None
# Handle Keyboard Interrupt
signal.signal(signal.SIGINT, self._handle_sigterm)
def _handle_sigterm(self, signum, frame):
print "Signal caught: ", signum
self.udpworker.terminate()
print "UDP Server Terminated!"
self.fwdworker.terminate()
print "Fowarder Terminated!"
def udpserver_initiate(self, common_queue):
self.udpserver = UDPServer(self.bind_ip,
self.bind_port,
common_queue)
self.udpserver.start_listener()
def forwarder_initiate(self, common_queue):
self.forwarder = StatsForwarder(common_queue)
self.forwarder.start()
def run(self):
common_queue = multiprocessing.Queue()
self.udpworker = multiprocessing.Process(target=self.udpserver_initiate,
args=(common_queue,))
self.udpworker.start()
print "UDP Server Started!"
self.fwdworker = multiprocessing.Process(target=self.forwarder_initiate,
args=(common_queue,))
self.fwdworker.start()
print "Fowarder Started!"
def main():
"""Main Function"""
statserver = StatsServer()
statserver.run()
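def example_send_counter(host='localhost', port=5090):
    """Hedged illustration: send one counter metric using the JSON protocol
    described in the module docstring. The host/port defaults mirror the
    documented defaults; the metric name is an arbitrary example."""
    payload = json.dumps({'metric_name': 'requests',
                          'metric_type': 'counter',
                          'count': 1})
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(payload, (host, port))
    sock.close()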
if __name__ == '__main__':
main()
|
|
"""\
NAME
qmmd.py
SYNOPSIS
First principles molecular dynamics module
DESCRIPTION
This is an experimental module for ab initio MD calculations which
makes use of numerical integrators to move nuclei in response to forces
computed through analytic derivatives of the total energy expression.
AUTHOR
Hatem H. Helal, [email protected]
REPORT BUGS
Report bugs to [email protected]
COPYRIGHT
"""
from leapfrog import leapfrog
from hartree_fock import rhf,uhf,uhf_fixed_occ
from Wavefunction import Wavefunction
from force import hf_force
from Element import symbol
def rhf_dyn(atoms,**opts):
"""\
Uses RHF derived forces to compute dynamics.
Options: Value Description
-------- ----- -----------
job pydyn Descriptive job name
nsteps 100 number of dynamics steps to take
dt 0.1 time step size in picoseconds
units matter! we assume atom positions are
stored in bohrs, velocities are in bohr/ps,
acceleration is in bohr/ps^2
and forces are in hartree/bohr
Hartree-Fock options copied from hartree_fock.py -> rhf
rhf(atoms,**opts) - Closed-shell HF driving routine
atoms A Molecule object containing the molecule
Options: Value Description
-------- ----- -----------
ConvCriteria 1e-4 Convergence Criteria
MaxIter 20 Maximum SCF iterations
DoAveraging True Use DIIS for accelerated convergence (default)
False No convergence acceleration
ETemp False Use ETemp value for finite temperature DFT (default)
float Use (float) for the electron temperature
bfs None The basis functions to use. List of CGBF's
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
orbs None If not none, the guess orbitals
"""
#dynamics options
job = opts.get('job','pydyn')
nsteps = opts.get('nsteps',100)
dt = opts.get('dt',0.1)
#save any given RHF options
cc = opts.get('ConvCriteria',1e-4)
maxit = opts.get('MaxIter',20)
doavg = opts.get('DoAveraging',False)
temp = opts.get('ETemp',False)
bfcns = opts.get('bfs',None)
if not bfcns:
bdat = opts.get('basis_data',None)
ints = opts.get('integrals', None)
init_orbs = opts.get('orbs',None)
#open data file to store energy info
edat = open(job+'.edat', 'w')
edat.write("#Step Time PE KE TE\n")
#open trajectory file to store xyz info
xyz = open(job+'.xyz', 'w')
#xyz.write("#RHF molecular dynamics done by PyQuante\n")
#xyz.write("#job: %s nsteps: %d dt:%f\n"%(job,nsteps,dt))
xyz.write(xyz_str(atoms))
t=0.
for n in range(nsteps):
        t += dt
pe,orben,coefs = rhf(atoms,ConvCriteria=cc,MaxIter=maxit,\
DoAveraging=doavg,ETemp=temp,bfs=bfcns,\
basis_data=bdat,integrals=ints,orbs=init_orbs)
ncl,nop = atoms.get_closedopen()
wf = Wavefunction(orbs=coefs,orbe=orben,restricted=True,nclosed=ncl,nopen=nop)
hf_force(atoms,wf,bdat)
ke = leapfrog(atoms,t,dt)
te = ke+pe
bl = atoms[0].dist(atoms[1])
edat.write('%d %f %f %f %f %f\n' %(n,t,bl,pe,ke,te))
xyz.write(xyz_str(atoms))
edat.close()
xyz.close()
return
def uhf_dyn(atoms,**opts):
"""\
Uses UHF derived forces to compute dynamics.
Options: Value Description
-------- ----- -----------
job pydyn Descriptive job name
nsteps 100 number of dynamics steps to take
dt 0.1 time step size in picoseconds
units matter! we assume atom positions are
stored in bohrs, velocities are in bohr/ps,
acceleration is in bohr/ps^2
and forces are in hartree/bohr
Hartree-Fock options copied from hartree_fock.py -> uhf
    uhf(atoms,**opts) - Unrestricted Open Shell Hartree Fock
atoms A Molecule object containing the molecule
Options: Value Description
-------- ----- -----------
ConvCriteria 1e-4 Convergence Criteria
MaxIter 20 Maximum SCF iterations
DoAveraging True Use DIIS averaging for convergence acceleration
bfs None The basis functions to use. List of CGBF's
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
orbs None If not None, the guess orbitals
"""
#dynamics options
job = opts.get('job','pydyn')
nsteps = opts.get('nsteps',100)
dt = opts.get('dt',0.1)
#save any given UHF options
cc = opts.get('ConvCriteria',1e-4)
maxit = opts.get('MaxIter',20)
doavg = opts.get('DoAveraging',False)
temp = opts.get('ETemp',False)
bfcns = opts.get('bfs',None)
if not bfcns:
bdat = opts.get('basis_data',None)
ints = opts.get('integrals', None)
init_orbs = opts.get('orbs',None)
#open data file to store energy info
edat = open(job+'.edat', 'w')
edat.write("#Step Time PE KE TE\n")
#open trajectory file to store xyz info
xyz = open(job+'.xyz', 'w')
#xyz.write("#RHF molecular dynamics done by PyQuante\n")
#xyz.write("#job: %s nsteps: %d dt:%f\n"%(job,nsteps,dt))
xyz.write(xyz_str(atoms))
t=0.
for n in range(nsteps):
        t += dt
pe,(orbea,orbeb),(coefsa,coefsb) = uhf(atoms,ConvCriteria=cc,MaxIter=maxit,\
DoAveraging=doavg,ETemp=temp,bfs=bfcns,\
basis_data=bdat,integrals=ints,orbs=init_orbs)
na,nb = atoms.get_alphabeta()
wf = Wavefunction(orbs_a=coefsa,orbs_b=coefsb,\
orbe_a=orbea,orbe_b=orbeb,\
unrestricted=True,nalpha=na,nbeta=nb)
hf_force(atoms,wf,bdat)
#bl = atoms[0].dist(atoms[1]) #testing with H2
ke = leapfrog(atoms,t,dt)
te = ke+pe
edat.write('%d %f %f %f %f\n' %(n,t,pe,ke,te))
xyz.write(xyz_str(atoms))
edat.close()
xyz.close()
return
def fixedocc_uhf_dyn(atoms,occa,occb,**opts):
"""\
Uses Fixed occupation UHF derived forces to compute dynamics.
occa and occb represent the orbital occupation arrays for
    calculating the spin orbitals with holes
Options: Value Description
-------- ----- -----------
job pydyn Descriptive job name
nsteps 100 number of dynamics steps to take
dt 0.1 time step size in picoseconds
units matter! we assume atom positions are
stored in bohrs, velocities are in bohr/ps,
acceleration is in bohr/ps^2
and forces are in hartree/bohr
Hartree-Fock options copied from hartree_fock.py -> uhf
    uhf(atoms,**opts) - Unrestricted Open Shell Hartree Fock
atoms A Molecule object containing the molecule
Options: Value Description
-------- ----- -----------
ConvCriteria 1e-4 Convergence Criteria
MaxIter 20 Maximum SCF iterations
DoAveraging True Use DIIS averaging for convergence acceleration
bfs None The basis functions to use. List of CGBF's
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
orbs None If not None, the guess orbitals
"""
#dynamics options
job = opts.get('job','pydyn')
nsteps = opts.get('nsteps',100)
dt = opts.get('dt',0.1)
#save any given UHF options
cc = opts.get('ConvCriteria',1e-4)
maxit = opts.get('MaxIter',20)
doavg = opts.get('DoAveraging',False)
temp = opts.get('ETemp',False)
bfcns = opts.get('bfs',None)
if not bfcns:
bdat = opts.get('basis_data',None)
ints = opts.get('integrals', None)
init_orbs = opts.get('orbs',None)
#open data file to store energy info
edat = open(job+'.edat', 'w')
edat.write("#Step Time PE KE TE\n")
#open trajectory file to store xyz info
xyz = open(job+'.xyz', 'w')
#xyz.write("#RHF molecular dynamics done by PyQuante\n")
#xyz.write("#job: %s nsteps: %d dt:%f\n"%(job,nsteps,dt))
xyz.write(xyz_str(atoms))
t=0.
for n in range(nsteps):
        t += dt
pe,(orbea,orbeb),(coefsa,coefsb) = uhf_fixed_occ(atoms,occa,occb, ConvCriteria=cc,MaxIter=maxit,\
DoAveraging=doavg,ETemp=temp,bfs=bfcns,\
basis_data=bdat,integrals=ints,orbs=init_orbs)
na,nb = atoms.get_alphabeta()
wf = Wavefunction(orbs_a=coefsa,orbs_b=coefsb,\
orbe_a=orbea,orbe_b=orbeb,\
occs_a=occa,occs_b=occb,\
fixedocc=True,nalpha=na,nbeta=nb)
hf_force(atoms,wf,bdat)
bl = atoms[0].dist(atoms[1]) #testing with H2
ke = leapfrog(atoms,t,dt)
te = ke+pe
edat.write('%d %f %f %f %f %f\n' %(n,t,bl,pe,ke,te))
xyz.write(xyz_str(atoms))
edat.close()
xyz.close()
return
def xyz_str(mol):
    """
    Takes a mol and returns the xyz file as a string
    """
    natoms = len(mol.atoms)
    out = '%d\n' % natoms
    out += '%s\n' % mol.name
    for atom in mol.atoms:
        sym = symbol[atom.atno]
        xyz = atom.r
        out += "%s %12.6f %12.6f %12.6f\n" % (sym, xyz[0], xyz[1], xyz[2])
    return out
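# Hedged illustration (not part of the original module): a minimal sketch of
# driving rhf_dyn on an H2 molecule. The PyQuante.Molecule import path, the
# geometry and the option values are assumptions for illustration only;
# positions are given in bohr as the docstrings above require.
def example_h2_dynamics():
    from PyQuante.Molecule import Molecule  # assumed importable alongside this module
    h2 = Molecule('h2',
                  [(1, (0.0, 0.0, 0.0)),
                   (1, (0.0, 0.0, 1.4))],
                  units='Bohr')
    rhf_dyn(h2, job='h2_dyn', nsteps=10, dt=0.1)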
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_feopolicy_binding(base_resource) :
""" Binding class showing the feopolicy that can be bound to lbvserver.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._bindpoint = ""
self._name = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
ur"""Priority.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""Priority.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Name of the policy bound to the LB vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Name of the policy bound to the LB vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
ur"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
        CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
        CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def bindpoint(self) :
ur"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
ur"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of policy label to invoke. Applicable only to rewrite and cache policies. Available settings function as follows:
* reqvserver - Evaluate the request against the request-based policies bound to the specified virtual server.
* resvserver - Evaluate the response against the response-based policies bound to the specified virtual server.
* policylabel - invoke the request or response against the specified user-defined policy label.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
ur"""Type of policy label to invoke. Applicable only to rewrite and cache policies. Available settings function as follows:
* reqvserver - Evaluate the request against the request-based policies bound to the specified virtual server.
* resvserver - Evaluate the response against the response-based policies bound to the specified virtual server.
* policylabel - invoke the request or response against the specified user-defined policy label.
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the virtual server or user-defined policy label to invoke if the policy evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name of the virtual server or user-defined policy label to invoke if the policy evaluates to TRUE.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def invoke(self) :
ur"""Invoke policies bound to a virtual server or policy label.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
ur"""Invoke policies bound to a virtual server or policy label.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_feopolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_feopolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = lbvserver_feopolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_feopolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = lbvserver_feopolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_feopolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch lbvserver_feopolicy_binding resources.
"""
try :
obj = lbvserver_feopolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of lbvserver_feopolicy_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_feopolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count lbvserver_feopolicy_binding resources configued on NetScaler.
"""
try :
obj = lbvserver_feopolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of lbvserver_feopolicy_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_feopolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_feopolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.lbvserver_feopolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_feopolicy_binding = [lbvserver_feopolicy_binding() for _ in range(length)]
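# Hedged illustration (not part of the generated SDK file): a minimal sketch of
# fetching the feopolicy bindings of an LB vserver. The nitro_service import
# path follows the SDK layout used above; the NetScaler address, credentials
# and vserver name are placeholder assumptions.
def _example_fetch_bindings():
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service("10.0.0.1", "http")
    client.login("nsroot", "nsroot")
    try:
        # Returns the list of lbvserver_feopolicy_binding objects for the vserver.
        return lbvserver_feopolicy_binding.get(client, "lb_vserver_1")
    finally:
        client.logout()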
|
|
#!/usr/bin/env python
# cardinal_pythonlib/modules.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal ([email protected]).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Functions to work with Python modules.**
"""
import importlib
from importlib.machinery import ExtensionFileLoader, EXTENSION_SUFFIXES
import inspect
import os
import os.path
import pkgutil
# noinspection PyUnresolvedReferences
from types import ModuleType
from typing import Dict, List, Union
from cardinal_pythonlib.logs import get_brace_style_log_with_null_handler
log = get_brace_style_log_with_null_handler(__name__)
# =============================================================================
# Module management
# =============================================================================
def import_submodules(package: Union[str, ModuleType],
base_package_for_relative_import: str = None,
recursive: bool = True) -> Dict[str, ModuleType]:
"""
Import all submodules of a module, recursively, including subpackages.
Args:
package: package (name or actual module)
base_package_for_relative_import: path to prepend?
recursive: import submodules too?
Returns:
dict: mapping from full module name to module
"""
# https://stackoverflow.com/questions/3365740/how-to-import-all-submodules
if isinstance(package, str):
package = importlib.import_module(package,
base_package_for_relative_import)
results = {}
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + '.' + name
log.debug("importing: {}", full_name)
results[full_name] = importlib.import_module(full_name)
if recursive and is_pkg:
results.update(import_submodules(full_name))
return results
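# A minimal usage sketch (the package name "mypackage" is hypothetical):
#
#     import mypackage
#     submodules = import_submodules(mypackage)
#     # submodules maps e.g. "mypackage.sub" -> <module 'mypackage.sub'>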
# Note slightly nastier way: e.g.
# # Task imports: everything in "tasks" directory
# task_modules = glob.glob(os.path.dirname(__file__) + "/tasks/*.py")
# task_modules = [os.path.basename(f)[:-3] for f in task_modules]
# for tm in task_modules:
# __import__(tm, locals(), globals())
# =============================================================================
# For package developers
# =============================================================================
def is_builtin_module(module: ModuleType) -> bool:
"""
Is this module a built-in module, like ``os``?
Method is as per :func:`inspect.getfile`.
"""
assert inspect.ismodule(module)
return not hasattr(module, "__file__")
def is_module_a_package(module: ModuleType) -> bool:
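    """
    Is this module a package, i.e. is the file defining it an ``__init__.py``?
    """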
assert inspect.ismodule(module)
return os.path.basename(inspect.getfile(module)) == "__init__.py"
def is_c_extension(module: ModuleType) -> bool:
"""
Modified from
https://stackoverflow.com/questions/20339053/in-python-how-can-one-tell-if-a-module-comes-from-a-c-extension.
``True`` only if the passed module is a C extension implemented as a
dynamically linked shared library specific to the current platform.
Args:
module: Previously imported module object to be tested.
Returns:
bool: ``True`` only if this module is a C extension.
Examples:
.. code-block:: python
from cardinal_pythonlib.modules import is_c_extension
import os
import _elementtree as et
import numpy
import numpy.core.multiarray as numpy_multiarray
is_c_extension(os) # False
is_c_extension(numpy) # False
is_c_extension(et) # False on my system (Python 3.5.6). True in the original example.
is_c_extension(numpy_multiarray) # True
""" # noqa
assert inspect.ismodule(module), f'"{module}" not a module.'
# If this module was loaded by a PEP 302-compliant CPython-specific loader
# loading only C extensions, this module is a C extension.
if isinstance(getattr(module, '__loader__', None), ExtensionFileLoader):
return True
# If it's built-in, it's not a C extension.
if is_builtin_module(module):
return False
# Else, fallback to filetype matching heuristics.
#
# Absolute path of the file defining this module.
module_filename = inspect.getfile(module)
# "."-prefixed filetype of this path if any or the empty string otherwise.
module_filetype = os.path.splitext(module_filename)[1]
# This module is only a C extension if this path's filetype is that of a
# C extension specific to the current platform.
return module_filetype in EXTENSION_SUFFIXES
def contains_c_extension(module: ModuleType,
import_all_submodules: bool = True,
include_external_imports: bool = False,
seen: List[ModuleType] = None) -> bool:
"""
Extends :func:`is_c_extension` by asking: is this module, or any of its
submodules, a C extension?
Args:
module: Previously imported module object to be tested.
import_all_submodules: explicitly import all submodules of this module?
include_external_imports: check modules in other packages that this
module imports?
seen: used internally for recursion (to deal with recursive modules);
should be ``None`` when called by users
Returns:
bool: ``True`` only if this module or one of its submodules is a C
extension.
Examples:
.. code-block:: python
import logging
from cardinal_pythonlib.modules import contains_c_extension
from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger
import _elementtree as et
import os
import arrow
import alembic
import django
import numpy
import numpy.core.multiarray as numpy_multiarray
log = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG) # be verbose
main_only_quicksetup_rootlogger(level=logging.DEBUG)
contains_c_extension(os) # False
contains_c_extension(et) # False
contains_c_extension(numpy) # True -- different from is_c_extension()
contains_c_extension(numpy_multiarray) # True
contains_c_extension(arrow) # False
contains_c_extension(alembic) # False
contains_c_extension(alembic, include_external_imports=True) # True
# ... this example shows that Alembic imports hashlib, which can import
# _hashlib, which is a C extension; however, that doesn't stop us (for
# example) installing Alembic on a machine with no C compiler
contains_c_extension(django)
""" # noqa
assert inspect.ismodule(module), f'"{module}" not a module.'
if seen is None: # only true for the top-level call
seen = [] # type: List[ModuleType]
if module in seen: # modules can "contain" themselves
# already inspected; avoid infinite loops
return False
seen.append(module)
# Check the thing we were asked about
is_c_ext = is_c_extension(module)
log.info("Is module {!r} a C extension? {}", module, is_c_ext)
if is_c_ext:
return True
if is_builtin_module(module):
# built-in, therefore we stop searching it
return False
# Now check any children, in a couple of ways
top_level_module = seen[0]
top_path = os.path.dirname(top_level_module.__file__)
# Recurse using dir(). This picks up modules that are automatically
    # imported by our top-level module. But it won't pick up all submodules;
# try e.g. for django.
for candidate_name in dir(module):
candidate = getattr(module, candidate_name)
# noinspection PyBroadException
try:
if not inspect.ismodule(candidate):
# not a module
continue
except Exception:
# e.g. a Django module that won't import until we configure its
# settings
log.error("Failed to test ismodule() status of {!r}", candidate)
continue
if is_builtin_module(candidate):
# built-in, therefore we stop searching it
continue
candidate_fname = getattr(candidate, "__file__")
if not include_external_imports:
if os.path.commonpath([top_path, candidate_fname]) != top_path:
log.debug("Skipping, not within the top-level module's "
"directory: {!r}", candidate)
continue
# Recurse:
if contains_c_extension(
module=candidate,
import_all_submodules=False, # only done at the top level, below # noqa
include_external_imports=include_external_imports,
seen=seen):
return True
if import_all_submodules:
if not is_module_a_package(module):
log.debug("Top-level module is not a package: {!r}", module)
return False
# Otherwise, for things like Django, we need to recurse in a different
# way to scan everything.
# See https://stackoverflow.com/questions/3365740/how-to-import-all-submodules. # noqa
log.debug("Walking path: {!r}", top_path)
# noinspection PyBroadException
try:
for loader, module_name, is_pkg in pkgutil.walk_packages([top_path]): # noqa
if not is_pkg:
log.debug("Skipping, not a package: {!r}", module_name)
continue
log.debug("Manually importing: {!r}", module_name)
# noinspection PyBroadException
try:
candidate = loader.find_module(module_name)\
.load_module(module_name) # noqa
except Exception:
# e.g. Alembic "autogenerate" gives: "ValueError: attempted
# relative import beyond top-level package"; or Django
# "django.core.exceptions.ImproperlyConfigured"
log.error("Package failed to import: {!r}", module_name)
continue
if contains_c_extension(
module=candidate,
import_all_submodules=False, # only done at the top level # noqa
include_external_imports=include_external_imports,
seen=seen):
return True
except Exception:
log.error("Unable to walk packages further; no C extensions "
"detected so far!")
raise
return False
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_extender_controller_extender
short_description: Extender controller configuration in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify extender_controller feature and extender category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
              In previous versions, this attribute was nested at a deeper level.
              It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
extender_controller_extender:
description:
- Extender controller configuration.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
aaa_shared_secret:
description:
- AAA shared secret.
type: str
access_point_name:
description:
- Access point name(APN).
type: str
admin:
description:
- FortiExtender Administration (enable or disable).
type: str
choices:
- disable
- discovered
- enable
at_dial_script:
description:
- Initialization AT commands specific to the MODEM.
type: str
billing_start_day:
description:
- Billing start day.
type: int
cdma_aaa_spi:
description:
- CDMA AAA SPI.
type: str
cdma_ha_spi:
description:
- CDMA HA SPI.
type: str
cdma_nai:
description:
- NAI for CDMA MODEMS.
type: str
conn_status:
description:
- Connection status.
type: int
description:
description:
- Description.
type: str
dial_mode:
description:
- Dial mode (dial-on-demand or always-connect).
type: str
choices:
- dial-on-demand
- always-connect
dial_status:
description:
- Dial status.
type: int
ext_name:
description:
- FortiExtender name.
type: str
ha_shared_secret:
description:
- HA shared secret.
type: str
id:
description:
- FortiExtender serial number.
required: true
type: str
ifname:
description:
- FortiExtender interface name.
type: str
initiated_update:
description:
- Allow/disallow network initiated updates to the MODEM.
type: str
choices:
- enable
- disable
mode:
description:
- FortiExtender mode.
type: str
choices:
- standalone
- redundant
modem_passwd:
description:
- MODEM password.
type: str
modem_type:
description:
- MODEM type (CDMA, GSM/LTE or WIMAX).
type: str
choices:
- cdma
- gsm/lte
- wimax
multi_mode:
description:
- MODEM mode of operation(3G,LTE,etc).
type: str
choices:
- auto
- auto-3g
- force-lte
- force-3g
- force-2g
ppp_auth_protocol:
description:
- PPP authentication protocol (PAP,CHAP or auto).
type: str
choices:
- auto
- pap
- chap
ppp_echo_request:
description:
- Enable/disable PPP echo request.
type: str
choices:
- enable
- disable
ppp_password:
description:
- PPP password.
type: str
ppp_username:
description:
- PPP username.
type: str
primary_ha:
description:
- Primary HA.
type: str
quota_limit_mb:
description:
- Monthly quota limit (MB).
type: int
redial:
description:
- Number of redials allowed based on failed attempts.
type: str
choices:
- none
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
redundant_intf:
description:
- Redundant interface.
type: str
roaming:
description:
- Enable/disable MODEM roaming.
type: str
choices:
- enable
- disable
role:
description:
- FortiExtender work role(Primary, Secondary, None).
type: str
choices:
- none
- primary
- secondary
secondary_ha:
description:
- Secondary HA.
type: str
sim_pin:
description:
- SIM PIN.
type: str
vdom:
description:
- VDOM
type: int
wimax_auth_protocol:
description:
- WiMax authentication protocol(TLS or TTLS).
type: str
choices:
- tls
- ttls
wimax_carrier:
description:
- WiMax carrier.
type: str
wimax_realm:
description:
- WiMax realm.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Extender controller configuration.
fortios_extender_controller_extender:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
extender_controller_extender:
aaa_shared_secret: "<your_own_value>"
access_point_name: "<your_own_value>"
admin: "disable"
at_dial_script: "<your_own_value>"
billing_start_day: "7"
cdma_aaa_spi: "<your_own_value>"
cdma_ha_spi: "<your_own_value>"
cdma_nai: "<your_own_value>"
conn_status: "11"
description: "<your_own_value>"
dial_mode: "dial-on-demand"
dial_status: "14"
ext_name: "<your_own_value>"
ha_shared_secret: "<your_own_value>"
id: "17"
ifname: "<your_own_value>"
initiated_update: "enable"
mode: "standalone"
modem_passwd: "<your_own_value>"
modem_type: "cdma"
multi_mode: "auto"
ppp_auth_protocol: "auto"
ppp_echo_request: "enable"
ppp_password: "<your_own_value>"
ppp_username: "<your_own_value>"
primary_ha: "<your_own_value>"
quota_limit_mb: "29"
redial: "none"
redundant_intf: "<your_own_value>"
roaming: "enable"
role: "none"
secondary_ha: "<your_own_value>"
sim_pin: "<your_own_value>"
vdom: "36"
wimax_auth_protocol: "tls"
wimax_carrier: "<your_own_value>"
wimax_realm: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_extender_controller_extender_data(json):
option_list = ['aaa_shared_secret', 'access_point_name', 'admin',
'at_dial_script', 'billing_start_day', 'cdma_aaa_spi',
'cdma_ha_spi', 'cdma_nai', 'conn_status',
'description', 'dial_mode', 'dial_status',
'ext_name', 'ha_shared_secret', 'id',
'ifname', 'initiated_update', 'mode',
'modem_passwd', 'modem_type', 'multi_mode',
'ppp_auth_protocol', 'ppp_echo_request', 'ppp_password',
'ppp_username', 'primary_ha', 'quota_limit_mb',
'redial', 'redundant_intf', 'roaming',
'role', 'secondary_ha', 'sim_pin',
'vdom', 'wimax_auth_protocol', 'wimax_carrier',
'wimax_realm']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
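# Illustrative example (not part of the module): only known option keys with
# non-None values are kept, e.g.
#     filter_extender_controller_extender_data({'admin': 'enable', 'mode': None, 'bogus': 1})
#     # -> {'admin': 'enable'}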
def underscore_to_hyphen(data):
if isinstance(data, list):
        data = [underscore_to_hyphen(elem) for elem in data]
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
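# Illustrative example (not part of the module): Ansible-style keys are converted
# to the hyphenated keys the FortiOS API expects, e.g.
#     underscore_to_hyphen({'access_point_name': 'apn1', 'ppp_auth_protocol': 'auto'})
#     # -> {'access-point-name': 'apn1', 'ppp-auth-protocol': 'auto'}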
def extender_controller_extender(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
    elif 'state' in data['extender_controller_extender'] and data['extender_controller_extender']['state']:
state = data['extender_controller_extender']['state']
else:
state = True
extender_controller_extender_data = data['extender_controller_extender']
filtered_data = underscore_to_hyphen(filter_extender_controller_extender_data(extender_controller_extender_data))
if state == "present":
return fos.set('extender-controller',
'extender',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('extender-controller',
'extender',
mkey=filtered_data['id'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_extender_controller(data, fos):
if data['extender_controller_extender']:
resp = extender_controller_extender(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"extender_controller_extender": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"aaa_shared_secret": {"required": False, "type": "str"},
"access_point_name": {"required": False, "type": "str"},
"admin": {"required": False, "type": "str",
"choices": ["disable", "discovered", "enable"]},
"at_dial_script": {"required": False, "type": "str"},
"billing_start_day": {"required": False, "type": "int"},
"cdma_aaa_spi": {"required": False, "type": "str"},
"cdma_ha_spi": {"required": False, "type": "str"},
"cdma_nai": {"required": False, "type": "str"},
"conn_status": {"required": False, "type": "int"},
"description": {"required": False, "type": "str"},
"dial_mode": {"required": False, "type": "str",
"choices": ["dial-on-demand", "always-connect"]},
"dial_status": {"required": False, "type": "int"},
"ext_name": {"required": False, "type": "str"},
"ha_shared_secret": {"required": False, "type": "str"},
"id": {"required": True, "type": "str"},
"ifname": {"required": False, "type": "str"},
"initiated_update": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"mode": {"required": False, "type": "str",
"choices": ["standalone", "redundant"]},
"modem_passwd": {"required": False, "type": "str"},
"modem_type": {"required": False, "type": "str",
"choices": ["cdma", "gsm/lte", "wimax"]},
"multi_mode": {"required": False, "type": "str",
"choices": ["auto", "auto-3g", "force-lte",
"force-3g", "force-2g"]},
"ppp_auth_protocol": {"required": False, "type": "str",
"choices": ["auto", "pap", "chap"]},
"ppp_echo_request": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ppp_password": {"required": False, "type": "str"},
"ppp_username": {"required": False, "type": "str"},
"primary_ha": {"required": False, "type": "str"},
"quota_limit_mb": {"required": False, "type": "int"},
"redial": {"required": False, "type": "str",
"choices": ["none", "1", "2",
"3", "4", "5",
"6", "7", "8",
"9", "10"]},
"redundant_intf": {"required": False, "type": "str"},
"roaming": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"role": {"required": False, "type": "str",
"choices": ["none", "primary", "secondary"]},
"secondary_ha": {"required": False, "type": "str"},
"sim_pin": {"required": False, "type": "str"},
"vdom": {"required": False, "type": "int"},
"wimax_auth_protocol": {"required": False, "type": "str",
"choices": ["tls", "ttls"]},
"wimax_carrier": {"required": False, "type": "str"},
"wimax_realm": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_extender_controller(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_extender_controller(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, layers, utils
from libml.utils import EasyDict
from mixmatch import MixMatch
FLAGS = flags.FLAGS
class ReMixMatch(MixMatch):
def classifier_rot(self, x):
with tf.variable_scope('classify_rot', reuse=tf.AUTO_REUSE):
return tf.layers.dense(x, 4, kernel_initializer=tf.glorot_normal_initializer())
def guess_label(self, logits_y, p_data, p_model, T, use_dm, redux, **kwargs):
del kwargs
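        # redux chooses how the per-augmentation logits become guessed labels:
        #   'swap': each augmentation uses the softmax of the next augmentation (cyclic shift),
        #   'mean': every augmentation shares the mean softmax over all augmentations,
        #   '1st' : every augmentation shares the softmax of the first augmentation.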
if redux == 'swap':
p_model_y = tf.concat([tf.nn.softmax(x) for x in logits_y[1:] + logits_y[:1]], axis=0)
elif redux == 'mean':
p_model_y = sum(tf.nn.softmax(x) for x in logits_y) / len(logits_y)
p_model_y = tf.tile(p_model_y, [len(logits_y), 1])
elif redux == '1st':
p_model_y = tf.nn.softmax(logits_y[0])
p_model_y = tf.tile(p_model_y, [len(logits_y), 1])
else:
raise NotImplementedError()
# Compute the target distribution.
# 1. Rectify the distribution or not.
if use_dm:
p_ratio = (1e-6 + p_data) / (1e-6 + p_model)
p_weighted = p_model_y * p_ratio
p_weighted /= tf.reduce_sum(p_weighted, axis=1, keep_dims=True)
else:
p_weighted = p_model_y
# 2. Apply sharpening.
p_target = tf.pow(p_weighted, 1. / T)
p_target /= tf.reduce_sum(p_target, axis=1, keep_dims=True)
return EasyDict(p_target=p_target, p_model=p_model_y)
def model(self, batch, lr, wd, beta, w_kl, w_match, w_rot, K, use_xe, warmup_kimg=1024, T=0.5,
mixmode='xxy.yxy', dbuf=128, ema=0.999, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch, K + 1] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
w_match *= tf.clip_by_value(tf.cast(self.step, tf.float32) / (warmup_kimg << 10), 0, 1)
augment = layers.MixMode(mixmode)
gpu = utils.get_gpu()
def classifier_to_gpu(x, **kw):
with tf.device(next(gpu)):
return self.classifier(x, **kw, **kwargs).logits
def random_rotate(x):
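            # Build the inputs for the self-supervised rotation head: the batch is split
            # into four quarters, each given one of the four 90-degree rotations
            # (implemented via flips and a transpose), with labels 0-3 identifying
            # which rotation was applied.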
b4 = batch // 4
x, xt = x[:2 * b4], tf.transpose(x[2 * b4:], [0, 2, 1, 3])
l = np.zeros(b4, np.int32)
l = tf.constant(np.concatenate([l, l + 1, l + 2, l + 3], axis=0))
return tf.concat([x[:b4], x[b4:, ::-1, ::-1], xt[:b4, ::-1], xt[b4:, :, ::-1]], axis=0), l
# Moving average of the current estimated label distribution
p_model = layers.PMovingAverage('p_model', self.nclass, dbuf)
p_target = layers.PMovingAverage('p_target', self.nclass, dbuf) # Rectified distribution (only for plotting)
# Known (or inferred) true unlabeled distribution
p_data = layers.PData(self.dataset)
if w_rot > 0:
rot_y, rot_l = random_rotate(y_in[:, 1])
with tf.device(next(gpu)):
rot_logits = self.classifier_rot(self.classifier(rot_y, training=True, **kwargs).embeds)
loss_rot = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(rot_l, 4), logits=rot_logits)
loss_rot = tf.reduce_mean(loss_rot)
tf.summary.scalar('losses/rot', loss_rot)
else:
loss_rot = 0
if kwargs['redux'] == '1st' and w_kl <= 0:
logits_y = [classifier_to_gpu(y_in[:, 0], training=True)] * (K + 1)
elif kwargs['redux'] == '1st':
logits_y = [classifier_to_gpu(y_in[:, i], training=True) for i in range(2)]
logits_y += logits_y[:1] * (K - 1)
else:
logits_y = [classifier_to_gpu(y_in[:, i], training=True) for i in range(K + 1)]
guess = self.guess_label(logits_y, p_data(), p_model(), T=T, **kwargs)
ly = tf.stop_gradient(guess.p_target)
if w_kl > 0:
w_kl *= tf.clip_by_value(tf.cast(self.step, tf.float32) / (warmup_kimg << 10), 0, 1)
loss_kl = tf.nn.softmax_cross_entropy_with_logits_v2(labels=ly[:batch], logits=logits_y[1])
loss_kl = tf.reduce_mean(loss_kl)
tf.summary.scalar('losses/kl', loss_kl)
else:
loss_kl = 0
del logits_y
lx = tf.one_hot(l_in, self.nclass)
xy, labels_xy = augment([xt_in] + [y_in[:, i] for i in range(K + 1)], [lx] + tf.split(ly, K + 1),
[beta, beta])
x, y = xy[0], xy[1:]
labels_x, labels_y = labels_xy[0], tf.concat(labels_xy[1:], 0)
del xy, labels_xy
batches = layers.interleave([x] + y, batch)
logits = [classifier_to_gpu(yi, training=True) for yi in batches[:-1]]
skip_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
logits.append(classifier_to_gpu(batches[-1], training=True))
post_ops = [v for v in tf.get_collection(tf.GraphKeys.UPDATE_OPS) if v not in skip_ops]
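        # Only the UPDATE_OPS (e.g. batch-norm moving averages) created by the final
        # classifier call are kept; layers.interleave mixes labeled and unlabeled
        # examples into each batch so those statistics see a representative mix.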
logits = layers.interleave(logits, batch)
logits_x = logits[0]
logits_y = tf.concat(logits[1:], 0)
del batches, logits
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
if use_xe:
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
else:
loss_xeu = tf.square(labels_y - tf.nn.softmax(logits_y))
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/%s' % ('xeu' if use_xe else 'l2u'), loss_xeu)
self.distribution_summary(p_data(), p_model(), p_target())
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.extend([ema_op,
p_model.update(guess.p_model),
p_target.update(guess.p_target)])
if p_data.has_update:
post_ops.append(p_data.update(lx))
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe
+ w_kl * loss_kl
+ w_match * loss_xeu
+ w_rot * loss_rot,
colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_op=tf.nn.softmax(classifier_to_gpu(x_in, getter=ema_getter, training=False)),
classify_raw=tf.nn.softmax(classifier_to_gpu(x_in, training=False))) # No EMA, for debugging.
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.MANY_DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = ReMixMatch(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
K=FLAGS.K,
beta=FLAGS.beta,
w_kl=FLAGS.w_kl,
w_match=FLAGS.w_match,
w_rot=FLAGS.w_rot,
redux=FLAGS.redux,
use_dm=FLAGS.use_dm,
use_xe=FLAGS.use_xe,
warmup_kimg=FLAGS.warmup_kimg,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('beta', 0.75, 'Mixup beta distribution.')
flags.DEFINE_float('w_kl', 0.5, 'Weight for KL loss.')
flags.DEFINE_float('w_match', 1.5, 'Weight for distribution matching loss.')
flags.DEFINE_float('w_rot', 0.5, 'Weight for rotation loss.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
flags.DEFINE_integer('warmup_kimg', 1024, 'Unannealing duration for SSL loss.')
flags.DEFINE_enum('redux', 'swap', 'swap mean 1st'.split(), 'Logit selection.')
flags.DEFINE_bool('use_dm', True, 'Whether to use distribution matching.')
flags.DEFINE_bool('use_xe', True, 'Whether to use cross-entropy or Brier.')
FLAGS.set_default('augment', 'd.d.d')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
|
|
"""
A client library for working with Box's v2 API.
For extended specs, see: http://developers.box.com/docs/
"""
from datetime import datetime
from httplib import NOT_FOUND, PRECONDITION_FAILED, CONFLICT, UNAUTHORIZED
import simplejson
import time
from urllib import urlencode
import urlparse
import requests
class EventFilter(object):
"""
Types of events you can fetch
"""
ALL = 'all'
CHANGES = 'changes'
SYNC = 'sync'
class EventType(object):
ITEM_CREATE = 'ITEM_CREATE'
ITEM_UPLOAD = 'ITEM_UPLOAD'
ITEM_MOVE = 'ITEM_MOVE'
ITEM_COPY = 'ITEM_COPY'
ITEM_TRASH = 'ITEM_TRASH'
class CollaboratorRole(object):
EDITOR = 'editor'
VIEWER = 'viewer'
PREVIEWER = 'previewer'
UPLOADER = 'uploader'
PREVIEWER_UPLOADER = 'previewer-uploader'
VIEWER_UPLOADER = 'viewer-uploader'
CO_OWNER = 'co-owner'
class ShareAccess(object):
OPEN = 'open'
COMPANY = 'company'
COLLABORATORS = 'collaborators'
def start_authenticate_v1(api_key):
"""
Returns a url to redirect the client to. Expires after 10 minutes.
Note that according to Box, this endpoint will cease to function after December 31st.
"""
from lxml import objectify
r = requests.get('https://www.box.com/api/1.0/rest?action=get_ticket&api_key=%s' % api_key)
if not r.ok:
raise BoxAuthenticationException(r.status_code, r.text)
content = objectify.fromstring(str(r.text))
if content.status != 'get_ticket_ok':
raise BoxAuthenticationException(r.status_code, content.status.text)
return 'https://www.box.com/api/1.0/auth/%s' % content.ticket
def finish_authenticate_v1(api_key, ticket):
"""
Exchanges the ticket for an auth token. Should be called after the redirect completes.
Returns a dictionary with the token and some additional user info
Examples output:
{ 'token': 'xbfe79wdedb5mxxxxxxxxxxxxxxxxxxx',
'user': {
'access_id': 123456789,
'email': '[email protected]',
'login': '[email protected]',
'max_upload_size': 2147483648,
'sharing_disabled': u'',
'space_amount': 5368709120,
'space_used': 2445159,
'user_id': 987654321
}
}
"""
from lxml import objectify
r = requests.get('https://www.box.com/api/1.0/rest', params={'action': 'get_auth_token',
'api_key': api_key,
'ticket': ticket})
if not r.ok:
raise BoxAuthenticationException(r.status_code, r.text)
content = objectify.fromstring(str(r.text))
if content.status != 'get_auth_token_ok':
raise BoxAuthenticationException(r.status_code, content.status.text)
user_dict = {}
for x in content.user.iterchildren():
user_dict[x.tag] = x.pyval
return {
'token': content.auth_token.text,
'user': user_dict
}
def start_authenticate_v2(client_id, state=None, redirect_uri=None):
"""
Returns a url to redirect the client to.
Args:
- client_id: The client_id you obtained in the initial setup.
- redirect_uri: An HTTPS URI or custom URL scheme where the response will be redirected.
Optional if the redirect URI is registered with Box already.
- state: An arbitrary string of your choosing that will be included in the response to your application
Returns:
    - a url to redirect the user to.
"""
args = {
'response_type': 'code',
'client_id': client_id,
}
if state:
args['state'] = state
if redirect_uri:
args['redirect_uri'] = redirect_uri
return 'https://www.box.com/api/oauth2/authorize?' + urlencode(args)
def finish_authenticate_v2(client_id, client_secret, code):
"""
finishes the authentication flow. See http://developers.box.com/oauth/ for details.
Args:
- client_id: The client_id you obtained in the initial setup.
- client_secret: The client_secret you obtained in the initial setup.
- code: a string containing the code, or a dictionary containing the GET query
Returns:
- a dictionary with the token and additional info
Example output:
{ 'access_token': 'T9cE5asGnuyYCCqIZFoWjFHvNbvVqHjl',
'expires_in': 3600,
'restricted_to': [],
'token_type': 'bearer',
'refresh_token': 'J7rxTiWOHMoSC1isKZKBZWizoRXjkQzig5C6jFgCVJ9bUnsUfGMinKBDLZWP9BgR',
}
"""
return _oauth2_token_request(client_id, client_secret, 'authorization_code', code=code)
def refresh_v2_token(client_id, client_secret, refresh_token):
"""
Returns a new access_token & refresh_token from an existing refresh_token
Each access_token is valid for 1 hour. In order to get a new, valid token, you can use the accompanying
refresh_token. Each refresh token is valid for 14 days. Every time you get a new access_token by using a
refresh_token, we reset your timer for the 14 day period. This means that as long as your users use your
application once every 14 days, their login is valid forever.
Args:
- client_id: The client_id you obtained in the initial setup.
- client_secret: The client_secret you obtained in the initial setup.
    - refresh_token: the refresh token obtained from a previous authentication or refresh
Returns:
- a dictionary with the token and additional info
"""
return _oauth2_token_request(client_id, client_secret, 'refresh_token', refresh_token=refresh_token)
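# Illustrative OAuth2 flow (the client id/secret and redirect handling are placeholders):
#     url = start_authenticate_v2('my_client_id', state='some-opaque-state')
#     # ...redirect the user to `url`; Box redirects back with ?code=...&state=...
#     tokens = finish_authenticate_v2('my_client_id', 'my_client_secret', code)
#     credentials = CredentialsV2(tokens['access_token'], tokens.get('refresh_token'),
#                                 'my_client_id', 'my_client_secret')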
def _oauth2_token_request(client_id, client_secret, grant_type, **kwargs):
"""
Performs an oauth2 request against Box
"""
args = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': grant_type
}
args.update(kwargs)
response = requests.post('https://www.box.com/api/oauth2/token', args)
return _handle_auth_response(response)
def _handle_auth_response(response):
result = response.json()
if 'error' in result:
raise BoxAuthenticationException(response.status_code, message=result.get('error_description'), error=result['error'])
return result
class CredentialsV1(object):
"""
v1 credentials
Args:
- api_key: Your Box api_key
- access_token: the user access token
"""
def __init__(self, api_key, access_token):
self._api_key = api_key
self._access_token = access_token
@property
def headers(self):
return {'Authorization': 'BoxAuth api_key={0}&auth_token={1}'.format(self._api_key, self._access_token)}
def refresh(self):
"""
        V1 credentials cannot be refreshed, but they don't expire either.
Always returns False
"""
return False
class CredentialsV2(object):
"""
v2 credentials
Args:
- access_token: The user access token
- refresh_token: The user refresh token (optional)
- client_id: The client_id you obtained in the initial setup (optional)
- client_secret: The client_secret you obtained in the initial setup (optional)
    - refresh_callback: A method that will be called when the tokens have been refreshed. Should take two arguments, access_token and refresh_token. (optional)
"""
def __init__(self, access_token, refresh_token=None, client_id=None, client_secret=None, refresh_callback=None):
self._access_token = access_token
self._refresh_token = refresh_token
self._client_id = client_id
self._client_secret = client_secret
self._refresh_callback = refresh_callback
@property
def headers(self):
return {'Authorization': 'Bearer {0}'.format(self._access_token)}
def refresh(self):
"""
        Refreshes the access token based on the refresh token, client id and secret if available.
Returns True if the refresh was successful, False if the refresh could not be performed,
and raises BoxAuthenticationException if the refresh failed
"""
if not self._refresh_token or not self._client_id or not self._client_secret:
return False
result = refresh_v2_token(self._client_id, self._client_secret, self._refresh_token)
self._access_token = result["access_token"]
if "refresh_token" in result:
self._refresh_token = result["refresh_token"]
if self._refresh_callback:
self._refresh_callback(self._access_token, self._refresh_token)
return True
class BoxClient(object):
def __init__(self, credentials):
"""
Args:
- credentials: an access_token string, or an instance of CredentialsV1/CredentialsV2
"""
if not hasattr(credentials, 'headers'):
credentials = CredentialsV2(credentials)
self.credentials = credentials
def _check_for_errors(self, response):
if not response.ok:
exception = EXCEPTION_MAP.get(response.status_code, BoxClientException)
raise exception(response.status_code, response.text)
@property
def default_headers(self):
return self.credentials.headers
def _request(self, method, resource, params=None, data=None, headers=None, endpoint="api", try_refresh=True, **kwargs):
"""
Performs a HTTP request to Box.
This method adds authentication headers, and performs error checking on the response.
It also automatically tries to refresh tokens, if possible.
Args:
- method: The type of HTTP method, f.ex. get or post
- resource: The resource to request (without shared prefix)
- params: Any query parameters to send
- data: Any data to send. If data is a dict, it will be encoded as json.
- headers: Any additional headers
- endpoint: The endpoint to use, f.ex. api or upload, defaults to api
- try_refresh: True if a refresh of the credentials should be attempted, False otherwise
        - **kwargs: Any additional arguments to pass to the request
"""
if isinstance(data, dict):
data = simplejson.dumps(data)
if headers:
headers = dict(headers)
headers.update(self.default_headers)
else:
headers = self.default_headers
url = 'https://%s.box.com/2.0/%s' % (endpoint, resource)
response = requests.request(method, url, params=params, data=data, headers=headers, **kwargs)
if response.status_code == UNAUTHORIZED and try_refresh and self.credentials.refresh():
return self._request(method, resource, params, data, headers, try_refresh=False, **kwargs)
self._check_for_errors(response)
return response
@classmethod
def _get_id(cls, identifier):
"""
        converts an identifier to a string id
Args:
- identifier: a dictionary or string or long
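        Example (illustrative): both _get_id({'id': 123}) and _get_id(123) return '123'.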
"""
if isinstance(identifier, dict):
identifier = identifier['id']
return str(identifier)
def get_user_info(self, username=None):
"""
Returns info.
Args:
- username: The username to query. If None then the info on the current user will
be returned
"""
username = username or 'me'
return self._request("get", 'users/' + username).json()
def get_user_list(self, limit=100, offset=0):
"""
Returns users in an enterprise.
Args:
- limit: number of users to return. (default=100, max=1000). Optional.
- offset: The record at which to start. Optional.
"""
params = {
'limit': limit,
'offset': offset,
}
return self._request("get", 'users/', params).json()
def get_folder(self, folder_id=0, limit=100, offset=0, fields=None):
"""
Retrieves the metadata of a folder and child directory/files.
Args:
        - folder_id: the id of the folder you wish to query. No, you can't use paths :(
- limit: (optional) number of items to return. (default=100, max=1000).
- offset: (optional) The record at which to start
- fields: (optional) Attribute(s) to include in the response
"""
params = {
'limit': limit,
'offset': offset,
}
if fields:
params['fields'] = ','.join(fields)
return self._request("get", 'folders/{0}'.format(folder_id), params=params).json()
def get_folder_content(self, folder_id=0, limit=100, offset=0, fields=None):
"""
Retrieves the files and/or folders contained within this folder without any other metadata about the folder.
Args:
        - folder_id: the id of the folder you wish to query. No, you can't use paths :(
- limit: (optional) number of items to return. (default=100, max=1000).
- offset: (optional) The record at which to start
- fields: (optional) Attribute(s) to include in the response
"""
params = {
'limit': limit,
'offset': offset,
}
if fields:
params['fields'] = ','.join(fields)
return self._request("get", 'folders/{0}/items'.format(folder_id), params=params).json()
def get_folder_iterator(self, folder_id):
"""
returns an iterator over the folder entries.
        this is equivalent to iterating over the folder pages manually
"""
batch_size = 1000
content = self.get_folder_content(folder_id, limit=batch_size)
offset = 0
while content['entries']: # while the current batch has entries
for entry in content['entries']:
yield entry
# stop if 'total_count' of entries (or more) has been fetched so far
if offset + len(content['entries']) >= content['total_count']:
break
# otherwise, fetch the next batch and repeat
offset += batch_size
content = self.get_folder_content(folder_id, limit=batch_size, offset=offset)
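    # Illustrative usage (assumes an authenticated BoxClient instance named `client`):
    #     for entry in client.get_folder_iterator(0):
    #         print(entry['name'])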
def copy_folder(self, folder_id, destination_parent, new_foldername=None):
"""
Copies a given `folder_id` into a new location, `destination_parent`. By default
the original name of the folder is used unless `new_foldername` is provided.
@see https://developers.box.com/docs/#folders-copy-a-folder
Args:
        - folder_id: the id of the folder we want to copy
- destination_parent: ID or a dictionary (as returned by the apis) of the target folder
- new_foldername: (optional) name the copy `new_foldername`, if provided.
Returns:
- a dictionary containing the metadata of newly created copy
"""
data = {
'parent': {
'id': self._get_id(destination_parent)
}
}
if new_foldername:
data.update({'name': new_foldername})
return self._request('post', 'folders/{0}/copy'.format(folder_id), data=data).json()
def create_folder(self, name, parent=0):
"""
creates a new folder under the parent.
Args:
- parent: (optional) ID or a Dictionary (as returned by the apis) of the parent folder
"""
data = {"name": name,
'parent': {'id': self._get_id(parent)}}
return self._request("post", 'folders', data=data).json()
def get_folder_collaborations(self, folder_id):
"""
Fetches the collaborations of the given folder_id
Args:
- folder_id: the folder id.
Returns a list with all folder collaborations.
"""
return self._request("get", 'folders/{0}/collaborations'.format(folder_id)).json()
def get_file_metadata(self, file_id):
"""
Fetches the metadata of the given file_id
Args:
- file_id: the file id.
Returns a dictionary with all of the file metadata.
"""
return self._request("get", 'files/{0}'.format(file_id)).json()
def get_file_comments(self, file_id):
""" Retrieves a file's associated comments
Args:
- file_id: the file id
Returns a list of mini formatted comments
"""
return self._request('get', 'files/{0}/comments'.format(file_id)).json()
def get_file_tasks(self, file_id):
""" Retrieves a file's associated tasks
Args:
- file_id: the file id
Returns a list of mini formatted tasks
"""
return self._request('get', 'files/{0}/tasks'.format(file_id)).json()
def delete_file(self, file_id, etag=None):
"""
Discards a file to the trash.
Args:
- etag: (optional) If specified, the file will only be deleted if
its etag matches the parameter
"""
headers = {}
if etag:
headers['If-Match'] = etag
self._request("delete", 'files/{0}'.format(file_id), headers=headers)
def delete_folder(self, folder_id, etag=None, recursive=False):
"""
Discards a folder to the trash.
Args:
- etag: (optional) If specified, the folder will only be deleted if its etag matches the parameter
- recursive: (optional) If False, the folder will not be deleted if it contains files.
"""
headers = {}
if etag:
headers['If-Match'] = etag
params = {}
if recursive:
params['recursive'] = 'true'
self._request("delete", 'folders/{0}'.format(folder_id), headers=headers, params=params)
def delete_trashed_file(self, file_id):
"""
Permanently deletes an item that is in the trash.
"""
self._request("delete", 'files/{0}/trash'.format(file_id))
def download_file(self, file_id, version=None):
"""
Downloads a file
Args:
- file_id: The ID of the file to download.
- version: (optional) The ID specific version of this file to download.
Returns:
- Request's response object
"""
params = {}
if version:
params['version'] = version
return self._request("get", 'files/{0}/content'.format(file_id), params=params, stream=True)
def get_thumbnail(self, file_id, extension="png", min_height=None, max_height=None, min_width=None, max_width=None, max_wait=0):
"""
Downloads a file
Args:
- file_id: The ID of the file to download.
        - extension: Currently thumbnails are only available as png
- min_height: (optional) The minimum height of the thumbnail.
- max_height: (optional) The maximum height of the thumbnail
- min_width: (optional) The minimum width of the thumbnail
- max_width: (optional) The maximum width of the thumbnail
Returns a file-like object to the file content
"""
params = {}
if min_height is not None:
params['min_height'] = min_height
if max_height is not None:
params['max_height'] = max_height
if min_width is not None:
params['min_width'] = min_width
if max_width is not None:
params['max_width'] = max_width
response = self._request("get", 'files/{0}/thumbnail.{1}'.format(file_id, extension), params=params, stream=True)
if response.status_code == 202:
# Thumbnail not ready yet
ready_in_seconds = int(response.headers["Retry-After"])
if ready_in_seconds > max_wait:
return None
# Wait for the thumbnail to get ready
time.sleep(ready_in_seconds)
response = self._request("get", 'files/{0}/thumbnail.{1}'.format(file_id, extension), params=params, stream=True)
self._check_for_errors(response)
return response.raw
elif response.status_code == 302:
# No thumbnail available
return None
else:
return response.raw
def upload_file(self, filename, fileobj, parent=0, content_created_at=None, content_modified_at=None):
"""
Uploads a file. If the file already exists, ItemAlreadyExists is raised.
Args:
- filename: the filename to be used. If the file already exists, an ItemAlreadyExists exception will be
raised.
- fileobj: a fileobj-like object that contains the data to upload
- parent: (optional) ID or a Dictionary (as returned by the apis) of the parent folder
- content_created_at: (optional) a timestamp (datetime or a properly formatted string) of the time the
content was created
- content_modified_at: (optional) a timestamp (datetime or a properly formatted string) of the time the
content was last modified
"""
form = {"parent_id": self._get_id(parent)}
if content_created_at:
form['content_created_at'] = content_created_at.isoformat() if isinstance(content_created_at, datetime) else content_created_at
        if content_modified_at:
form['content_modified_at'] = content_modified_at.isoformat() if isinstance(content_modified_at, datetime) else content_modified_at
# usually Box goes with data==json, but here they want headers (as per standard http form)
response = requests.post('https://upload.box.com/api/2.0/files/content',
form,
headers=self.default_headers,
files={filename: (filename, fileobj)})
self._check_for_errors(response)
return response.json()['entries'][0]
def overwrite_file(self, file_id, fileobj, etag=None, content_modified_at=None):
"""
Overwrites an existing file. The file_id must exist on the server.
Args:
- fileid: the id of an existing file.
- fileobj: a fileobj-like object that contains the data to upload
- etag: an etag the file has to match in order to be overwritten. If the etags mismatch, an PreconditionFailed is raised
- content_modified_at: (optional) a timestamp (datetime or a properly formatted string) of the time the
content was created
"""
headers = dict(self.default_headers)
form = {}
if etag:
headers['If-Match'] = etag
if content_modified_at:
form['content_modified_at'] = content_modified_at.isoformat() if isinstance(content_modified_at, datetime) else content_modified_at
response = requests.post('https://upload.box.com/api/2.0/files/{0}/content'.format(file_id),
form,
headers=headers,
files={'file': fileobj})
self._check_for_errors(response)
return response.json()['entries'][0]
def copy_file(self, file_id, destination_parent, new_filename=None):
"""
Copies a file
@see http://developers.box.com/docs/#files-copy-a-file
Args:
- file_id: the id of the file we want to copy
- destination_parent: ID or a dictionary (as returned by the apis) of the parent folder
- new_filename: (optional) the new filename to use. If not passed, the original filename will be used.
Returns:
- a dictionary with the new file metadata
"""
data = {'parent': {'id': self._get_id(destination_parent)}}
if new_filename:
data['name'] = new_filename
return self._request("post", 'files/{0}/copy'.format(file_id), data=data).json()
def share_link(self, file_id, access=ShareAccess.OPEN, expire_at=None, can_download=None, can_preview=None):
"""
Creates a share link for the file_id
Args:
- file_id: the id of the file we want to share
- access: one of the values of ShareAccess
- expire_at: (optional) a datetime representing the time the link will expire. Timestamps are rounded off
to the given day.
- can_download: Whether this link allows downloads. Can only be used with Open and Company
- can_preview: Whether this link allows previewing. Can only be used with Open and Company
Returns:
- a dictionary containing the various urls. Example:
{
"url": "https://www.box.com/s/rh935iit6ewrmw0unyul",
"download_url": "https://www.box.com/shared/static/rh935iit6ewrmw0unyul.jpeg",
"vanity_url": null,
"is_password_enabled": false,
"unshared_at": null,
"download_count": 0,
"preview_count": 0,
"access": "open",
"permissions": {
"can_download": true,
"can_preview": true
}
}
"""
data = {
'access': access
}
if can_download is not None or can_preview is not None:
data['permissions'] = {}
if can_download is not None:
data['permissions']['can_download'] = can_download
if can_preview is not None:
data['permissions']['can_preview'] = can_preview
if expire_at:
data['unshared_at'] = expire_at.isoformat()
result = self._request("put", 'files/{0}'.format(file_id), data={'shared_link': data}).json()
return result['shared_link']
def get_events(self, stream_position='0', stream_type=EventFilter.ALL, limit=1000):
"""
Use this to get events for a given user. A chunk of event objects is returned for the user based on the
parameters passed in.
Args:
- stream_position: where to start reading the events from.
Can specify special case 'now', which is used to get the latest stream position and will return 0 events.
- stream_type: a value from ``EventFilter`` that limits the type of events returned
- limit: Limits the number of events returned.
Returns:
- a dictionary containing metadata & the events
"""
params = {
'stream_position': str(stream_position),
'stream_type': stream_type,
'limit': limit
}
return self._request("get", 'events', params).json()
def long_poll_for_events(self, stream_position=None, stream_type=EventFilter.ALL):
"""
Blocks until new events are available
Args:
- stream_position: where to start reading the events from.
Can specify special case 'now', which is used to get the latest stream position and will return 0 events.
- stream_type: a value from ``EventFilter`` that limits the type of events returned
"""
if not stream_position or stream_position == 'now':
cursor = self.get_events(stream_position='now', stream_type=EventFilter.CHANGES)
stream_position = cursor['next_stream_position']
while True:
poll_data = self._get_long_poll_data()
url, query = poll_data['url'].split('?', 1)
query = urlparse.parse_qs(query)
query['stream_position'] = stream_position
query['stream_type'] = stream_type
response = requests.get(url, params=query)
self._check_for_errors(response)
result = response.json()
if result['message'] in ['new_message', 'new_change']:
return stream_position
def _get_long_poll_data(self):
"""
Returns the information about the endpoint that will handle the actual long poll request.
See http://developers.box.com/using-long-polling-to-monitor-events/ for details.
"""
result = self._request('options', "events").json()
return result['entries'][0]
@staticmethod
def get_path_of_file(file_metadata):
"""
returns the full path of a file.
Args:
- file_metadata: the dictionary as returned by the various Box apis.
Returns:
- The full path to the file
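        Example (illustrative): a file named "report.pdf" whose path_collection entries
        are "All Files" and "Projects" yields "/Projects/report.pdf".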
"""
# skip over first part, which is 'All Files'
path_parts = [x['name'].strip('/') for x in file_metadata['path_collection']['entries'][1:]]
path_parts.append(file_metadata['name'])
return '/' + '/'.join(path_parts)
def get_comment_information(self, comment_id):
""" Retrieves information about a comment
Args:
- comment_id: the comment id
Returns a full comment object
"""
return self._request('get', 'comments/{0}'.format(comment_id)).json()
def add_comment(self, id, type, message):
""" Add a comment to the given file or comment
Args:
- id: the id of the object to comment
- type: the type of the object to comment, can be "file" or "comment"
- message: the comment's message
Returns the newly created comment full object
"""
item = {"type": type, "id": id}
data = {'item': item, 'message': message}
return self._request('post', 'comments', data=data).json()
def change_comment(self, comment_id, message):
""" Change a comment's message
Args:
        - comment_id: the id of the comment to change
- message: the new message
Returns the modified comment full object
"""
data = {'message': message}
return self._request('put', 'comments/{0}'.format(comment_id), data=data).json()
def delete_comment(self, comment_id):
""" Delete a given comment from box
Args:
- comment_id: the comment id
"""
self._request('delete', 'comments/{0}'.format(comment_id))
def get_task_information(self, task_id):
""" Retrieves information about a task
Args:
- task_id: the task id
Returns a full task object
"""
return self._request('get', 'tasks/{0}'.format(task_id)).json()
def add_task(self, file_id, due_at, action='review', message=None):
""" Add a task to the given file
Args:
- file_id: the file to add a task to
- due_at: a datetime object containing the due date
- (optional) message: the task's message
- (optional) action: currently only support 'review'
Returns the newly created task full object
"""
item = {"type": "file", "id": file_id}
data = {
'item': item,
'action': action,
'due_at': str(due_at),
'message': message,
}
return self._request('post', 'tasks', data=data).json()
def change_task(self, task_id, due_at, action='review', message=None):
""" Change a task
Args:
        - task_id: the id of the task to change
- due_at: a datetime for the day the task is due
        - (optional) message: the task's new message
- (optional) action: currently only support 'review'
Returns the modified task full object
"""
data = {
'action': action,
'due_at': str(due_at),
}
if message:
data['message'] = message
return self._request('put', 'tasks/{0}'.format(task_id), data=data).json()
def delete_task(self, task_id):
""" Delete a given task from box
Args:
        - task_id: the task id
"""
self._request('delete', 'tasks/{0}'.format(task_id))
def get_task_assignments(self, task_id):
""" Retrieves assignments for a given task
Args:
- task_id: the task id
Returns the list of the task assignments (mini formatted)
"""
return self._request('get', 'tasks/{0}/assignments'.format(task_id)).json()
def get_assignment(self, assignment_id):
""" Retrieves a given assignment information
Args
- assignment_id: the assignment id
Returns an assignment full object
"""
return self._request('get', 'task_assignments/{0}'.format(assignment_id)).json()
def assign_task(self, task_id, user_id=None, login=None):
""" Assign the given task to a user
Args:
- task_id: the task id
        - user_id: (optional) the id of the user to assign the task to
- login: (optional) the login of the user to assign the task to
        At least one of user_id or login must be provided to identify the user.
Returns a full task assignment object
"""
task = {"id": task_id, "type": "task"}
assign_to = dict()
if user_id:
assign_to['id'] = user_id
if login:
assign_to['login'] = login
data = {'task': task, 'assign_to': assign_to}
return self._request('post', 'task_assignments', data=data).json()
def update_assignment(self, assignment_id, resolution_state, message=None):
""" Update a task assignment state
Args:
        - assignment_id: the id of the assignment to update
- message: (optional) A message about the state change
- resolution_state: can be "completed", "incomplete", "approved" or "rejected"
Returns the modified task assignment object
"""
data = {'resolution_state': resolution_state}
if message:
data['message'] = message
return self._request('put', 'task_assignments/{0}'.format(assignment_id), data=data).json()
def delete_assignment(self, assignment_id):
""" Delete the given assignment
Args
- assignment_id: the assignment id
"""
self._request('delete', 'task_assignments/{0}'.format(assignment_id))
def search(self, query, limit=30, offset=0):
"""
The search endpoint provides a simple way of finding items that are accessible in a given user's Box account.
Args:
- query: The string to search for; can be matched against item names, descriptions, text content of a file,
and other fields of the different item types.
- limit: (optional) number of items to return. (default=30, max=200).
        - offset: (optional) the record at which to start. (default=0)
"""
params = {
'query': query,
'limit': limit,
'offset': offset,
}
return self._request("get", 'search', params).json()
def get_collaboration(self, collaboration_id):
"""
Fetches the collaboration of the given collaboration_id
Args:
- collaboration_id: the collaboration id.
Returns a dictionary with all of the collaboration data.
"""
return self._request("get", 'collaborations/{0}'.format(collaboration_id)).json()
def create_collaboration_by_user_id(self, folder_id, user_id, role=CollaboratorRole.VIEWER, notify=False):
"""
Create a collaboration of the given folder_id and user_id
Args:
- folder_id: the folder id.
- user_id: the user id.
- role: (optional) access level of this collaboration from ``CollaboratorRole``. (default=CollaboratorRole.VIEWER)
        - notify: (optional) determines if the user should receive email notification of the collaboration. (default=False)
Returns a dictionary with all of the collaboration data.
"""
params = {
'notify': notify,
}
data = {
'item': {'id': folder_id, 'type': 'folder'},
'accessible_by': {'id': user_id, 'type': 'user'},
'role': role,
}
return self._request('post', 'collaborations', params, data=data).json()
def create_collaboration_by_login(self, folder_id, login, role=CollaboratorRole.VIEWER, notify=False):
"""
Create a collaboration of the given folder_id and login (login does not need to be a Box user)
Args:
- folder_id: the folder id.
- login: the user login (does not need to be a Box user).
- role: (optional) access level of this collaboration from ``CollaboratorRole``. (default=CollaboratorRole.VIEWER)
        - notify: (optional) determines if the user should receive email notification of the collaboration. (default=False)
Returns a dictionary with all of the collaboration data.
"""
params = {
'notify': notify,
}
data = {
'item': {'id': folder_id, 'type': 'folder'},
'accessible_by': {'login': login, 'type': 'user'},
'role': role,
}
return self._request('post', 'collaborations', params, data=data).json()
def edit_collaboration(self, collaboration_id, role=CollaboratorRole.VIEWER, etag=None):
"""
Edit an existing collaboration.
Args:
- collaboration_id: collaboration id to be edited
- role: (optional) access level of this collaboration from ``CollaboratorRole``. (default=CollaboratorRole.VIEWER)
        - etag: (optional) If specified, the collaboration will only be edited if
          its etag matches the parameter
"""
headers = {}
if etag:
headers['If-Match'] = etag
data = {
'role': role,
}
return self._request('put', 'collaborations/{0}'.format(collaboration_id), headers=headers, data=data).json()
def delete_collaboration(self, collaboration_id, etag=None):
"""
Deletes a collaboration.
Args:
- collaboration_id: collaboration id to be deleted
        - etag: (optional) If specified, the collaboration will only be deleted if
          its etag matches the parameter
"""
headers = {}
if etag:
headers['If-Match'] = etag
self._request("delete", 'collaborations/{0}'.format(collaboration_id), headers=headers)
class BoxClientException(Exception):
def __init__(self, status_code, message=None, **kwargs):
super(BoxClientException, self).__init__(message)
self.status_code = status_code
self.message = message
self.__dict__.update(kwargs)
class ItemAlreadyExists(BoxClientException):
pass
class ItemDoesNotExist(BoxClientException):
pass
class PreconditionFailed(BoxClientException):
pass
class BoxAuthenticationException(BoxClientException):
pass
class BoxAccountUnauthorized(BoxClientException):
pass
EXCEPTION_MAP = {
CONFLICT: ItemAlreadyExists,
NOT_FOUND: ItemDoesNotExist,
PRECONDITION_FAILED: PreconditionFailed,
UNAUTHORIZED: BoxAccountUnauthorized
}
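# Illustrative helper (an assumption, not part of the original client): it shows
# how EXCEPTION_MAP is meant to translate HTTP error status codes into the
# exception hierarchy above. The client's own error-checking method performs the
# actual lookup and may differ from this sketch.
def _raise_for_box_status(status_code, message=None):
    """Raise the BoxClientException subclass mapped to an error status code."""
    exception_class = EXCEPTION_MAP.get(status_code, BoxClientException)
    raise exception_class(status_code, message)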
|
|
#!/usr/bin/env python
# coding: utf-8
import random
import time
import math
import pprint
pp = pprint.PrettyPrinter(indent=4, width=200).pprint
import libtcodpy as libtcod
from planet import Planet
from sector import Sector
from nebula import Nebula
from starfield import Starfield
from asteroid import Asteroid
class Galaxy:
def __init__(self, width, height, seed=54):
self.screen_width = width
self.screen_height = height
self.seed = seed
random.seed(seed)
self.rnd = libtcod.random_new_from_seed(self.seed)
# Load Random Names
self.planet_names = []
with open("planet_names", "r") as planet_names_file:
self.planet_names = planet_names_file.readlines()
random.shuffle(self.planet_names)
self.planet_name_index = -1
# Build Galaxy Map
self.bsp_depth = 6
self.bsp = libtcod.bsp_new_with_size(0, 0, self.screen_width, self.screen_height)
libtcod.bsp_split_recursive(self.bsp, self.rnd, self.bsp_depth, 8, 8, 1.0, 1.0)
# Count number of sectors
count = [ 0 ]
def count_bsp_leaf_nodes(node, userData):
if node.level == self.bsp_depth:
count[0] += 1
return True
libtcod.bsp_traverse_inverted_level_order(self.bsp, count_bsp_leaf_nodes, userData=None)
self.sector_count = count[0]
# self.sector_count = 2**self.bsp_depth # only if we have a fully populated tree (not guaranteed)
self.sectors = []
for i in range(0, self.sector_count):
self.new_sector()
self.link_sectors()
self.current_sector = random.randrange(self.sector_count)
# pp("total sectors: {} current sector: {}".format(self.sector_count, self.current_sector))
self.targeted_sector_index = 0
self.selected_blink = 0
def next_name(self):
self.planet_name_index += 1
        if self.planet_name_index >= len(self.planet_names):  # wrap around when the name list is exhausted
self.planet_name_index = 0
return self.planet_names[self.planet_name_index].strip()
def new_sector(self):
self.sectors.append( SectorMap( self, random.randrange(0,1000000), self.next_name() ) )
def cycle_sector_target(self):
self.targeted_sector_index += 1
if self.targeted_sector_index >= len(self.sectors[self.current_sector].neighbors):
self.targeted_sector_index = 0
def link_sectors(self):
self.bsp_nodes = {"index": 0}
def get_bsp_nodes(node, userData):
self.bsp_nodes["index"] += 1
if node.level not in self.bsp_nodes:
self.bsp_nodes[node.level] = []
self.bsp_nodes[node.level].append( {"index": self.bsp_nodes["index"]-1, "x": node.x, "y": node.y, "w": node.w, "h": node.h, "node": node } )
return True
libtcod.bsp_traverse_inverted_level_order(self.bsp, get_bsp_nodes, userData=None)
# pp(self.bsp_nodes)
# Set Sector Galaxy Positions
for index, sector in enumerate(self.sectors):
node = self.bsp_nodes[self.bsp_depth][index]
startx = int(node["x"] + (node["w"]/2.0))
starty = int(node["y"] + (node["h"]/2.0))
sector.galaxy_position_x = startx
sector.galaxy_position_y = starty
# Link nodes in the bsp tree
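        # Work up the tree one level at a time: at depth i, each pair of adjacent
        # subtrees (2**i leaves each) is joined through the two leaf sectors, one
        # per subtree, that are closest together in galaxy coordinates.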
for i in range(self.bsp_depth):
for index in range(0, self.sector_count, 2**(i+1)):
# print("current depth: {} starting index: {}".format(i, index))
node1_index = index
if i == 0:
# we are linking the lowest level nodes
node2_index = node1_index + 2**i
else:
# find the two closest nodes in each subtree
node2_index = node1_index + 2**i
min_distance = self.screen_width
min_node1 = None
min_node2 = None
tree1_start_index = index
tree1_stop_index = index + (2**(i+1))/2
tree2_start_index = tree1_stop_index
tree2_stop_index = tree2_start_index + (2**(i+1))/2
# pp([tree1_start_index, tree1_stop_index, tree2_start_index, tree2_stop_index])
for n1 in range(tree1_start_index, tree1_stop_index):
for n2 in range(tree2_start_index, tree2_stop_index):
if n1 != n2 and n1 < self.sector_count and n2 < self.sector_count:
# pp((n1, n2))
node1 = self.bsp_nodes[self.bsp_depth][n1]
node2 = self.bsp_nodes[self.bsp_depth][n2]
d = math.sqrt((node2["x"] - node1["x"])**2 + (node2["y"] - node1["y"])**2)
if d < min_distance:
min_distance = d
min_node1 = node1["index"]
min_node2 = node2["index"]
# print("new min: {} indexes: {} {}".format(d, min_node1, min_node2))
node1_index = min_node1
node2_index = min_node2
# print("done min ---")
if node2_index < self.sector_count:
# print("linked {} -> {}".format(node1_index, node2_index))
if node2_index not in self.sectors[node1_index].neighbors:
self.sectors[node1_index].neighbors.append( node2_index )
# Add links in the other direction
for index, sector in enumerate(self.sectors):
for neighbor in sector.neighbors:
if index not in self.sectors[neighbor].neighbors:
self.sectors[neighbor].neighbors.append(index)
self.one_way_links = []
for index, sector in enumerate(self.sectors):
for neighbor in sector.neighbors:
if [index, neighbor] not in self.one_way_links and [neighbor, index] not in self.one_way_links:
self.one_way_links.append([index, neighbor])
# pp([sector.neighbors for sector in self.sectors])
# pp(self.one_way_links)
def draw(self, buffer):
# Draw Connecting Lines
for index1, index2 in self.one_way_links:
if index1 == self.current_sector and \
index2 == self.sectors[self.current_sector].neighbors[self.targeted_sector_index] or \
index2 == self.current_sector and \
index1 == self.sectors[self.current_sector].neighbors[self.targeted_sector_index]:
# if this is a line to the target sector
color = libtcod.Color(0, 255, 0)
elif self.sectors[index1].discovered() and self.sectors[index2].discovered():
# if this is a line between two discovered sectors
color = libtcod.Color(87, 186, 255)
else:
# else standard connecting line
color = libtcod.Color(150, 150, 150)
libtcod.line_init(
self.sectors[index1].galaxy_position_x,
self.sectors[index1].galaxy_position_y,
self.sectors[index2].galaxy_position_x,
self.sectors[index2].galaxy_position_y,
)
x,y=libtcod.line_step()
while x is not None:
# if self.sectors[index1].discovered() or self.sectors[index2].discovered():
buffer.set_fore(x, y, color[0], color[1], color[2], 4)
x,y=libtcod.line_step()
# Draw Sectors Nodes
for index, sector in enumerate(self.sectors):
x, y = sector.galaxy_position_x, sector.galaxy_position_y
buffer.set_fore(x, y, sector.star_color[0], sector.star_color[1], sector.star_color[2], sector.star_icon)
for x, y, icon in [(x-1, y-1, ord(' ')), (x, y-1, ord(' ')), (x+1, y-1, ord(' ')),
(x-1, y, ord(' ')), (x+1, y, ord(' ')),
(x-1, y+1, ord(' ')), (x, y+1, ord(' ')), (x+1, y+1, ord(' ')) ]:
buffer.set_fore(x, y, 0, 0, 0, icon )
if index == self.sectors[self.current_sector].neighbors[self.targeted_sector_index]:
x, y = sector.galaxy_position_x, sector.galaxy_position_y
for x, y, icon in [(x-1, y-1, ord(' ')), (x, y-1, ord('-')), (x+1, y-1, ord(' ')),
(x-1, y, ord('|')), (x+1, y, ord('|')),
(x-1, y+1, ord(' ')), (x, y+1, ord('-')), (x+1, y+1, ord(' ')) ]:
buffer.set_fore(x, y, 255, 128, 128, icon)
if index == self.current_sector:
t = time.clock()
if t > self.selected_blink + 0.5:
if t > self.selected_blink + 1.0:
self.selected_blink = t
x, y = sector.galaxy_position_x, sector.galaxy_position_y
for x, y, icon in [(x-1, y-1, 213), (x, y-1, 205), (x+1, y-1, 184),
(x-1, y, 179), (x+1, y, 179),
(x-1, y+1, 212), (x, y+1, 205), (x+1, y+1, 190) ]:
buffer.set_fore(x, y, 128, 255, 128, icon)
class SectorMap:
def __init__(self, galaxy, seed, name, posx=None, posy=None):
self.galaxy = galaxy
self.screen_width = self.galaxy.screen_width
self.screen_height = self.galaxy.screen_height
self.name = name
self.galaxy_position_x = int(random.random() * (self.screen_width/2)) + self.screen_width/4
self.galaxy_position_y = int(random.random() * (self.screen_height/2)) + self.screen_height/4
self.seed = seed
random.seed(seed)
# self.sector_background = libtcod.Color( random.randrange(0,256), random.randrange(0,256), random.randrange(0,256) )
self.nebula_background = [ random.random(), random.random(), random.random() ]
self.nebula_seed = seed * 3
self.planet_count = random.randrange(1, 17)
self.asteriod_count = random.randrange(0, 25)
self.asteroids = []
self.planets = []
self.neighbors = []
self.star_icon = ord('?')
self.star_color = libtcod.Color(255, 255, 255)
self.new_star()
for p in range(0, self.planet_count):
self.new_planet()
for a in range(self.asteriod_count):
self.new_asteroid()
def discovered(self):
return self.star_icon != ord('?')
def new_star(self):
self.planets.append( {
"planet_class" : "star",
"position_x" : 0,
"position_y" : 0,
"diameter" : 50,
"seed" : random.randrange(1,1000000),
"name" : self.galaxy.next_name(),
} )
def new_asteroid(self):
self.asteroids.append( {
"planet_class" : "asteroid",
"position_x" : random.randrange(-1000,1001),
"position_y" : random.randrange(-1000,1001),
"diameter" : random.randrange(5, 8),
"seed" : random.randrange(1,1000000),
"name" : "A-{0}".format(len(self.asteroids)),
} )
def new_planet(self):
self.planets.append( {
"planet_class" : Planet.classes[ random.randrange(0, len(Planet.classes)) ],
"position_x" : random.randrange(-1000,1001),
"position_y" : random.randrange(-1000,1001),
"diameter" : random.randrange(18, self.galaxy.screen_height),
"seed" : random.randrange(1,1000000),
"name" : self.galaxy.next_name(),
} )
def __repr__(self):
return repr({ "posx": self.galaxy_position_x, "posy": self.galaxy_position_y, "seed": self.seed, "planet_count": self.planet_count, "nebula_background": self.nebula_background, "planets": self.planets })
def print_planet_loading_icon(self, console, icon, color, offset=0, count=0, line=0):
center_height = self.screen_height/2
center_width = self.screen_width/2
libtcod.console_put_char_ex(console, center_width-((count+2)/2)+offset, center_height+4+line, icon, color, libtcod.black)
libtcod.console_blit(console, 0, 0, self.screen_width, self.screen_height, 0, 0, 0)
libtcod.console_flush()
def loading_message(self, message, console, clear=True):
if clear:
libtcod.console_clear(console)
libtcod.console_set_fade(255,libtcod.black)
center_height = self.screen_height/2
third_width = self.screen_width/2
libtcod.console_print_ex(console, 0, center_height, libtcod.BKGND_SET, libtcod.LEFT, message.center(self.screen_width))
libtcod.console_print_frame(console, int(third_width*0.5), center_height-2, third_width, 5, clear=False, flag=libtcod.BKGND_SET, fmt=0)
libtcod.console_blit(console, 0, 0, self.screen_width, self.screen_height, 0, 0, 0)
libtcod.console_flush()
def load_sector(self, console, buffer):
self.loading_message("Scanning Planets", console)
sector = Sector(self.screen_width, self.screen_height, buffer)
for index, planet in enumerate(self.planets):
# pp(planet)
icon, color, planet_count = sector.add_planet(
planet_class=planet['planet_class'],
position_x=planet['position_x'],
position_y=planet['position_y'],
diameter=planet['diameter'],
seed=planet['seed'],
name=planet['name'],
)
if planet['planet_class'] == "star":
self.star_icon = icon
self.star_color = color
self.print_planet_loading_icon(console, icon, color, offset=index, count=len(self.planets))
# self.loading_message("Mapping Asteroids", console)
for index, asteroid in enumerate(self.asteroids):
icon, color, asteroid_count = sector.add_asteroid(
planet_class=asteroid['planet_class'],
position_x=asteroid['position_x'],
position_y=asteroid['position_y'],
diameter=asteroid['diameter'],
seed=asteroid['seed'],
name=asteroid['name'],
)
self.print_planet_loading_icon(console, icon, color, offset=index, count=len(self.asteroids), line=1)
self.loading_message("Reading Background Radiation", console)
starfield = Starfield(sector, max_stars=50)
nebula = Nebula(sector, r_factor=self.nebula_background[0], g_factor=self.nebula_background[1], b_factor=self.nebula_background[2], seed=self.nebula_seed)
return sector, starfield, nebula
|
|
"""Config flow for Apple TV integration."""
from ipaddress import ip_address
import logging
from random import randrange
from pyatv import exceptions, pair, scan
from pyatv.const import Protocol
from pyatv.convert import protocol_str
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_ADDRESS,
CONF_NAME,
CONF_PIN,
CONF_PROTOCOL,
CONF_TYPE,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import AbortFlow
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import CONF_CREDENTIALS, CONF_IDENTIFIER, CONF_START_OFF
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
DEVICE_INPUT = "device_input"
INPUT_PIN_SCHEMA = vol.Schema({vol.Required(CONF_PIN, default=None): int})
DEFAULT_START_OFF = False
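# Order in which protocols are paired; a main protocol (MRP or DMAP) is handled
# first, with AirPlay paired last.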
PROTOCOL_PRIORITY = [Protocol.MRP, Protocol.DMAP, Protocol.AirPlay]
async def device_scan(identifier, loop, cache=None):
"""Scan for a specific device using identifier as filter."""
def _filter_device(dev):
if identifier is None:
return True
if identifier == str(dev.address):
return True
if identifier == dev.name:
return True
        return any(service.identifier == identifier for service in dev.services)
def _host_filter():
try:
return [ip_address(identifier)]
except ValueError:
return None
if cache:
matches = [atv for atv in cache if _filter_device(atv)]
if matches:
return cache, matches[0]
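    # Try a targeted scan first when the identifier looks like an IP address,
    # then fall back to a full scan of the network.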
for hosts in [_host_filter(), None]:
scan_result = await scan(loop, timeout=3, hosts=hosts)
matches = [atv for atv in scan_result if _filter_device(atv)]
if matches:
return scan_result, matches[0]
return scan_result, None
def is_valid_credentials(credentials):
"""Verify that credentials are valid for establishing a connection."""
return (
credentials.get(Protocol.MRP.value) is not None
or credentials.get(Protocol.DMAP.value) is not None
)
class AppleTVConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Apple TV."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get options flow for this handler."""
return AppleTVOptionsFlow(config_entry)
def __init__(self):
"""Initialize a new AppleTVConfigFlow."""
self.target_device = None
self.scan_result = None
self.atv = None
self.protocol = None
self.pairing = None
self.credentials = {} # Protocol -> credentials
async def async_step_reauth(self, info):
"""Handle initial step when updating invalid credentials."""
await self.async_set_unique_id(info[CONF_IDENTIFIER])
self.target_device = info[CONF_IDENTIFIER]
self.context["title_placeholders"] = {"name": info[CONF_NAME]}
self.context["identifier"] = self.unique_id
return await self.async_step_reconfigure()
async def async_step_reconfigure(self, user_input=None):
"""Inform user that reconfiguration is about to start."""
if user_input is not None:
return await self.async_find_device_wrapper(
self.async_begin_pairing, allow_exist=True
)
return self.async_show_form(step_id="reconfigure")
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
# Be helpful to the user and look for devices
if self.scan_result is None:
self.scan_result, _ = await device_scan(None, self.hass.loop)
errors = {}
default_suggestion = self._prefill_identifier()
if user_input is not None:
self.target_device = user_input[DEVICE_INPUT]
try:
await self.async_find_device()
except DeviceNotFound:
errors["base"] = "no_devices_found"
except DeviceAlreadyConfigured:
errors["base"] = "already_configured"
except exceptions.NoServiceError:
errors["base"] = "no_usable_service"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
await self.async_set_unique_id(
self.atv.identifier, raise_on_progress=False
)
return await self.async_step_confirm()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{vol.Required(DEVICE_INPUT, default=default_suggestion): str}
),
errors=errors,
description_placeholders={"devices": self._devices_str()},
)
async def async_step_zeroconf(self, discovery_info):
"""Handle device found via zeroconf."""
service_type = discovery_info[CONF_TYPE]
properties = discovery_info["properties"]
if service_type == "_mediaremotetv._tcp.local.":
identifier = properties["UniqueIdentifier"]
name = properties["Name"]
elif service_type == "_touch-able._tcp.local.":
identifier = discovery_info["name"].split(".")[0]
name = properties["CtlN"]
else:
return self.async_abort(reason="unknown")
await self.async_set_unique_id(identifier)
self._abort_if_unique_id_configured()
self.context["identifier"] = self.unique_id
self.context["title_placeholders"] = {"name": name}
self.target_device = identifier
return await self.async_find_device_wrapper(self.async_step_confirm)
async def async_find_device_wrapper(self, next_func, allow_exist=False):
"""Find a specific device and call another function when done.
This function will do error handling and bail out when an error
occurs.
"""
try:
await self.async_find_device(allow_exist)
except DeviceNotFound:
return self.async_abort(reason="no_devices_found")
except DeviceAlreadyConfigured:
return self.async_abort(reason="already_configured")
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
return self.async_abort(reason="unknown")
return await next_func()
async def async_find_device(self, allow_exist=False):
"""Scan for the selected device to discover services."""
self.scan_result, self.atv = await device_scan(
self.target_device, self.hass.loop, cache=self.scan_result
)
if not self.atv:
raise DeviceNotFound()
self.protocol = self.atv.main_service().protocol
if not allow_exist:
for identifier in self.atv.all_identifiers:
if identifier in self._async_current_ids():
raise DeviceAlreadyConfigured()
# If credentials were found, save them
for service in self.atv.services:
if service.credentials:
self.credentials[service.protocol.value] = service.credentials
async def async_step_confirm(self, user_input=None):
"""Handle user-confirmation of discovered node."""
if user_input is not None:
return await self.async_begin_pairing()
return self.async_show_form(
step_id="confirm", description_placeholders={"name": self.atv.name}
)
async def async_begin_pairing(self):
"""Start pairing process for the next available protocol."""
self.protocol = self._next_protocol_to_pair()
# Dispose previous pairing sessions
if self.pairing is not None:
await self.pairing.close()
self.pairing = None
# Any more protocols to pair? Else bail out here
if not self.protocol:
await self.async_set_unique_id(self.atv.main_service().identifier)
return self._async_get_entry(
self.atv.main_service().protocol,
self.atv.name,
self.credentials,
self.atv.address,
)
# Initiate the pairing process
abort_reason = None
session = async_get_clientsession(self.hass)
self.pairing = await pair(
self.atv, self.protocol, self.hass.loop, session=session
)
try:
await self.pairing.begin()
except exceptions.ConnectionFailedError:
return await self.async_step_service_problem()
except exceptions.BackOffError:
abort_reason = "backoff"
except exceptions.PairingError:
_LOGGER.exception("Authentication problem")
abort_reason = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
abort_reason = "unknown"
if abort_reason:
if self.pairing:
await self.pairing.close()
return self.async_abort(reason=abort_reason)
# Choose step depending on if PIN is required from user or not
if self.pairing.device_provides_pin:
return await self.async_step_pair_with_pin()
return await self.async_step_pair_no_pin()
async def async_step_pair_with_pin(self, user_input=None):
"""Handle pairing step where a PIN is required from the user."""
errors = {}
if user_input is not None:
try:
self.pairing.pin(user_input[CONF_PIN])
await self.pairing.finish()
self.credentials[self.protocol.value] = self.pairing.service.credentials
return await self.async_begin_pairing()
except exceptions.PairingError:
_LOGGER.exception("Authentication problem")
errors["base"] = "invalid_auth"
except AbortFlow:
raise
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return self.async_show_form(
step_id="pair_with_pin",
data_schema=INPUT_PIN_SCHEMA,
errors=errors,
description_placeholders={"protocol": protocol_str(self.protocol)},
)
async def async_step_pair_no_pin(self, user_input=None):
"""Handle step where user has to enter a PIN on the device."""
if user_input is not None:
await self.pairing.finish()
if self.pairing.has_paired:
self.credentials[self.protocol.value] = self.pairing.service.credentials
return await self.async_begin_pairing()
await self.pairing.close()
return self.async_abort(reason="device_did_not_pair")
pin = randrange(1000, stop=10000)
self.pairing.pin(pin)
return self.async_show_form(
step_id="pair_no_pin",
description_placeholders={
"protocol": protocol_str(self.protocol),
"pin": pin,
},
)
async def async_step_service_problem(self, user_input=None):
"""Inform user that a service will not be added."""
if user_input is not None:
self.credentials[self.protocol.value] = None
return await self.async_begin_pairing()
return self.async_show_form(
step_id="service_problem",
description_placeholders={"protocol": protocol_str(self.protocol)},
)
def _async_get_entry(self, protocol, name, credentials, address):
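        """Create a config entry from the collected device details."""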
if not is_valid_credentials(credentials):
return self.async_abort(reason="invalid_config")
data = {
CONF_PROTOCOL: protocol.value,
CONF_NAME: name,
CONF_CREDENTIALS: credentials,
CONF_ADDRESS: str(address),
}
self._abort_if_unique_id_configured(reload_on_update=False, updates=data)
return self.async_create_entry(title=name, data=data)
def _next_protocol_to_pair(self):
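        """Return the next protocol that has a service but no credentials yet."""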
def _needs_pairing(protocol):
if self.atv.get_service(protocol) is None:
return False
return protocol.value not in self.credentials
for protocol in PROTOCOL_PRIORITY:
if _needs_pairing(protocol):
return protocol
return None
def _devices_str(self):
return ", ".join(
[
f"`{atv.name} ({atv.address})`"
for atv in self.scan_result
if atv.identifier not in self._async_current_ids()
]
)
def _prefill_identifier(self):
# Return identifier (address) of one device that has not been paired with
for atv in self.scan_result:
if atv.identifier not in self._async_current_ids():
return str(atv.address)
return ""
class AppleTVOptionsFlow(config_entries.OptionsFlow):
"""Handle Apple TV options."""
def __init__(self, config_entry):
"""Initialize Apple TV options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
async def async_step_init(self, user_input=None):
"""Manage the Apple TV options."""
if user_input is not None:
self.options[CONF_START_OFF] = user_input[CONF_START_OFF]
return self.async_create_entry(title="", data=self.options)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_START_OFF,
default=self.config_entry.options.get(
CONF_START_OFF, DEFAULT_START_OFF
),
): bool,
}
),
)
class DeviceNotFound(HomeAssistantError):
"""Error to indicate device could not be found."""
class DeviceAlreadyConfigured(HomeAssistantError):
"""Error to indicate device is already configured."""
|
|
"""
********************************************************************************
* Name: ChannelInputChunk
* Author: Nathan Swain
* Created On: July 23, 2013
* Copyright: (c) Brigham Young University 2013
* License: BSD 2-Clause
********************************************************************************
"""
from future.utils import iteritems
from . import parsetools as pt
def cardChunk(key, chunk):
"""
Parse Card Chunk Method
"""
for line in chunk:
values = []
sline = line.strip().split()
for idx in range(1, len(sline)):
values.append(sline[idx])
return {'card': sline[0],
'values': values}
def connectChunk(key, chunk):
"""
    Parse CONNECT Chunk Method
"""
upLinks = []
schunk = chunk[0].strip().split()
for idx in range(4, len(schunk)):
upLinks.append(schunk[idx])
result = {'link': schunk[1],
'downLink': schunk[2],
'numUpLinks': schunk[3],
'upLinks': upLinks}
return result
def linkChunk(key, chunk):
"""
Parse LINK Chunk Method
"""
# Extract link type card
linkType = chunk[1].strip().split()[0]
# Cases
if linkType == 'DX':
# Cross section link type handler
result = xSectionLink(chunk)
elif linkType == 'STRUCTURE':
# Structure link type handler
result = structureLink(chunk)
elif linkType in ('RESERVOIR', 'LAKE'):
# Reservoir link type handler
result = reservoirLink(chunk)
return result
def structureLink(lines):
"""
Parse STRUCTURE LINK Method
"""
# Constants
KEYWORDS = ('LINK',
'STRUCTURE',
'NUMSTRUCTS',
'STRUCTTYPE')
WEIR_KEYWORDS = ('STRUCTTYPE',
'CREST_LENGTH',
'CREST_LOW_ELEV',
'DISCHARGE_COEFF_FORWARD',
'DISCHARGE_COEFF_REVERSE',
'CREST_LOW_LOC',
'STEEP_SLOPE',
'SHALLOW_SLOPE')
CULVERT_KEYWORDS = ('STRUCTTYPE',
'UPINVERT',
'DOWNINVERT',
'INLET_DISCH_COEFF',
'REV_FLOW_DISCH_COEFF',
'SLOPE',
'LENGTH',
'ROUGH_COEFF',
'DIAMETER',
'WIDTH',
'HEIGHT')
WEIRS = ('WEIR', 'SAG_WEIR')
CULVERTS = ('ROUND_CULVERT', 'RECT_CULVERT')
CURVES = ('RATING_CURVE', 'SCHEDULED_RELEASE', 'RULE_CURVE')
result = {'type': 'STRUCTURE',
'header': {'link': None,
'numstructs': None},
'structures':[]}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Cases
if key == 'STRUCTTYPE':
# Structure handler
structType = chunk[0].strip().split()[1]
# Cases
if structType in WEIRS:
weirResult = {'structtype': None,
'crest_length': None,
'crest_low_elev': None,
'discharge_coeff_forward': None,
'discharge_coeff_reverse': None,
'crest_low_loc': None,
'steep_slope': None,
'shallow_slope': None}
# Weir type structures handler
result['structures'].append(structureChunk(WEIR_KEYWORDS, weirResult, chunk))
elif structType in CULVERTS:
culvertResult = {'structtype': None,
'upinvert': None,
'downinvert': None,
'inlet_disch_coeff': None,
'rev_flow_disch_coeff': None,
'slope': None,
'length': None,
'rough_coeff': None,
'diameter': None,
'width': None,
'height': None}
# Culvert type structures handler
result['structures'].append(structureChunk(CULVERT_KEYWORDS, culvertResult, chunk))
elif structType in CURVES:
# Curve type handler
pass
elif key != 'STRUCTURE':
# All other variables header
result['header'][key.lower()] = chunk[0].strip().split()[1]
return result
def xSectionLink(lines):
"""
Parse Cross Section Links Method
"""
# Constants
KEYWORDS = ('LINK',
'DX',
'TRAPEZOID',
'TRAPEZOID_ERODE',
'TRAPEZOID_SUBSURFACE',
'ERODE_TRAPEZOID',
'ERODE_SUBSURFACE',
'SUBSURFACE_TRAPEZOID',
'SUBSURFACE_ERODE',
'TRAPEZOID_ERODE_SUBSURFACE',
'TRAPEZOID_SUBSURFACE_ERODE',
'ERODE_TRAPEZOID_SUBSURFACE',
'ERODE_SUBSURFACE_TRAPEZOID',
'SUBSURFACE_TRAPEZOID_ERODE',
'SUBSURFACE_ERODE_TRAPEZOID',
'BREAKPOINT',
'BREAKPOINT_ERODE',
'BREAKPOINT_SUBSURFACE',
'ERODE_BREAKPOINT',
'ERODE_SUBSURFACE',
'SUBSURFACE_BREAKPOINT',
'SUBSURFACE_ERODE',
'BREAKPOINT_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE_ERODE',
'ERODE_BREAKPOINT_SUBSURFACE',
'ERODE_SUBSURFACE_BREAKPOINT',
'SUBSURFACE_BREAKPOINT_ERODE',
'SUBSURFACE_ERODE_BREAKPOINT',
'TRAP',
'TRAP_ERODE',
'TRAP_SUBSURFACE',
'ERODE_TRAP',
'ERODE_SUBSURFACE',
'SUBSURFACE_TRAP',
'SUBSURFACE_ERODE',
'TRAP_ERODE_SUBSURFACE',
'TRAP_SUBSURFACE_ERODE',
'ERODE_TRAP_SUBSURFACE',
'ERODE_SUBSURFACE_TRAP',
'SUBSURFACE_TRAP_ERODE',
'SUBSURFACE_ERODE_TRAP',
'NODES',
'NODE',
'XSEC')
ERODE = ('TRAPEZOID_ERODE',
'TRAP_ERODE',
'TRAP_SUBSURFACE_ERODE',
'TRAP_ERODE_SUBSURFACE',
'BREAKPOINT_ERODE',
'TRAPEZOID_SUBSURFACE_ERODE',
'TRAPEZOID_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE_ERODE',
'BREAKPOINT_ERODE_SUBSURFACE')
SUBSURFACE = ('TRAPEZOID_SUBSURFACE',
'TRAP_SUBSURFACE',
'TRAP_SUBSURFACE_ERODE',
'TRAP_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE',
'TRAPEZOID_SUBSURFACE_ERODE',
'TRAPEZOID_ERODE_SUBSURFACE',
'BREAKPOINT_SUBSURFACE_ERODE',
'BREAKPOINT_ERODE_SUBSURFACE')
result = {'type': 'XSEC',
'header': {'link': None,
'dx': None,
'xSecType': None,
'nodes': None,
'erode': False,
'subsurface': False},
'xSection': None,
'nodes': []}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Cases
if key == 'NODE':
# Extract node x and y
result['nodes'].append(nodeChunk(chunk))
elif key == 'XSEC':
# Extract cross section information
result['xSection'] = xSectionChunk(chunk)
elif ('TRAPEZOID' in key) or ('BREAKPOINT' in key) or ('TRAP' in key):
# Cross section type handler
result['header']['xSecType'] = key
elif key in ERODE:
# Erode handler
result['header']['erode'] = True
elif key in SUBSURFACE:
# Subsurface handler
result['header']['subsurface'] = True
else:
# Extract all other variables into header
result['header'][key.lower()] = chunk[0].strip().split()[1]
return result
def reservoirLink(lines):
"""
Parse RESERVOIR Link Method
"""
# Constants
KEYWORDS = ('LINK',
'RESERVOIR',
'RES_MINWSE',
'RES_INITWSE',
'RES_MAXWSE',
'RES_NUMPTS',
'LAKE',
'MINWSE',
'INITWSE',
'MAXWSE',
'NUMPTS')
result = {'header': {'link': None,
'res_minwse': None,
'res_initwse': None,
'res_maxwse': None,
'res_numpts': None,
'minwse': None,
'initwse': None,
'maxwse': None,
'numpts': None},
'type': None,
'points': []}
pair = {'i': None,
'j': None}
# Rechunk the chunk
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
# Cases
if key in ('NUMPTS', 'RES_NUMPTS'):
# Points handler
result['header'][key.lower()] = schunk[1]
# Parse points
for idx in range(1, len(chunk)):
schunk = chunk[idx].strip().split()
for count, ordinate in enumerate(schunk):
# Divide ordinates into ij pairs
if (count % 2) == 0:
pair['i'] = ordinate
else:
pair['j'] = ordinate
result['points'].append(pair)
pair = {'i': None,
'j': None}
elif key in ('LAKE', 'RESERVOIR'):
# Type handler
result['type'] = schunk[0]
else:
# Header variables handler
result['header'][key.lower()] = schunk[1]
return result
def nodeChunk(lines):
"""
Parse NODE Method
"""
# Constants
KEYWORDS = ('NODE',
'X_Y',
'ELEV')
result = {'node': None,
'x': None,
'y': None,
'elev': None}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
if key == 'X_Y':
result['x'] = schunk[1]
result['y'] = schunk[2]
else:
result[key.lower()] = schunk[1]
return result
def xSectionChunk(lines):
"""
Parse XSEC Method
"""
# Constants
KEYWORDS = ('MANNINGS_N',
'BOTTOM_WIDTH',
'BANKFULL_DEPTH',
'SIDE_SLOPE',
'NPAIRS',
'NUM_INTERP',
'X1',
'ERODE',
'MAX_EROSION',
'SUBSURFACE',
'M_RIVER',
'K_RIVER')
result = {'mannings_n': None,
'bottom_width': None,
'bankfull_depth': None,
'side_slope': None,
'npairs': None,
'num_interp': None,
'erode': False,
'subsurface': False,
'max_erosion': None,
'm_river': None,
'k_river': None,
'breakpoints': []}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Strip and split the line (only one item in each list)
schunk = chunk[0].strip().split()
# Cases
if key == 'X1':
# Extract breakpoint XY pairs
x = schunk[1]
y = schunk[2]
result['breakpoints'].append({'x': x, 'y': y})
            elif key in ('SUBSURFACE', 'ERODE'):
# Set booleans
result[key.lower()] = True
else:
# Extract value
result[key.lower()] = schunk[1]
return result
def structureChunk(keywords, resultDict, lines):
"""
Parse Weir and Culvert Structures Method
"""
chunks = pt.chunk(keywords, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Strip and split the line (only one item in each list)
schunk = chunk[0].strip().split()
# Extract values and assign to appropriate key in resultDict
resultDict[key.lower()] = schunk[1]
return resultDict
|
|
from .. utils import TranspileTestCase, UnaryOperationTestCase, BinaryOperationTestCase, InplaceOperationTestCase
class FrozensetTests(TranspileTestCase):
def test_creation(self):
        # Empty frozenset
self.assertCodeExecution("""
x = frozenset()
print(x)
x = frozenset({'a'})
print(x)
y = {1,2}
x = frozenset(y)
print(x)
y = {3}
print(x)
y = ['c']
x = frozenset(y)
print(x)
y.append('d')
print(x)
y = (1,2)
x = frozenset(y)
print(x)
y = ()
print(x)
y ='a'
x = frozenset(y)
print(x)
y = y + 'b'
print(x)
""")
def test_contains(self):
# Normal Contains
self.assertCodeExecution("""
x = frozenset()
print(1 in x)
x = frozenset({'a'})
print('a' in x)
print('b' in x)
y = {1,2}
x = frozenset(y)
print(2 in x)
print(3 in x)
y = ['c']
x = frozenset(y)
print('c' in x)
print('a' in x)
y = (1,2)
x = frozenset(y)
print(3 in x)
print(1 in x)
y = 'a'
x = frozenset(y)
print('e' in x)
print('a' in x)
""")
def test_not_contains(self):
# Normal Not Contains
self.assertCodeExecution("""
x = frozenset()
            print(1 not in x)
x = frozenset({'a'})
print('b' not in x)
print('a' not in x)
y = {1,2}
x = frozenset(y)
print(4 not in x)
print(1 not in x)
y = ['c']
x = frozenset(y)
print('d' not in x)
print('c' not in x)
y = (1,2)
x = frozenset(y)
print(1 not in x)
print(3 not in x)
y = 'a'
x = frozenset(y)
print('a' not in x)
print('d' not in x)
""")
def test_iteration(self):
self.assertCodeExecution("""
x = {1, 2, 3}
y = frozenset()
            # We are not printing each element because Python and VOC store the items in different ways.
for s in x:
print(x.__contains__(s))
for s in y:
                print(y.__contains__(s))
""")
def test_copy(self):
self.assertCodeExecution("""
x = frozenset()
y = x.copy()
print(x is y)
x = frozenset({'a'})
y = x.copy()
print(x is y)
y = {1,2}
x = frozenset(y)
z = x.copy()
print(x is z)
y = ['c']
x = frozenset(y)
z = x.copy()
print(x is z)
y = (1,2)
x = frozenset(y)
z = x.copy()
print(x is z)
y = 'a'
x = frozenset(y)
z = x.copy()
print(x is z)
""")
def test_isdisjoint(self):
self.assertCodeExecution("""
x = frozenset("hello world")
y = set("hello")
z = frozenset()
t = "hello"
w = 2.0
print(x.isdisjoint(y))
print(x.isdisjoint(z))
# iterable test
print(x.isdisjoint(t))
# not-iterable test
try:
print(x.isdisjoint(w))
except TypeError as err:
print(err)
""")
def test_issubset(self):
self.assertCodeExecution("""
a = frozenset('abc')
b = frozenset('abcde')
c = set()
d = 'wxyz'
print(a.issubset(b))
print(a.issubset(a))
print(a.issubset(c))
# iterable test
print(a.issubset(d))
""")
# not-iterable test
self.assertCodeExecution("""
a = frozenset({1, 2, 3})
b = 1
try:
print(a.issubset(b))
except TypeError as err:
print(err)
""")
def test_issuperset(self):
self.assertCodeExecution("""
a = frozenset('abcd')
b = frozenset('ab')
c = set()
d = 'ab'
print(a.issuperset(b))
print(a.issuperset(a))
print(a.issuperset(c))
# iterable test
print(a.issuperset(d))
""")
# not-iterable test
self.assertCodeExecution("""
a = frozenset({1, 2, 3})
b = 1
try:
print(a.issuperset(b))
except TypeError as err:
print(err)
""")
def test_union(self):
self.assertCodeExecution("""
x = frozenset({1, 2, 3})
y = frozenset({3, 4, 5})
z = [5, 6, 7]
w = 1
t = frozenset()
print(x.union(y))
# empty set test
print(x.union(t))
# multiple args test
print(x.union(y, z))
# iterable test
print(x.union(z))
# not-iterable test
try:
print(x.union(w))
except TypeError as err:
print(err)
""")
def test_intersection(self):
self.assertCodeExecution("""
x = frozenset({1, 2, 3})
y = frozenset({2, 3, 4})
z = [3, 6, 7]
w = 1
t = frozenset()
print(x.intersection(y))
# empty set test
print(x.intersection(t))
# multiple args test
print(x.intersection(y, z))
# iterable test
print(x.intersection(z))
# not-iterable test
try:
print(x.intersection(w))
except TypeError as err:
print(err)
""")
def test_difference(self):
self.assertCodeExecution("""
x = frozenset({1, 2, 3})
y = frozenset({2, 3, 4})
z = [1, 6]
w = 1
t = frozenset()
print(x.difference(y))
# empty set test
print(x.difference(t))
# multiple args test
print(x.difference(y, z))
# iterable test
print(x.difference(z))
# not-iterable test
try:
print(x.difference(w))
except TypeError as err:
print(err)
""")
def test_symmetric_difference(self):
self.assertCodeExecution("""
x = frozenset({1, 2, 3})
y = frozenset({2, 3, 4})
z = [1, 6, 7]
w = 1
t = frozenset()
print(x.symmetric_difference(y))
# empty set test
print(x.symmetric_difference(t))
# iterable test
print(x.symmetric_difference(z))
# not-iterable test
try:
print(x.symmetric_difference(w))
except TypeError as err:
print(err)
""")
class UnaryFrozensetOperationTests(UnaryOperationTestCase, TranspileTestCase):
data_type = 'frozenset'
not_implemented = []
class BinaryFrozensetOperationTests(BinaryOperationTestCase, TranspileTestCase):
data_type = 'frozenset'
not_implemented = [
'test_add_class',
'test_and_class',
'test_direct_eq_bool',
'test_direct_eq_bytearray',
'test_direct_eq_bytes',
'test_direct_eq_class',
'test_direct_eq_complex',
'test_direct_eq_dict',
'test_direct_eq_float',
'test_direct_eq_frozenset',
'test_direct_eq_int',
'test_direct_eq_list',
'test_direct_eq_None',
'test_direct_eq_NotImplemented',
'test_direct_eq_range',
'test_direct_eq_set',
'test_direct_eq_slice',
'test_direct_eq_str',
'test_direct_eq_tuple',
'test_direct_ne_bool',
'test_direct_ne_bytearray',
'test_direct_ne_bytes',
'test_direct_ne_class',
'test_direct_ne_complex',
'test_direct_ne_dict',
'test_direct_ne_float',
'test_direct_ne_frozenset',
'test_direct_ne_int',
'test_direct_ne_list',
'test_direct_ne_None',
'test_direct_ne_NotImplemented',
'test_direct_ne_range',
'test_direct_ne_set',
'test_direct_ne_slice',
'test_direct_ne_str',
'test_direct_ne_tuple',
'test_direct_le_bool',
'test_direct_le_bytearray',
'test_direct_le_bytes',
'test_direct_le_class',
'test_direct_le_complex',
'test_direct_le_dict',
'test_direct_le_float',
'test_direct_le_frozenset',
'test_direct_le_int',
'test_direct_le_list',
'test_direct_le_None',
'test_direct_le_NotImplemented',
'test_direct_le_range',
'test_direct_le_set',
'test_direct_le_slice',
'test_direct_le_str',
'test_direct_le_tuple',
'test_direct_lt_bool',
'test_direct_lt_bytearray',
'test_direct_lt_bytes',
'test_direct_lt_class',
'test_direct_lt_complex',
'test_direct_lt_dict',
'test_direct_lt_float',
'test_direct_lt_frozenset',
'test_direct_lt_int',
'test_direct_lt_list',
'test_direct_lt_None',
'test_direct_lt_NotImplemented',
'test_direct_lt_range',
'test_direct_lt_set',
'test_direct_lt_slice',
'test_direct_lt_str',
'test_direct_lt_tuple',
'test_direct_ge_bool',
'test_direct_ge_bytearray',
'test_direct_ge_bytes',
'test_direct_ge_class',
'test_direct_ge_complex',
'test_direct_ge_dict',
'test_direct_ge_float',
'test_direct_ge_frozenset',
'test_direct_ge_int',
'test_direct_ge_list',
'test_direct_ge_None',
'test_direct_ge_NotImplemented',
'test_direct_ge_range',
'test_direct_ge_set',
'test_direct_ge_slice',
'test_direct_ge_str',
'test_direct_ge_tuple',
'test_direct_gt_bool',
'test_direct_gt_bytearray',
'test_direct_gt_bytes',
'test_direct_gt_class',
'test_direct_gt_complex',
'test_direct_gt_dict',
'test_direct_gt_float',
'test_direct_gt_frozenset',
'test_direct_gt_int',
'test_direct_gt_list',
'test_direct_gt_None',
'test_direct_gt_NotImplemented',
'test_direct_gt_range',
'test_direct_gt_set',
'test_direct_gt_slice',
'test_direct_gt_str',
'test_direct_gt_tuple',
'test_eq_class',
'test_floor_divide_bool',
'test_floor_divide_bytearray',
'test_floor_divide_bytes',
'test_floor_divide_class',
'test_floor_divide_dict',
'test_floor_divide_float',
'test_floor_divide_frozenset',
'test_floor_divide_int',
'test_floor_divide_list',
'test_floor_divide_None',
'test_floor_divide_NotImplemented',
'test_floor_divide_range',
'test_floor_divide_set',
'test_floor_divide_slice',
'test_floor_divide_str',
'test_floor_divide_tuple',
'test_ge_class',
'test_gt_class',
'test_le_class',
'test_lshift_bool',
'test_lshift_bytearray',
'test_lshift_bytes',
'test_lshift_class',
'test_lshift_complex',
'test_lshift_dict',
'test_lshift_float',
'test_lshift_frozenset',
'test_lshift_int',
'test_lshift_list',
'test_lshift_None',
'test_lshift_NotImplemented',
'test_lshift_range',
'test_lshift_set',
'test_lshift_slice',
'test_lshift_str',
'test_lshift_tuple',
'test_lt_class',
'test_modulo_bool',
'test_modulo_bytearray',
'test_modulo_bytes',
'test_modulo_class',
'test_modulo_complex',
'test_modulo_dict',
'test_modulo_float',
'test_modulo_frozenset',
'test_modulo_int',
'test_modulo_list',
'test_modulo_None',
'test_modulo_NotImplemented',
'test_modulo_range',
'test_modulo_set',
'test_modulo_slice',
'test_modulo_str',
'test_modulo_tuple',
'test_multiply_class',
'test_ne_bool',
'test_ne_bytearray',
'test_ne_bytes',
'test_ne_class',
'test_ne_complex',
'test_ne_dict',
'test_ne_float',
'test_ne_frozenset',
'test_ne_int',
'test_ne_list',
'test_ne_None',
'test_ne_NotImplemented',
'test_ne_range',
'test_ne_set',
'test_ne_slice',
'test_ne_str',
'test_ne_tuple',
'test_or_class',
'test_power_bool',
'test_power_bytearray',
'test_power_bytes',
'test_power_class',
'test_power_complex',
'test_power_dict',
'test_power_float',
'test_power_frozenset',
'test_power_int',
'test_power_list',
'test_power_None',
'test_power_NotImplemented',
'test_power_range',
'test_power_set',
'test_power_slice',
'test_power_str',
'test_power_tuple',
'test_rshift_bool',
'test_rshift_bytearray',
'test_rshift_bytes',
'test_rshift_class',
'test_rshift_complex',
'test_rshift_dict',
'test_rshift_float',
'test_rshift_frozenset',
'test_rshift_int',
'test_rshift_list',
'test_rshift_None',
'test_rshift_NotImplemented',
'test_rshift_range',
'test_rshift_set',
'test_rshift_slice',
'test_rshift_str',
'test_rshift_tuple',
'test_subscr_bool',
'test_subscr_bytearray',
'test_subscr_bytes',
'test_subscr_class',
'test_subscr_complex',
'test_subscr_dict',
'test_subscr_float',
'test_subscr_frozenset',
'test_subscr_int',
'test_subscr_list',
'test_subscr_None',
'test_subscr_NotImplemented',
'test_subscr_range',
'test_subscr_set',
'test_subscr_slice',
'test_subscr_str',
'test_subscr_tuple',
'test_subtract_class',
'test_true_divide_bool',
'test_true_divide_bytearray',
'test_true_divide_bytes',
'test_true_divide_class',
'test_true_divide_complex',
'test_true_divide_dict',
'test_true_divide_float',
'test_true_divide_frozenset',
'test_true_divide_int',
'test_true_divide_list',
'test_true_divide_None',
'test_true_divide_NotImplemented',
'test_true_divide_range',
'test_true_divide_set',
'test_true_divide_slice',
'test_true_divide_str',
'test_true_divide_tuple',
'test_xor_class',
]
class InplaceFrozensetOperationTests(InplaceOperationTestCase, TranspileTestCase):
data_type = 'frozenset'
not_implemented = [
'test_add_bool',
'test_add_bytearray',
'test_add_bytes',
'test_add_class',
'test_add_complex',
'test_add_dict',
'test_add_float',
'test_add_frozenset',
'test_add_int',
'test_add_list',
'test_add_None',
'test_add_NotImplemented',
'test_add_range',
'test_add_set',
'test_add_slice',
'test_add_str',
'test_add_tuple',
'test_and_bool',
'test_and_bytearray',
'test_and_bytes',
'test_and_class',
'test_and_complex',
'test_and_dict',
'test_and_float',
'test_and_frozenset',
'test_and_int',
'test_and_list',
'test_and_None',
'test_and_NotImplemented',
'test_and_range',
'test_and_set',
'test_and_slice',
'test_and_str',
'test_and_tuple',
'test_floor_divide_bool',
'test_floor_divide_bytearray',
'test_floor_divide_bytes',
'test_floor_divide_class',
'test_floor_divide_complex',
'test_floor_divide_dict',
'test_floor_divide_float',
'test_floor_divide_frozenset',
'test_floor_divide_int',
'test_floor_divide_list',
'test_floor_divide_None',
'test_floor_divide_NotImplemented',
'test_floor_divide_range',
'test_floor_divide_set',
'test_floor_divide_slice',
'test_floor_divide_str',
'test_floor_divide_tuple',
'test_lshift_bool',
'test_lshift_bytearray',
'test_lshift_bytes',
'test_lshift_class',
'test_lshift_complex',
'test_lshift_dict',
'test_lshift_float',
'test_lshift_frozenset',
'test_lshift_int',
'test_lshift_list',
'test_lshift_None',
'test_lshift_NotImplemented',
'test_lshift_range',
'test_lshift_set',
'test_lshift_slice',
'test_lshift_str',
'test_lshift_tuple',
'test_modulo_bool',
'test_modulo_bytearray',
'test_modulo_bytes',
'test_modulo_class',
'test_modulo_complex',
'test_modulo_dict',
'test_modulo_float',
'test_modulo_frozenset',
'test_modulo_int',
'test_modulo_list',
'test_modulo_None',
'test_modulo_NotImplemented',
'test_modulo_range',
'test_modulo_set',
'test_modulo_slice',
'test_modulo_str',
'test_modulo_tuple',
'test_multiply_bool',
'test_multiply_bytearray',
'test_multiply_bytes',
'test_multiply_class',
'test_multiply_complex',
'test_multiply_dict',
'test_multiply_float',
'test_multiply_frozenset',
'test_multiply_int',
'test_multiply_list',
'test_multiply_None',
'test_multiply_NotImplemented',
'test_multiply_range',
'test_multiply_set',
'test_multiply_slice',
'test_multiply_str',
'test_multiply_tuple',
'test_or_bool',
'test_or_bytearray',
'test_or_bytes',
'test_or_class',
'test_or_complex',
'test_or_dict',
'test_or_float',
'test_or_frozenset',
'test_or_int',
'test_or_list',
'test_or_None',
'test_or_NotImplemented',
'test_or_range',
'test_or_set',
'test_or_slice',
'test_or_str',
'test_or_tuple',
'test_power_bool',
'test_power_bytearray',
'test_power_bytes',
'test_power_class',
'test_power_complex',
'test_power_dict',
'test_power_float',
'test_power_frozenset',
'test_power_int',
'test_power_list',
'test_power_None',
'test_power_NotImplemented',
'test_power_range',
'test_power_set',
'test_power_slice',
'test_power_str',
'test_power_tuple',
'test_rshift_bool',
'test_rshift_bytearray',
'test_rshift_bytes',
'test_rshift_class',
'test_rshift_complex',
'test_rshift_dict',
'test_rshift_float',
'test_rshift_frozenset',
'test_rshift_int',
'test_rshift_list',
'test_rshift_None',
'test_rshift_NotImplemented',
'test_rshift_range',
'test_rshift_set',
'test_rshift_slice',
'test_rshift_str',
'test_rshift_tuple',
'test_subtract_bool',
'test_subtract_bytearray',
'test_subtract_bytes',
'test_subtract_class',
'test_subtract_complex',
'test_subtract_dict',
'test_subtract_float',
'test_subtract_frozenset',
'test_subtract_int',
'test_subtract_list',
'test_subtract_None',
'test_subtract_NotImplemented',
'test_subtract_range',
'test_subtract_set',
'test_subtract_slice',
'test_subtract_str',
'test_subtract_tuple',
'test_true_divide_bool',
'test_true_divide_bytearray',
'test_true_divide_bytes',
'test_true_divide_class',
'test_true_divide_complex',
'test_true_divide_dict',
'test_true_divide_float',
'test_true_divide_frozenset',
'test_true_divide_int',
'test_true_divide_list',
'test_true_divide_None',
'test_true_divide_NotImplemented',
'test_true_divide_range',
'test_true_divide_set',
'test_true_divide_slice',
'test_true_divide_str',
'test_true_divide_tuple',
'test_xor_bool',
'test_xor_bytearray',
'test_xor_bytes',
'test_xor_class',
'test_xor_complex',
'test_xor_dict',
'test_xor_float',
'test_xor_frozenset',
'test_xor_int',
'test_xor_list',
'test_xor_None',
'test_xor_NotImplemented',
'test_xor_range',
'test_xor_set',
'test_xor_slice',
'test_xor_str',
'test_xor_tuple',
]
|
|
'''
Code generator for sprite sheets.
'''
import genutil as util
from util import png
import os
Version = 6
#-------------------------------------------------------------------------------
class Sprite :
def __init__(self) :
self.name = ''
self.x = 0
self.y = 0
self.w = 0
self.h = 0
self.frames = 1
self.anim = ''
self.char = None
#-------------------------------------------------------------------------------
class SpriteSheet :
def __init__(self, input, out_src, out_hdr) :
self.input = input
self.out_src = out_src
self.out_hdr = out_hdr
self.ns = ''
self.imagePath = ''
self.imageWidth = 0
self.imageHeight = 0
self.clampWidth = 0
self.clampHeight = 0
self.imagePixels = None
self.imageInfo = None
self.defSpriteWidth = 0
self.defSpriteHeight = 0
self.sprites = []
def namespace(self, ns) :
self.ns = ns
def image(self, img) :
self.imagePath = os.path.dirname(self.input) + '/' + img
def clampImageSize(self, w, h) :
self.clampWidth = w
self.clampHeight = h
def defaultSpriteSize(self, w, h) :
self.defSpriteWidth = w
self.defSpriteHeight = h
def sprite(self, name, x, y, w=0, h=0, frames=1, anim='none', char=None) :
sprite = Sprite()
sprite.name = name
sprite.x = x
sprite.y = y
sprite.w = w if w != 0 else self.defSpriteWidth
sprite.h = h if h != 0 else self.defSpriteHeight
sprite.frames = frames
sprite.anim = anim
sprite.char = char
self.sprites.append(sprite)
def loadImage(self) :
pngReader = png.Reader(self.imagePath)
img = pngReader.asRGBA8()
self.imageWidth = img[0]
self.imageHeight = img[1]
if self.clampWidth > 0 and self.clampWidth < self.imageWidth :
self.imageWidth = self.clampWidth
print 'Clamped image width to {}'.format(self.imageWidth)
if self.clampHeight > 0 and self.clampHeight < self.imageHeight :
self.imageHeight = self.clampHeight
print 'Clamped image height to {}'.format(self.imageHeight)
self.imagePixels = img[2]
self.imageInfo = img[3]
def buildCharMap(self) :
charMap = [None] * 256
for sprite in self.sprites :
if sprite.char is not None:
ascii = ord(sprite.char[0])
if ascii < 256 :
charMap[ascii] = sprite
return charMap
def writeHeaderTop(self, f) :
f.write('#pragma once\n')
f.write('//-----------------------------------------------------------------------------\n')
f.write('/* #version:{}#\n'.format(Version))
f.write(' machine generated, do not edit!\n')
f.write('*/\n')
f.write('#include "Core/Types.h"\n')
f.write('namespace ' + self.ns + ' {\n')
def writeHeaderBottom(self, f) :
f.write('}\n')
f.write('\n')
def writeSpriteSheet(self, f) :
numPixels = self.imageWidth * self.imageHeight
numBytes = numPixels * 4
f.write('struct Sheet {\n')
f.write(' static const Oryol::int32 Width{' + str(self.imageWidth) + '};\n')
f.write(' static const Oryol::int32 Height{' + str(self.imageHeight) + '};\n')
f.write(' static const Oryol::int32 NumBytes{' + str(numBytes) + '};\n')
f.write(' static const Oryol::uint32 Pixels[{}];\n'.format(numPixels))
f.write(' enum SpriteId {\n')
for sprite in self.sprites :
f.write(' ' + sprite.name + ',\n')
f.write('\n')
f.write(' NumSprites,\n')
f.write(' InvalidSprite\n')
f.write(' };\n')
f.write(' class Anim {\n')
f.write(' public:\n')
f.write(' enum Code {\n')
f.write(' None,\n')
f.write(' Loop,\n')
f.write(' PingPong,\n')
f.write(' Clamp,\n')
f.write(' };\n')
f.write(' };\n')
f.write(' static const SpriteId CharMap[256];\n')
f.write(' static const struct sprite {\n')
f.write(' SpriteId id;\n')
f.write(' Oryol::int32 X;\n')
f.write(' Oryol::int32 Y;\n')
f.write(' Oryol::int32 W;\n')
f.write(' Oryol::int32 H;\n')
f.write(' Oryol::int32 NumFrames;\n')
f.write(' Anim::Code AnimType;\n')
f.write(' Oryol::uint8 Char;\n')
f.write(' } Sprite[NumSprites];\n')
f.write('};\n')
def genHeader(self, absHeaderPath) :
f = open(absHeaderPath, 'w')
self.writeHeaderTop(f)
self.writeSpriteSheet(f)
self.writeHeaderBottom(f)
f.close()
def writeSourceTop(self, f, absSourcePath) :
path, hdrFileAndExt = os.path.split(absSourcePath)
hdrFile, ext = os.path.splitext(hdrFileAndExt)
f.write('//-----------------------------------------------------------------------------\n')
f.write('// #version:{}# machine generated, do not edit!\n'.format(Version))
f.write('//-----------------------------------------------------------------------------\n')
f.write('#include "Pre.h"\n')
f.write('#include "' + hdrFile + '.h"\n')
f.write('\n')
f.write('namespace ' + self.ns + ' {\n')
def writeImageData(self, f) :
width = self.imageWidth
height = self.imageHeight
numPixels = width * height
f.write('const Oryol::uint32 Sheet::Pixels[' + str(numPixels) + '] = {\n')
for y,row in enumerate(self.imagePixels) :
if y < self.imageHeight :
f.write(' ')
for x in xrange(0, width) :
offset = x * 4
r = row[offset]
g = row[offset + 1]
b = row[offset + 2]
a = row[offset + 3]
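                    # write as 0xAABBGGRR so the little-endian uint32 holds the bytes in RGBA order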
f.write('0x{:02x}{:02x}{:02x}{:02x},'.format(a, b, g, r))
f.write('\n')
f.write('};\n')
def writeSpriteData(self, f) :
mapAnimType = {
'': 'None',
'none': 'None',
'loop': 'Loop',
'pingpong': 'PingPong',
'clamp': 'Clamp'
}
f.write('const Sheet::sprite Sheet::Sprite[Sheet::NumSprites] = {\n')
for sprite in self.sprites :
name = str(sprite.name)
x = str(sprite.x)
y = str(sprite.y)
w = str(sprite.w)
h = str(sprite.h)
frs = str(sprite.frames)
anm = 'Sheet::Anim::' + mapAnimType[sprite.anim]
char = 0
if sprite.char is not None :
char = ord(sprite.char[0])
char = str(char)
f.write(' { '+name+','+x+','+y+','+w+','+h+','+frs+','+anm+','+char+' },\n')
f.write('};\n')
def writeSpriteCharMap(self, f) :
f.write('const Sheet::SpriteId Sheet::CharMap[256] = {\n')
charMap = self.buildCharMap()
for sprite in charMap :
if sprite is None :
f.write(' Sheet::InvalidSprite,\n')
else :
f.write(' Sheet::' + sprite.name + ',\n')
f.write('};\n')
def writeSourceBottom(self, f) :
f.write('}\n')
f.write('\n')
def genSource(self, absSourcePath) :
f = open(absSourcePath, 'w')
self.writeSourceTop(f, absSourcePath)
self.writeImageData(f)
self.writeSpriteData(f)
self.writeSpriteCharMap(f)
self.writeSourceBottom(f)
f.close()
#-------------------------------------------------------------------------------
def generate(self) :
if util.isDirty(Version, [self.input, self.imagePath], [self.out_src, self.out_hdr]) :
self.loadImage()
self.genHeader(self.out_hdr)
self.genSource(self.out_src)
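#-------------------------------------------------------------------------------
def exampleUsage(gen) :
    """
    Illustrative sketch only, not called by the build: shows how a driver
    script is expected to configure the generator defined above and emit the
    C++ header/source pair. 'gen' is an instance of the generator class; the
    namespace, image path, sprite name and sizes below are placeholders.
    """
    gen.namespace('Example')
    gen.image('sprites.png')
    gen.defaultSpriteSize(8, 8)
    gen.sprite('Hero', 0, 0, frames=2, anim='loop', char='H')
    gen.generate()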
|
|
#!/usr/bin/python2
from __future__ import print_function
import unittest
import logging
logging.basicConfig(level=logging.INFO)
import datetime
import gzip
import os
# own modules
from Timeseries import Timeseries as Timeseries
from TimeseriesArray import TimeseriesArray as TimeseriesArray
from TimeseriesArrayStats import TimeseriesArrayStats as TimeseriesArrayStats
meta2 = {
"blacklist": [],
"delimiter": "\t",
"headers": [
"fcIfC3Discards",
"fcIfC3InFrames",
"fcIfC3InOctets",
"fcIfC3OutFrames",
"fcIfC3OutOctets",
"hostname",
"ifDescr",
"index",
"ts"
],
"index_keynames": [
"hostname",
"ifDescr"
],
"interval": 300,
"ts_keyname": "ts",
"value_keynames": {
"fcIfC3Discards": "counter64",
"fcIfC3InFrames": "counter64",
"fcIfC3InOctets": "counter64",
"fcIfC3OutFrames": "counter64",
"fcIfC3OutOctets": "counter64",
"index": "asis"
}
}
meta = {
"blacklist": [],
"delimiter": "\t",
"headers": [
"ts",
"hostname",
"com_select",
"uptime",
"com_insert",
"slow_queries",
"bytes_sent",
"com_update",
"connections",
"com_delete",
"qcache_hits",
"questions",
"opened_tables",
"aborted_connects",
"bytes_received",
"created_tmp_tables",
"created_tmp_disk_tables",
"aborted_clients"
],
"index_keynames": [
"hostname"
],
"interval": 300,
"ts_keyname": "ts",
"value_keynames": {
"com_select": "persecond",
"uptime": "asis",
"com_insert": "persecond",
"slow_queries": "asis",
"bytes_sent": "persecond",
"com_update": "persecond",
"connections": "persecond",
"com_delete": "persecond",
"qcache_hits": "persecond",
"questions": "persecond",
"opened_tables": "asis",
"aborted_connects": "persecond",
"bytes_received": "persecond",
"created_tmp_tables": "persecond",
"created_tmp_disk_tables": "persecond",
"aborted_clients": "persecond",
}
}
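def load_example_tsa(basedir="testdata/"):
    """
    Illustrative helper, not used by the tests below: shows the load pattern the
    test cases rely on. The meta dict's index_keynames select the key columns,
    while value_keynames doubles as the datatype mapping passed to
    TimeseriesArray.load. The basedir default is a placeholder.
    """
    return TimeseriesArray.load(basedir, meta["index_keynames"], datatypes=meta["value_keynames"])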
class Test(unittest.TestCase):
def setUp(self):
self.basedir = "/var/rrd"
self.datestring = "2015-11-30"
self.testfile = "testdata/fcIfC3AccountingTable"
index_keys = (u"hostname", ) # the unicode is important
self.app = TimeseriesArray.load("testdata/", meta["index_keynames"], datatypes=meta["value_keynames"])
def test_str(self):
print("testing __str__")
print(self.app)
def getitem_grouped__(self, key):
pass
def test_items(self):
print("testing items")
for key, value in self.app.items():
assert isinstance(key, tuple)
assert isinstance(value, Timeseries)
def test_values(self):
print("testing values")
for value in self.app.values():
assert isinstance(value, Timeseries)
def test_keys(self):
print("testing keys")
for key in self.app.keys():
assert isinstance(key, tuple)
def test_getitem(self):
print("testing __getitem__")
key = ('nagios.tilak.cc',)
ts = self.app[key]
assert ts[0][1] == 13353334.0
def test_get_index_dict(self):
print("testing get_index_dict")
key = ('nagios.tilak.cc',)
index_dict = self.app.get_index_dict(key)
assert index_dict["hostname"] == key[0]
def test_index_keynames(self):
print("testing index_keynames")
assert self.app.index_keynames == ('hostname',)
def test_value_keynames(self):
print("testing value_keynames")
assert self.app.value_keynames == ['com_select', 'uptime', 'com_insert', 'slow_queries', 'bytes_sent', 'com_update', 'connections', 'com_delete', 'qcache_hits', 'questions', 'opened_tables', 'aborted_connects', 'bytes_received', 'created_tmp_tables', 'created_tmp_disk_tables', 'aborted_clients']
def test_ts_key(self):
print("testing ts_key")
assert self.app.ts_key == "ts"
def test_stats(self):
print("testing stats")
stats = self.app.stats
assert isinstance(stats, TimeseriesArrayStats)
def test_cache(self):
print("testing cache")
# this test app is initialized without caching
assert self.app.cache == False
def set_group_keyname(self, index_keyname, group_func):
pass
def test_to_float(self):
print("testing to_float")
test = self.app.to_float("3.14255469657")
assert test == 3.14255469657
test = self.app.to_float("3,14255469657")
assert test == 3.14255469657
def add(self, data, group_func=None):
pass
def group_add(self, data, group_func):
pass
def append(self, key, timeserie):
pass
def groupby(self, fieldnum, group_func, time_func="avg"):
pass
def test_convert(self):
print("testing load, cache, convert, slice")
tsa = TimeseriesArray.load("testdata/", meta["index_keynames"], datatypes=meta["value_keynames"])
        tsa.cache = True # that's crucial, otherwise every timeseries will always be read from disk
tsa.convert(colname="uptime", datatype="persecond", newcolname="uptime_persecond")
tsa.convert("uptime", "derive", "uptime_derive")
tsa.convert("uptime", "percent", "uptime_percent")
tsa.convert("com_select", "counter32", "com_select_counter32")
tsa.convert("com_select", "counter64", "com_select_counter64")
tsa.convert("com_select", "gauge32", "com_select_gauge32")
tsa.convert("com_select", "counterreset", "com_select_counterreset")
assert all(newcol in tsa.value_keynames for newcol in ("uptime_persecond", "uptime_derive", "uptime_percent", "com_select_counter32", "com_select_counter64", "com_select_gauge32", "com_select_counterreset"))
tsa1 = tsa.slice(("uptime", "uptime_persecond", "com_select_gauge32"))
print(tsa1[('nagios.tilak.cc',)])
def test_add_calc_col_single(self):
print("testing load, cache, add_calc_col_single, slice")
tsa = TimeseriesArray.load("testdata/", meta["index_keynames"], datatypes=meta["value_keynames"])
tsa.cache = True
tsa.add_calc_col_single("bytes_received", "kbytes_received", lambda a: a / 8)
tsa1 = tsa.slice(("bytes_received", "kbytes_received"))
print(tsa1[('nagios.tilak.cc',)])
def test_add_calc_col_full(self):
print("testing load, add_calc_col_full, cache, slice")
tsa = TimeseriesArray.load("testdata/", meta["index_keynames"], datatypes=meta["value_keynames"])
tsa.cache = True
tsa.add_calc_col_full("kbytes_total", lambda a: (a["bytes_received"] + a["bytes_sent"]) / 8)
tsa1 = tsa.slice(("bytes_received", "bytes_sent", "kbytes_total"))
print(tsa1[('nagios.tilak.cc',)])
def test_remove_col(self):
print("testing remove_col, load, cache")
tsa = TimeseriesArray.load("testdata/", meta["index_keynames"], datatypes=meta["value_keynames"])
tsa.cache = True
tsa.remove_col("uptime")
assert "uptime" not in tsa.value_keynames
try:
tsa.remove_col("unknown")
except KeyError:
# should raise this exception
pass
def test_slice(self):
print("testing slice")
tsa = self.app.slice(("uptime",))
        # there should only be 2 columns left
assert len(tsa[('nagios.tilak.cc',)][0]) == 2
        # check latest uptime value
assert tsa[('nagios.tilak.cc',)][-1][1] == 13439433.0
def test_export(self):
print("testing export, add")
tsa = TimeseriesArray(meta["index_keynames"], meta["value_keynames"], "ts", datatypes=meta["value_keynames"])
for entry in self.app.export():
tsa.add(entry)
assert tsa == self.app
def test_dump(self):
print("testing dump, get_ts_dumpfilename, __eq__")
testdir = "testdata/tsa_testdump"
if not os.path.isdir(testdir):
os.mkdir(testdir)
tsa = TimeseriesArray.load("testdata/fcIfC3AccountingTable", meta2["index_keynames"], datatypes=meta2["value_keynames"], filterkeys=None, index_pattern=None, matchtype="and")
tsa.dump(testdir, overwrite=True)
tsa1 = TimeseriesArray.load(testdir, meta2["index_keynames"], datatypes=meta2["value_keynames"], filterkeys=None, index_pattern=None, matchtype="and")
assert tsa == tsa1
def test_load(self):
print("testing load, get_ts_filename, filtermatch, get_dumpfilename")
tsa = TimeseriesArray.load("testdata/fcIfC3AccountingTable", meta2["index_keynames"], datatypes=meta2["value_keynames"], filterkeys=None, index_pattern=None, matchtype="and")
#for key in tsa.keys():
# print(key)
# loading only Timeseries matching this index key
filterkeys = {"hostname" : "fca-sr2-8gb-21", "ifDescr" : None}
tsa = TimeseriesArray.load("testdata/fcIfC3AccountingTable", meta2["index_keynames"], datatypes=meta2["value_keynames"], filterkeys=filterkeys, index_pattern=None, matchtype="and")
for key in tsa.keys():
assert key[0] == "fca-sr2-8gb-21"
# loading only Timeseries matching this index key
filterkeys = {"hostname" : None, "ifDescr" : "port-channel 1"}
tsa = TimeseriesArray.load("testdata/fcIfC3AccountingTable", meta2["index_keynames"], datatypes=meta2["value_keynames"], filterkeys=filterkeys, index_pattern=None, matchtype="and")
for key in tsa.keys():
assert key[1] == "port-channel 1"
# loading only Timeseries matching this index key using or
filterkeys = {"hostname" : "fca-sr2-8gb-21", "ifDescr" : "port-channel 1"}
tsa = TimeseriesArray.load("testdata/fcIfC3AccountingTable", meta2["index_keynames"], datatypes=meta2["value_keynames"], filterkeys=filterkeys, index_pattern=None, matchtype="or")
for key in tsa.keys():
assert key[1] == "port-channel 1" or key[0] == "fca-sr2-8gb-21"
# using regular expression to filter some index_keys
tsa = TimeseriesArray.load("testdata/fcIfC3AccountingTable", meta2["index_keynames"], datatypes=meta2["value_keynames"], filterkeys=None, index_pattern="(.*)fca-(.*)", matchtype="and")
for key in tsa.keys():
assert "fca-" in str(key)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SignatureDef utility functions implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import utils
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(
'saved_model.build_signature_def',
v1=[
'saved_model.build_signature_def',
'saved_model.signature_def_utils.build_signature_def'
])
@deprecation.deprecated_endpoints(
'saved_model.signature_def_utils.build_signature_def')
def build_signature_def(inputs=None, outputs=None, method_name=None):
"""Utility function to build a SignatureDef protocol buffer.
Args:
inputs: Inputs of the SignatureDef defined as a proto map of string to
tensor info.
outputs: Outputs of the SignatureDef defined as a proto map of string to
tensor info.
method_name: Method name of the SignatureDef as a string.
Returns:
A SignatureDef protocol buffer constructed based on the supplied arguments.
"""
signature_def = meta_graph_pb2.SignatureDef()
if inputs is not None:
for item in inputs:
signature_def.inputs[item].CopyFrom(inputs[item])
if outputs is not None:
for item in outputs:
signature_def.outputs[item].CopyFrom(outputs[item])
if method_name is not None:
signature_def.method_name = method_name
return signature_def
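def _build_signature_def_example():
  """Illustrative sketch only, not part of the public API.
  Shows how `build_signature_def` assembles a SignatureDef from hand-built
  TensorInfo protos; the tensor names 'x:0'/'y:0' and the choice of the
  Predict method are placeholders for illustration.
  """
  input_info = meta_graph_pb2.TensorInfo(name='x:0', dtype=types_pb2.DT_FLOAT)
  output_info = meta_graph_pb2.TensorInfo(name='y:0', dtype=types_pb2.DT_FLOAT)
  return build_signature_def(
      inputs={'x': input_info},
      outputs={'y': output_info},
      method_name=signature_constants.PREDICT_METHOD_NAME)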
@tf_export(
'saved_model.regression_signature_def',
v1=[
'saved_model.regression_signature_def',
'saved_model.signature_def_utils.regression_signature_def'
])
@deprecation.deprecated_endpoints(
'saved_model.signature_def_utils.regression_signature_def')
def regression_signature_def(examples, predictions):
"""Creates regression signature from given examples and predictions.
This function produces signatures intended for use with the TensorFlow Serving
Regress API (tensorflow_serving/apis/prediction_service.proto), and so
constrains the input and output types to those allowed by TensorFlow Serving.
Args:
examples: A string `Tensor`, expected to accept serialized tf.Examples.
predictions: A float `Tensor`.
Returns:
A regression-flavored signature_def.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('Regression examples cannot be None.')
if not isinstance(examples, ops.Tensor):
raise ValueError('Regression examples must be a string Tensor.')
if predictions is None:
raise ValueError('Regression predictions cannot be None.')
input_tensor_info = utils.build_tensor_info(examples)
if input_tensor_info.dtype != types_pb2.DT_STRING:
raise ValueError('Regression examples must be a string Tensor.')
signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor_info}
output_tensor_info = utils.build_tensor_info(predictions)
if output_tensor_info.dtype != types_pb2.DT_FLOAT:
raise ValueError('Regression output must be a float Tensor.')
signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor_info}
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.REGRESS_METHOD_NAME)
return signature_def
@tf_export(
'saved_model.classification_signature_def',
v1=[
'saved_model.classification_signature_def',
'saved_model.signature_def_utils.classification_signature_def'
])
@deprecation.deprecated_endpoints(
'saved_model.signature_def_utils.classification_signature_def')
def classification_signature_def(examples, classes, scores):
"""Creates classification signature from given examples and predictions.
This function produces signatures intended for use with the TensorFlow Serving
Classify API (tensorflow_serving/apis/prediction_service.proto), and so
constrains the input and output types to those allowed by TensorFlow Serving.
Args:
examples: A string `Tensor`, expected to accept serialized tf.Examples.
classes: A string `Tensor`. Note that the ClassificationResponse message
requires that class labels are strings, not integers or anything else.
scores: a float `Tensor`.
Returns:
A classification-flavored signature_def.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('Classification examples cannot be None.')
if not isinstance(examples, ops.Tensor):
raise ValueError('Classification examples must be a string Tensor.')
if classes is None and scores is None:
raise ValueError('Classification classes and scores cannot both be None.')
input_tensor_info = utils.build_tensor_info(examples)
if input_tensor_info.dtype != types_pb2.DT_STRING:
raise ValueError('Classification examples must be a string Tensor.')
signature_inputs = {signature_constants.CLASSIFY_INPUTS: input_tensor_info}
signature_outputs = {}
if classes is not None:
classes_tensor_info = utils.build_tensor_info(classes)
if classes_tensor_info.dtype != types_pb2.DT_STRING:
raise ValueError('Classification classes must be a string Tensor.')
signature_outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES] = (
classes_tensor_info)
if scores is not None:
scores_tensor_info = utils.build_tensor_info(scores)
if scores_tensor_info.dtype != types_pb2.DT_FLOAT:
raise ValueError('Classification scores must be a float Tensor.')
signature_outputs[signature_constants.CLASSIFY_OUTPUT_SCORES] = (
scores_tensor_info)
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.CLASSIFY_METHOD_NAME)
return signature_def
@tf_export(
'saved_model.predict_signature_def',
v1=[
'saved_model.predict_signature_def',
'saved_model.signature_def_utils.predict_signature_def'
])
@deprecation.deprecated_endpoints(
'saved_model.signature_def_utils.predict_signature_def')
def predict_signature_def(inputs, outputs):
"""Creates prediction signature from given inputs and outputs.
This function produces signatures intended for use with the TensorFlow Serving
Predict API (tensorflow_serving/apis/prediction_service.proto). This API
imposes no constraints on the input and output types.
Args:
inputs: dict of string to `Tensor`.
outputs: dict of string to `Tensor`.
Returns:
A prediction-flavored signature_def.
Raises:
ValueError: If inputs or outputs is `None`.
"""
if inputs is None or not inputs:
raise ValueError('Prediction inputs cannot be None or empty.')
if outputs is None or not outputs:
raise ValueError('Prediction outputs cannot be None or empty.')
signature_inputs = {key: utils.build_tensor_info(tensor)
for key, tensor in inputs.items()}
signature_outputs = {key: utils.build_tensor_info(tensor)
for key, tensor in outputs.items()}
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.PREDICT_METHOD_NAME)
return signature_def
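def _predict_signature_def_example():
  """Illustrative sketch only, not part of the public API.
  Builds a predict SignatureDef from two constant tensors created in a
  throwaway graph; the 'images'/'scores' keys and values are placeholders.
  """
  from tensorflow.python.framework import constant_op  # local import, example only
  with ops.Graph().as_default():
    images = constant_op.constant([[0.0, 1.0]])
    scores = constant_op.constant([[0.5, 0.5]])
    return predict_signature_def(inputs={'images': images},
                                 outputs={'scores': scores})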
def supervised_train_signature_def(
inputs, loss, predictions=None, metrics=None):
return _supervised_signature_def(
signature_constants.SUPERVISED_TRAIN_METHOD_NAME, inputs, loss=loss,
predictions=predictions, metrics=metrics)
def supervised_eval_signature_def(
inputs, loss, predictions=None, metrics=None):
return _supervised_signature_def(
signature_constants.SUPERVISED_EVAL_METHOD_NAME, inputs, loss=loss,
predictions=predictions, metrics=metrics)
def _supervised_signature_def(
method_name, inputs, loss=None, predictions=None,
metrics=None):
"""Creates a signature for training and eval data.
This function produces signatures that describe the inputs and outputs
of a supervised process, such as training or evaluation, that
results in loss, metrics, and the like. Note that this function only requires
inputs to be not None.
Args:
method_name: Method name of the SignatureDef as a string.
inputs: dict of string to `Tensor`.
loss: dict of string to `Tensor` representing computed loss.
predictions: dict of string to `Tensor` representing the output predictions.
metrics: dict of string to `Tensor` representing metric ops.
Returns:
A train- or eval-flavored signature_def.
Raises:
ValueError: If inputs or outputs is `None`.
"""
if inputs is None or not inputs:
raise ValueError('{} inputs cannot be None or empty.'.format(method_name))
signature_inputs = {key: utils.build_tensor_info(tensor)
for key, tensor in inputs.items()}
signature_outputs = {}
for output_set in (loss, predictions, metrics):
if output_set is not None:
sig_out = {key: utils.build_tensor_info(tensor)
for key, tensor in output_set.items()}
signature_outputs.update(sig_out)
signature_def = build_signature_def(
signature_inputs, signature_outputs, method_name)
return signature_def
@tf_export(
'saved_model.is_valid_signature',
v1=[
'saved_model.is_valid_signature',
'saved_model.signature_def_utils.is_valid_signature'
])
@deprecation.deprecated_endpoints(
'saved_model.signature_def_utils.is_valid_signature')
def is_valid_signature(signature_def):
"""Determine whether a SignatureDef can be served by TensorFlow Serving."""
if signature_def is None:
return False
return (_is_valid_classification_signature(signature_def) or
_is_valid_regression_signature(signature_def) or
_is_valid_predict_signature(signature_def))
def _is_valid_predict_signature(signature_def):
"""Determine whether the argument is a servable 'predict' SignatureDef."""
if signature_def.method_name != signature_constants.PREDICT_METHOD_NAME:
return False
if not signature_def.inputs.keys():
return False
if not signature_def.outputs.keys():
return False
return True
def _is_valid_regression_signature(signature_def):
"""Determine whether the argument is a servable 'regress' SignatureDef."""
if signature_def.method_name != signature_constants.REGRESS_METHOD_NAME:
return False
if (set(signature_def.inputs.keys())
!= set([signature_constants.REGRESS_INPUTS])):
return False
if (signature_def.inputs[signature_constants.REGRESS_INPUTS].dtype !=
types_pb2.DT_STRING):
return False
if (set(signature_def.outputs.keys())
!= set([signature_constants.REGRESS_OUTPUTS])):
return False
if (signature_def.outputs[signature_constants.REGRESS_OUTPUTS].dtype !=
types_pb2.DT_FLOAT):
return False
return True
def _is_valid_classification_signature(signature_def):
"""Determine whether the argument is a servable 'classify' SignatureDef."""
if signature_def.method_name != signature_constants.CLASSIFY_METHOD_NAME:
return False
if (set(signature_def.inputs.keys())
!= set([signature_constants.CLASSIFY_INPUTS])):
return False
if (signature_def.inputs[signature_constants.CLASSIFY_INPUTS].dtype !=
types_pb2.DT_STRING):
return False
allowed_outputs = set([signature_constants.CLASSIFY_OUTPUT_CLASSES,
signature_constants.CLASSIFY_OUTPUT_SCORES])
if not signature_def.outputs.keys():
return False
if set(signature_def.outputs.keys()) - allowed_outputs:
return False
if (signature_constants.CLASSIFY_OUTPUT_CLASSES in signature_def.outputs
and
signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES].dtype
!= types_pb2.DT_STRING):
return False
if (signature_constants.CLASSIFY_OUTPUT_SCORES in signature_def.outputs
and
signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_SCORES].dtype !=
types_pb2.DT_FLOAT):
return False
return True
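def _is_valid_signature_example():
  """Illustrative sketch only, not part of the public API.
  A minimal predict-style SignatureDef (non-empty inputs and outputs plus the
  Predict method name) is accepted by `is_valid_signature`; the tensor names
  are placeholders.
  """
  signature = build_signature_def(
      inputs={'x': meta_graph_pb2.TensorInfo(name='x:0',
                                             dtype=types_pb2.DT_FLOAT)},
      outputs={'y': meta_graph_pb2.TensorInfo(name='y:0',
                                              dtype=types_pb2.DT_FLOAT)},
      method_name=signature_constants.PREDICT_METHOD_NAME)
  return is_valid_signature(signature)  # True: servable through the Predict API.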
|
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from WebIDL import IDLInterface, IDLExternalInterface
autogenerated_comment = "/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n"
class Configuration:
"""
Represents global configuration state based on IDL parse data and
the configuration file.
"""
def __init__(self, filename, parseData):
# Read the configuration file.
glbl = {}
execfile(filename, glbl)
config = glbl['DOMInterfaces']
# Build descriptors for all the interfaces we have in the parse data.
# This allows callers to specify a subset of interfaces by filtering
# |parseData|.
self.descriptors = []
self.interfaces = {}
        self.maxProtoChainLength = 0
for thing in parseData:
# Some toplevel things are sadly types, and those have an
# isInterface that doesn't mean the same thing as IDLObject's
# isInterface()...
if (not isinstance(thing, IDLInterface) and
not isinstance(thing, IDLExternalInterface)):
continue
iface = thing
self.interfaces[iface.identifier.name] = iface
if iface.identifier.name not in config:
# Completely skip consequential interfaces with no descriptor
# because chances are we don't need to do anything interesting
# with them.
if iface.isConsequential():
continue
entry = {}
else:
entry = config[iface.identifier.name]
if not isinstance(entry, list):
assert isinstance(entry, dict)
entry = [entry]
elif len(entry) == 1 and entry[0].get("workers", False):
# List with only a workers descriptor means we should
# infer a mainthread descriptor. If you want only
# workers bindings, don't use a list here.
entry.append({})
self.descriptors.extend([Descriptor(self, iface, x) for x in entry])
# Mark the descriptors for which only a single nativeType implements
# an interface.
for descriptor in self.descriptors:
            interfaceName = descriptor.interface.identifier.name
            otherDescriptors = [d for d in self.descriptors
                                if d.interface.identifier.name == interfaceName]
descriptor.uniqueImplementation = len(otherDescriptors) == 1
self.enums = [e for e in parseData if e.isEnum()]
self.dictionaries = [d for d in parseData if d.isDictionary()]
# Keep the descriptor list sorted for determinism.
self.descriptors.sort(lambda x,y: cmp(x.name, y.name))
def getInterface(self, ifname):
return self.interfaces[ifname]
def getDescriptors(self, **filters):
"""Gets the descriptors that match the given filters."""
curr = self.descriptors
for key, val in filters.iteritems():
if key == 'webIDLFile':
getter = lambda x: x.interface.filename()
elif key == 'hasInterfaceObject':
getter = lambda x: (not x.interface.isExternal() and
x.interface.hasInterfaceObject())
elif key == 'hasInterfacePrototypeObject':
getter = lambda x: (not x.interface.isExternal() and
x.interface.hasInterfacePrototypeObject())
elif key == 'hasInterfaceOrInterfacePrototypeObject':
getter = lambda x: x.hasInterfaceOrInterfacePrototypeObject()
elif key == 'isCallback':
getter = lambda x: x.interface.isCallback()
elif key == 'isExternal':
getter = lambda x: x.interface.isExternal()
else:
getter = lambda x: getattr(x, key)
curr = filter(lambda x: getter(x) == val, curr)
return curr
def getEnums(self, webIDLFile):
return filter(lambda e: e.filename() == webIDLFile, self.enums)
def getDictionaries(self, webIDLFile):
return filter(lambda d: d.filename() == webIDLFile, self.dictionaries)
def getDescriptor(self, interfaceName, workers):
"""
Gets the appropriate descriptor for the given interface name
and the given workers boolean.
"""
iface = self.getInterface(interfaceName)
descriptors = self.getDescriptors(interface=iface)
# The only filter we currently have is workers vs non-workers.
matches = filter(lambda x: x.workers is workers, descriptors)
# After filtering, we should have exactly one result.
        if len(matches) != 1:
            raise NoSuchDescriptorError("For " + interfaceName + " found " +
                                        str(len(matches)) + " matches")
return matches[0]
def getDescriptorProvider(self, workers):
"""
Gets a descriptor provider that can provide descriptors as needed,
for the given workers boolean
"""
return DescriptorProvider(self, workers)
class NoSuchDescriptorError(TypeError):
def __init__(self, str):
TypeError.__init__(self, str)
class DescriptorProvider:
"""
A way of getting descriptors for interface names
"""
def __init__(self, config, workers):
self.config = config
self.workers = workers
def getDescriptor(self, interfaceName):
"""
Gets the appropriate descriptor for the given interface name given the
context of the current descriptor. This selects the appropriate
implementation for cases like workers.
"""
return self.config.getDescriptor(interfaceName, self.workers)
class Descriptor(DescriptorProvider):
"""
Represents a single descriptor for an interface. See Bindings.conf.
"""
def __init__(self, config, interface, desc):
DescriptorProvider.__init__(self, config, desc.get('workers', False))
self.interface = interface
# Read the desc, and fill in the relevant defaults.
ifaceName = self.interface.identifier.name
if self.interface.isExternal() or self.interface.isCallback():
if self.workers:
nativeTypeDefault = "JSObject"
else:
nativeTypeDefault = "nsIDOM" + ifaceName
else:
if self.workers:
nativeTypeDefault = "mozilla::dom::workers::" + ifaceName
else:
nativeTypeDefault = "mozilla::dom::" + ifaceName
self.nativeType = desc.get('nativeType', nativeTypeDefault)
self.hasInstanceInterface = desc.get('hasInstanceInterface', None)
# Do something sane for JSObject
if self.nativeType == "JSObject":
headerDefault = "jsapi.h"
else:
if self.workers:
headerDefault = "mozilla/dom/workers/bindings/%s.h" % ifaceName
else:
headerDefault = self.nativeType
headerDefault = headerDefault.replace("::", "/") + ".h"
self.headerFile = desc.get('headerFile', headerDefault)
if self.interface.isCallback() or self.interface.isExternal():
if 'castable' in desc:
raise TypeError("%s is external or callback but has a castable "
"setting" % self.interface.identifier.name)
self.castable = False
else:
self.castable = desc.get('castable', True)
self.notflattened = desc.get('notflattened', False)
self.register = desc.get('register', True)
self.hasXPConnectImpls = desc.get('hasXPConnectImpls', False)
# If we're concrete, we need to crawl our ancestor interfaces and mark
# them as having a concrete descendant.
self.concrete = desc.get('concrete', not self.interface.isExternal())
if self.concrete:
self.proxy = False
operations = {
'IndexedGetter': None,
'IndexedSetter': None,
'IndexedCreator': None,
'IndexedDeleter': None,
'NamedGetter': None,
'NamedSetter': None,
'NamedCreator': None,
'NamedDeleter': None,
'Stringifier': None
}
iface = self.interface
while iface:
for m in iface.members:
if not m.isMethod():
continue
def addOperation(operation, m):
if not operations[operation]:
operations[operation] = m
def addIndexedOrNamedOperation(operation, m):
self.proxy = True
if m.isIndexed():
operation = 'Indexed' + operation
else:
assert m.isNamed()
operation = 'Named' + operation
addOperation(operation, m)
if m.isStringifier():
addOperation('Stringifier', m)
else:
if m.isGetter():
addIndexedOrNamedOperation('Getter', m)
if m.isSetter():
addIndexedOrNamedOperation('Setter', m)
if m.isCreator():
addIndexedOrNamedOperation('Creator', m)
if m.isDeleter():
addIndexedOrNamedOperation('Deleter', m)
raise TypeError("deleter specified on %s but we "
"don't support deleters yet" %
self.interface.identifier.name)
iface.setUserData('hasConcreteDescendant', True)
iface = iface.parent
if self.proxy:
self.operations = operations
iface = self.interface
while iface:
iface.setUserData('hasProxyDescendant', True)
iface = iface.parent
if self.interface.isExternal() and 'prefable' in desc:
raise TypeError("%s is external but has a prefable setting" %
self.interface.identifier.name)
self.prefable = desc.get('prefable', False)
if self.workers:
if desc.get('nativeOwnership', 'worker') != 'worker':
raise TypeError("Worker descriptor for %s should have 'worker' "
"as value for nativeOwnership" %
self.interface.identifier.name)
self.nativeOwnership = "worker"
else:
self.nativeOwnership = desc.get('nativeOwnership', 'nsisupports')
if not self.nativeOwnership in ['owned', 'refcounted', 'nsisupports']:
raise TypeError("Descriptor for %s has unrecognized value (%s) "
"for nativeOwnership" %
(self.interface.identifier.name, self.nativeOwnership))
self.customTrace = desc.get('customTrace', self.workers)
self.customFinalize = desc.get('customFinalize', self.workers)
self.wrapperCache = self.workers or desc.get('wrapperCache', True)
if not self.wrapperCache and self.prefable:
raise TypeError("Descriptor for %s is prefable but not wrappercached" %
self.interface.identifier.name)
def make_name(name):
return name + "_workers" if self.workers else name
self.name = make_name(interface.identifier.name)
# self.extendedAttributes is a dict of dicts, keyed on
# all/getterOnly/setterOnly and then on member name. Values are an
# array of extended attributes.
self.extendedAttributes = { 'all': {}, 'getterOnly': {}, 'setterOnly': {} }
def addExtendedAttribute(attribute, config):
def add(key, members, attribute):
for member in members:
self.extendedAttributes[key].setdefault(member, []).append(attribute)
if isinstance(config, dict):
for key in ['all', 'getterOnly', 'setterOnly']:
add(key, config.get(key, []), attribute)
elif isinstance(config, list):
add('all', config, attribute)
else:
assert isinstance(config, str)
if config == '*':
iface = self.interface
while iface:
add('all', map(lambda m: m.name, iface.members), attribute)
iface = iface.parent
else:
add('all', [config], attribute)
for attribute in ['implicitJSContext', 'resultNotAddRefed']:
addExtendedAttribute(attribute, desc.get(attribute, {}))
self.binaryNames = desc.get('binaryNames', {})
# Build the prototype chain.
self.prototypeChain = []
parent = interface
while parent:
self.prototypeChain.insert(0, make_name(parent.identifier.name))
parent = parent.parent
config.maxProtoChainLength = max(config.maxProtoChainLength,
len(self.prototypeChain))
def hasInterfaceOrInterfacePrototypeObject(self):
# Forward-declared interfaces don't need either interface object or
# interface prototype object as they're going to use QI (on main thread)
# or be passed as a JSObject (on worker threads).
if self.interface.isExternal():
return False
return self.interface.hasInterfaceObject() or self.interface.hasInterfacePrototypeObject()
def getExtendedAttributes(self, member, getter=False, setter=False):
def ensureValidThrowsExtendedAttribute(attr):
assert(attr is None or attr is True or len(attr) == 1)
if (attr is not None and attr is not True and
'Workers' not in attr and 'MainThread' not in attr):
raise TypeError("Unknown value for 'Throws': " + attr[0])
def maybeAppendInfallibleToAttrs(attrs, throws):
ensureValidThrowsExtendedAttribute(throws)
if (throws is None or
(throws is not True and
('Workers' not in throws or not self.workers) and
('MainThread' not in throws or self.workers))):
attrs.append("infallible")
name = member.identifier.name
if member.isMethod():
attrs = self.extendedAttributes['all'].get(name, [])
throws = member.getExtendedAttribute("Throws")
maybeAppendInfallibleToAttrs(attrs, throws)
return attrs
assert member.isAttr()
assert bool(getter) != bool(setter)
key = 'getterOnly' if getter else 'setterOnly'
attrs = self.extendedAttributes['all'].get(name, []) + self.extendedAttributes[key].get(name, [])
throws = member.getExtendedAttribute("Throws")
if throws is None:
throwsAttr = "GetterThrows" if getter else "SetterThrows"
throws = member.getExtendedAttribute(throwsAttr)
maybeAppendInfallibleToAttrs(attrs, throws)
return attrs
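def exampleConfigurationUsage(configuration):
    """
    Illustrative sketch only, not used by the code generator: shows how the
    Configuration object built above is typically queried. The WebIDL file
    name and interface name below are placeholders.
    """
    # Descriptors for all interfaces from one WebIDL file that end up with an
    # interface object.
    descriptors = configuration.getDescriptors(webIDLFile='Example.webidl',
                                               hasInterfaceObject=True)
    # A single descriptor, selected by interface name and the workers boolean.
    mainThreadDescriptor = configuration.getDescriptor('ExampleInterface', False)
    return (descriptors, mainThreadDescriptor)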
|
|
#
# (c) Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api.rest import neutron
from openstack_dashboard.test import helpers as test
from openstack_dashboard.test.test_data import neutron_data
from openstack_dashboard.test.test_data.utils import TestData
TEST = TestData(neutron_data.data)
class NeutronNetworksTestCase(test.TestCase):
def setUp(self):
super(NeutronNetworksTestCase, self).setUp()
self._networks = [test.mock_factory(n)
for n in TEST.api_networks.list()]
@mock.patch.object(neutron.api, 'neutron')
def test_get_list_for_tenant(self, client):
request = self.mock_rest_request()
networks = self._networks
client.network_list_for_tenant.return_value = networks
response = neutron.Networks().get(request)
self.assertStatusCode(response, 200)
self.assertItemsCollectionEqual(response, TEST.api_networks.list())
client.network_list_for_tenant.assert_called_once_with(
request, request.user.tenant_id)
@mock.patch.object(neutron.api, 'neutron')
def test_create(self, client):
self._test_create(
'{"name": "mynetwork"}',
{'name': 'mynetwork'}
)
@mock.patch.object(neutron.api, 'neutron')
def test_create_with_bogus_param(self, client):
self._test_create(
'{"name": "mynetwork","bilbo":"baggins"}',
{'name': 'mynetwork'}
)
@mock.patch.object(neutron.api, 'neutron')
def _test_create(self, supplied_body, expected_call, client):
request = self.mock_rest_request(body=supplied_body)
client.network_create.return_value = self._networks[0]
response = neutron.Networks().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'],
'/api/neutron/networks/'
+ str(TEST.api_networks.first().get("id")))
self.assertEqual(response.json, TEST.api_networks.first())
#
# Services
#
@test.create_stubs({api.base: ('is_service_enabled',)})
@test.create_stubs({api.neutron: ('is_extension_supported',)})
@mock.patch.object(neutron.api, 'neutron')
def test_services_get(self, client):
request = self.mock_rest_request(
GET={"network_id": "the_network"})
api.base.is_service_enabled(request, 'network').AndReturn(True)
api.neutron.is_extension_supported(request, 'agent').AndReturn(True)
client.agent_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'to_dict.return_value': {'id': '2'}})
]
self.mox.ReplayAll()
response = neutron.Services().get(request)
self.assertStatusCode(response, 200)
client.agent_list.assert_called_once_with(
request, network_id='the_network')
self.assertEqual(response.content.decode('utf-8'),
'{"items": [{"id": "1"}, {"id": "2"}]}')
@test.create_stubs({api.base: ('is_service_enabled',)})
def test_services_get_disabled(self):
request = self.mock_rest_request(
GET={"network_id": self._networks[0].id})
api.base.is_service_enabled(request, 'network').AndReturn(False)
self.mox.ReplayAll()
response = neutron.Services().get(request)
self.assertStatusCode(response, 501)
class NeutronSubnetsTestCase(test.TestCase):
def setUp(self):
super(NeutronSubnetsTestCase, self).setUp()
self._networks = [test.mock_factory(n)
for n in TEST.api_networks.list()]
self._subnets = [test.mock_factory(n)
for n in TEST.api_subnets.list()]
@mock.patch.object(neutron.api, 'neutron')
def test_get(self, client):
request = self.mock_rest_request(
GET={"network_id": self._networks[0].id})
client.subnet_list.return_value = [self._subnets[0]]
response = neutron.Subnets().get(request)
self.assertStatusCode(response, 200)
client.subnet_list.assert_called_once_with(
request, network_id=TEST.api_networks.first().get("id"))
@mock.patch.object(neutron.api, 'neutron')
def test_create(self, client):
request = self.mock_rest_request(
body='{"network_id": "%s",'
' "ip_version": "4",'
' "cidr": "192.168.199.0/24"}' % self._networks[0].id)
client.subnet_create.return_value = self._subnets[0]
response = neutron.Subnets().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'],
'/api/neutron/subnets/' +
str(TEST.api_subnets.first().get("id")))
self.assertEqual(response.json, TEST.api_subnets.first())
class NeutronPortsTestCase(test.TestCase):
def setUp(self):
super(NeutronPortsTestCase, self).setUp()
self._networks = [test.mock_factory(n)
for n in TEST.api_networks.list()]
self._ports = [test.mock_factory(n)
for n in TEST.api_ports.list()]
@mock.patch.object(neutron.api, 'neutron')
def test_get(self, client):
request = self.mock_rest_request(
GET={"network_id": self._networks[0].id})
client.port_list.return_value = [self._ports[0]]
response = neutron.Ports().get(request)
self.assertStatusCode(response, 200)
client.port_list.assert_called_once_with(
request, network_id=TEST.api_networks.first().get("id"))
class NeutronExtensionsTestCase(test.TestCase):
def setUp(self):
super(NeutronExtensionsTestCase, self).setUp()
self._extensions = [n for n in TEST.api_extensions.list()]
@mock.patch.object(neutron.api, 'neutron')
def test_list_extensions(self, nc):
request = self.mock_rest_request(**{'GET': {}})
nc.list_extensions.return_value = self._extensions
response = neutron.Extensions().get(request)
self.assertStatusCode(response, 200)
self.assertItemsCollectionEqual(response, TEST.api_extensions.list())
nc.list_extensions.assert_called_once_with(request)
class NeutronDefaultQuotasTestCase(test.TestCase):
@test.create_stubs({base: ('is_service_enabled',)})
@mock.patch.object(neutron.api, 'neutron')
def test_quotas_sets_defaults_get_when_service_is_enabled(self, client):
filters = {'user': {'tenant_id': 'tenant'}}
request = self.mock_rest_request(**{'GET': dict(filters)})
base.is_service_enabled(request, 'network').AndReturn(True)
client.tenant_quota_get.return_value = [
base.Quota("network", 100),
base.Quota("q2", 101)]
self.mox.ReplayAll()
response = neutron.DefaultQuotaSets().get(request)
self.assertStatusCode(response, 200)
self.assertItemsCollectionEqual(response, [
{'limit': 100, 'display_name': 'Networks', 'name': 'network'},
{'limit': 101, 'display_name': 'Q2', 'name': 'q2'}])
client.tenant_quota_get.assert_called_once_with(
request,
request.user.tenant_id)
@test.create_stubs({neutron.api.base: ('is_service_enabled',)})
@mock.patch.object(neutron.api, 'neutron')
def test_quota_sets_defaults_get_when_service_is_disabled(self, client):
filters = {'user': {'tenant_id': 'tenant'}}
request = self.mock_rest_request(**{'GET': dict(filters)})
base.is_service_enabled(request, 'network').AndReturn(False)
self.mox.ReplayAll()
response = neutron.DefaultQuotaSets().get(request)
self.assertStatusCode(response, 501)
self.assertEqual(response.content.decode('utf-8'),
'"Service Neutron is disabled."')
client.tenant_quota_get.assert_not_called()
class NeutronQuotaSetsTestCase(test.TestCase):
def setUp(self):
super(NeutronQuotaSetsTestCase, self).setUp()
quota_set = self.neutron_quotas.list()[0]
self._quota_data = {}
for quota in quota_set:
self._quota_data[quota.name] = quota.limit
@mock.patch.object(neutron, 'quotas')
@mock.patch.object(neutron.api, 'neutron')
@mock.patch.object(neutron.api, 'base')
def test_quotas_sets_patch(self, bc, nc, qc):
request = self.mock_rest_request(body='''
{"network": "5", "subnet": "5", "port": "50",
"router": "5", "floatingip": "50",
"security_group": "5", "security_group_rule": "50",
"volumes": "5", "cores": "50"}
''')
qc.get_disabled_quotas.return_value = []
qc.NEUTRON_QUOTA_FIELDS = (n for n in self._quota_data)
bc.is_service_enabled.return_value = True
nc.is_extension_supported.return_value = True
response = neutron.QuotasSets().patch(request, 'spam123')
self.assertStatusCode(response, 204)
self.assertEqual(response.content.decode('utf-8'), '')
nc.tenant_quota_update.assert_called_once_with(
request, 'spam123', network='5',
subnet='5', port='50', router='5',
floatingip='50', security_group='5',
security_group_rule='50')
@mock.patch.object(neutron, 'quotas')
@mock.patch.object(neutron.api, 'neutron')
@mock.patch.object(neutron.api, 'base')
def test_quotas_sets_patch_when_service_is_disabled(self, bc, nc, qc):
request = self.mock_rest_request(body='''
{"network": "5", "subnet": "5", "port": "50",
"router": "5", "floatingip": "50",
"security_group": "5", "security_group_rule": "50",
"volumes": "5", "cores": "50"}
''')
qc.get_disabled_quotas.return_value = []
qc.NEUTRON_QUOTA_FIELDS = (n for n in self._quota_data)
bc.is_service_enabled.return_value = False
response = neutron.QuotasSets().patch(request, 'spam123')
message = \
'"Service Neutron is disabled or quotas extension not available."'
self.assertStatusCode(response, 501)
self.assertEqual(response.content.decode('utf-8'), message)
nc.tenant_quota_update.assert_not_called()
def mock_obj_to_dict(r):
return mock.Mock(**{'to_dict.return_value': r})
def mock_factory(r):
"""mocks all the attributes as well as the to_dict """
mocked = mock_obj_to_dict(r)
mocked.configure_mock(**r)
return mocked
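def example_mock_factory_usage():
    """Illustrative sketch only, not collected as a test: mock_factory wraps a
    plain dict so the result supports both attribute access and to_dict(),
    mirroring the API-layer objects used in the test cases above. The dict
    below is a placeholder."""
    net = mock_factory({'id': 'abc', 'status': 'ACTIVE'})
    assert net.status == 'ACTIVE'
    assert net.to_dict() == {'id': 'abc', 'status': 'ACTIVE'}
    return net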
|
|
"""
sentry.testutils.cases
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = (
'TestCase', 'TransactionTestCase', 'APITestCase', 'AuthProviderTestCase',
'RuleTestCase', 'PermissionTestCase', 'PluginTestCase'
)
import base64
import os.path
import urllib
from django.conf import settings
from django.contrib.auth import login
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, TransactionTestCase
from django.utils.importlib import import_module
from exam import before, Exam
from nydus.db import create_cluster
from rest_framework.test import APITestCase as BaseAPITestCase
from sentry import auth
from sentry.auth.providers.dummy import DummyProvider
from sentry.constants import MODULE_ROOT
from sentry.models import GroupMeta, OrganizationMemberType, ProjectOption
from sentry.plugins import plugins
from sentry.rules import EventState
from sentry.utils import json
from .fixtures import Fixtures
from .helpers import AuthProvider, Feature, get_auth_header
def create_redis_conn():
options = {
'engine': 'nydus.db.backends.redis.Redis',
}
options.update(settings.SENTRY_REDIS_OPTIONS)
return create_cluster(options)
_redis_conn = create_redis_conn()
def flush_redis():
_redis_conn.flushdb()
class BaseTestCase(Fixtures, Exam):
urls = 'tests.sentry.web.urls'
def assertRequiresAuthentication(self, path, method='GET'):
resp = getattr(self.client, method.lower())(path)
assert resp.status_code == 302
assert resp['Location'].startswith('http://testserver' + reverse('sentry-login'))
@before
def setup_session(self):
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session.save()
self.session = session
def feature(self, name, active=True):
"""
        >>> with self.feature('feature:name'):
        ...     # ...
"""
return Feature(name, active)
def auth_provider(self, name, cls):
"""
        >>> with self.auth_provider('name', Provider):
        ...     # ...
"""
return AuthProvider(name, cls)
def save_session(self):
self.session.save()
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
session_cookie = settings.SESSION_COOKIE_NAME
self.client.cookies[session_cookie] = self.session.session_key
self.client.cookies[session_cookie].update(cookie_data)
def login_as(self, user):
user.backend = settings.AUTHENTICATION_BACKENDS[0]
request = HttpRequest()
request.session = self.session
login(request, user)
request.user = user
# Save the session values.
self.save_session()
def load_fixture(self, filepath):
filepath = os.path.join(
MODULE_ROOT,
'tests',
'fixtures',
filepath,
)
with open(filepath, 'rb') as fp:
return fp.read()
def _pre_setup(self):
super(BaseTestCase, self)._pre_setup()
cache.clear()
ProjectOption.objects.clear_local_cache()
GroupMeta.objects.clear_local_cache()
def _post_teardown(self):
super(BaseTestCase, self)._post_teardown()
flush_redis()
def _makeMessage(self, data):
return json.dumps(data)
def _makePostMessage(self, data):
return base64.b64encode(self._makeMessage(data))
def _postWithHeader(self, data, key=None, secret=None):
if key is None:
key = self.projectkey.public_key
secret = self.projectkey.secret_key
message = self._makePostMessage(data)
with self.settings(CELERY_ALWAYS_EAGER=True):
resp = self.client.post(
reverse('sentry-api-store'), message,
content_type='application/octet-stream',
HTTP_X_SENTRY_AUTH=get_auth_header('_postWithHeader', key, secret),
)
return resp
def _getWithReferer(self, data, key=None, referer='getsentry.com', protocol='4'):
if key is None:
key = self.projectkey.public_key
headers = {}
if referer is not None:
headers['HTTP_REFERER'] = referer
message = self._makeMessage(data)
qs = {
'sentry_version': protocol,
'sentry_client': 'raven-js/lol',
'sentry_key': key,
'sentry_data': message,
}
with self.settings(CELERY_ALWAYS_EAGER=True):
resp = self.client.get(
'%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urllib.urlencode(qs)),
**headers
)
return resp
_postWithSignature = _postWithHeader
_postWithNewSignature = _postWithHeader
class TestCase(BaseTestCase, TestCase):
pass
class TransactionTestCase(BaseTestCase, TransactionTestCase):
pass
class APITestCase(BaseTestCase, BaseAPITestCase):
pass
class AuthProviderTestCase(TestCase):
provider = DummyProvider
provider_name = 'dummy'
def setUp(self):
super(AuthProviderTestCase, self).setUp()
auth.register(self.provider_name, self.provider)
self.addCleanup(auth.unregister, self.provider_name, self.provider)
class RuleTestCase(TestCase):
rule_cls = None
def get_event(self):
return self.event
def get_rule(self, data=None):
return self.rule_cls(
project=self.project,
data=data or {},
)
def get_state(self, **kwargs):
kwargs.setdefault('is_new', True)
kwargs.setdefault('is_regression', True)
kwargs.setdefault('is_sample', True)
kwargs.setdefault('rule_is_active', False)
kwargs.setdefault('rule_last_active', None)
return EventState(**kwargs)
def assertPasses(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is True
def assertDoesNotPass(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is False
class PermissionTestCase(TestCase):
def setUp(self):
super(PermissionTestCase, self).setUp()
self.owner = self.create_user()
self.organization = self.create_organization(owner=self.owner)
self.team = self.create_team(organization=self.organization)
def assert_can_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 200 and resp.status_code < 300
def assert_cannot_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 300
def assert_team_member_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
)
self.assert_can_access(user, path)
def assert_org_member_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
)
self.assert_can_access(user, path)
def assert_teamless_member_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
)
self.assert_can_access(user, path)
def assert_team_member_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
)
self.assert_cannot_access(user, path)
def assert_org_member_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
)
self.assert_cannot_access(user, path)
def assert_teamless_member_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
)
self.assert_cannot_access(user, path)
def assert_team_admin_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.ADMIN,
)
self.assert_can_access(user, path)
def assert_org_admin_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.ADMIN,
)
self.assert_can_access(user, path)
def assert_teamless_admin_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.ADMIN,
)
self.assert_can_access(user, path)
def assert_team_admin_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.ADMIN,
)
self.assert_cannot_access(user, path)
def assert_org_admin_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.ADMIN,
)
self.assert_cannot_access(user, path)
def assert_teamless_admin_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.ADMIN,
)
self.assert_cannot_access(user, path)
def assert_team_owner_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.OWNER,
)
self.assert_can_access(user, path)
def assert_org_owner_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.OWNER,
)
self.assert_can_access(user, path)
def assert_teamless_owner_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.OWNER,
)
self.assert_can_access(user, path)
def assert_team_owner_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.OWNER,
)
self.assert_cannot_access(user, path)
def assert_org_owner_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.OWNER,
)
self.assert_cannot_access(user, path)
def assert_teamless_owner_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.OWNER,
)
self.assert_cannot_access(user, path)
def assert_non_member_cannot_access(self, path):
user = self.create_user()
self.assert_cannot_access(user, path)
class PluginTestCase(TestCase):
plugin = None
def setUp(self):
super(PluginTestCase, self).setUp()
plugins.register(self.plugin)
self.addCleanup(plugins.unregister, self.plugin)
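# Usage sketch (illustrative only): RuleTestCase is meant to be subclassed in a
# test module, pointing ``rule_cls`` at the rule under test and driving it with
# the assertion helpers above. ``MyRule`` is a placeholder name.
#
#   class MyRuleTest(RuleTestCase):
#       rule_cls = MyRule
#
#       def test_new_events_pass(self):
#           rule = self.get_rule()
#           self.assertPasses(rule, self.event, is_new=True)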
|
|
import pytest
from datetime import datetime
from ... import api
from ... import utils
from ...api import InvalidEntry, MissingEntry
@pytest.fixture
def gradebook(request):
gb = api.Gradebook("sqlite:///:memory:")
def fin():
gb.close()
request.addfinalizer(fin)
return gb
@pytest.fixture
def assignment(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
gradebook.add_grade_cell('test2', 'p1', 'foo', max_score=2, cell_type='markdown')
gradebook.add_solution_cell('solution1', 'p1', 'foo')
gradebook.add_solution_cell('test2', 'p1', 'foo')
gradebook.add_source_cell('test1', 'p1', 'foo', cell_type='code')
gradebook.add_source_cell('test2', 'p1', 'foo', cell_type='markdown')
gradebook.add_source_cell('solution1', 'p1', 'foo', cell_type='code')
return gradebook
def test_init(gradebook):
assert gradebook.students == []
assert gradebook.assignments == []
#### Test students
def test_add_student(gradebook):
s = gradebook.add_student('12345')
assert s.id == '12345'
assert gradebook.students == [s]
# try adding a duplicate student
with pytest.raises(InvalidEntry):
gradebook.add_student('12345')
# try adding a student with arguments
s = gradebook.add_student('6789', last_name="Bar", first_name="Foo", email="[email protected]")
assert s.id == '6789'
assert s.last_name == "Bar"
assert s.first_name == "Foo"
assert s.email == "[email protected]"
def test_add_duplicate_student(gradebook):
# we also need this test because this will cause an IntegrityError
# under the hood rather than a FlushError
gradebook.add_student('12345')
with pytest.raises(InvalidEntry):
gradebook.add_student('12345')
def test_find_student(gradebook):
s1 = gradebook.add_student('12345')
assert gradebook.find_student('12345') == s1
s2 = gradebook.add_student('abcd')
assert gradebook.find_student('12345') == s1
assert gradebook.find_student('abcd') == s2
def test_find_nonexistent_student(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_student('12345')
def test_remove_student(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
assignment.remove_student('hacker123')
with pytest.raises(MissingEntry):
assignment.find_submission('foo', 'hacker123')
with pytest.raises(MissingEntry):
assignment.find_student('hacker123')
def test_update_or_create_student(gradebook):
# first test creating it
s1 = gradebook.update_or_create_student('hacker123')
assert gradebook.find_student('hacker123') == s1
assert s1.first_name is None
# now test finding/updating it
s2 = gradebook.update_or_create_student('hacker123', first_name='Alyssa')
assert s1 == s2
assert s2.first_name == 'Alyssa'
#### Test assignments
def test_add_assignment(gradebook):
a = gradebook.add_assignment('foo')
assert a.name == 'foo'
assert gradebook.assignments == [a]
# try adding a duplicate assignment
with pytest.raises(InvalidEntry):
gradebook.add_assignment('foo')
# try adding an assignment with arguments
now = datetime.utcnow()
a = gradebook.add_assignment('bar', duedate=now)
assert a.name == 'bar'
assert a.duedate == now
# try adding with a string timestamp
a = gradebook.add_assignment('baz', duedate=now.isoformat())
assert a.name == 'baz'
assert a.duedate == now
def test_add_duplicate_assignment(gradebook):
gradebook.add_assignment('foo')
with pytest.raises(InvalidEntry):
gradebook.add_assignment('foo')
def test_find_assignment(gradebook):
a1 = gradebook.add_assignment('foo')
assert gradebook.find_assignment('foo') == a1
a2 = gradebook.add_assignment('bar')
assert gradebook.find_assignment('foo') == a1
assert gradebook.find_assignment('bar') == a2
def test_find_nonexistant_assignment(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_assignment('foo')
def test_remove_assignment(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
notebooks = assignment.find_assignment('foo').notebooks
grade_cells = [x for nb in notebooks for x in nb.grade_cells]
solution_cells = [x for nb in notebooks for x in nb.solution_cells]
source_cells = [x for nb in notebooks for x in nb.source_cells]
assignment.remove_assignment('foo')
for nb in notebooks:
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade_cell in grade_cells:
assert assignment.db.query(api.GradeCell).filter(api.GradeCell.id == grade_cell.id).all() == []
for solution_cell in solution_cells:
assert assignment.db.query(api.SolutionCell).filter(api.SolutionCell.id == solution_cell.id).all() == []
for source_cell in source_cells:
assert assignment.db.query(api.SourceCell).filter(api.SourceCell.id == source_cell.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_assignment('foo')
assert assignment.find_student('hacker123').submissions == []
def test_update_or_create_assignment(gradebook):
# first test creating it
a1 = gradebook.update_or_create_assignment('foo')
assert gradebook.find_assignment('foo') == a1
assert a1.duedate is None
# now test finding/updating it
a2 = gradebook.update_or_create_assignment('foo', duedate="2015-02-02 14:58:23.948203 PST")
assert a1 == a2
assert a2.duedate == utils.parse_utc("2015-02-02 14:58:23.948203 PST")
#### Test notebooks
def test_add_notebook(gradebook):
a = gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
assert n.name == 'p1'
assert n.assignment == a
assert a.notebooks == [n]
    # try adding a duplicate notebook
with pytest.raises(InvalidEntry):
gradebook.add_notebook('p1', 'foo')
def test_add_duplicate_notebook(gradebook):
# it should be ok to add a notebook with the same name, as long as
# it's for different assignments
gradebook.add_assignment('foo')
gradebook.add_assignment('bar')
n1 = gradebook.add_notebook('p1', 'foo')
n2 = gradebook.add_notebook('p1', 'bar')
assert n1.id != n2.id
# but not ok to add a notebook with the same name for the same assignment
with pytest.raises(InvalidEntry):
gradebook.add_notebook('p1', 'foo')
def test_find_notebook(gradebook):
gradebook.add_assignment('foo')
n1 = gradebook.add_notebook('p1', 'foo')
assert gradebook.find_notebook('p1', 'foo') == n1
n2 = gradebook.add_notebook('p2', 'foo')
assert gradebook.find_notebook('p1', 'foo') == n1
assert gradebook.find_notebook('p2', 'foo') == n2
def test_find_nonexistant_notebook(gradebook):
# check that it doesn't find it when there is nothing in the db
with pytest.raises(MissingEntry):
gradebook.find_notebook('p1', 'foo')
# check that it doesn't find it even if the assignment exists
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_notebook('p1', 'foo')
def test_update_or_create_notebook(gradebook):
# first test creating it
gradebook.add_assignment('foo')
n1 = gradebook.update_or_create_notebook('p1', 'foo')
assert gradebook.find_notebook('p1', 'foo') == n1
# now test finding/updating it
n2 = gradebook.update_or_create_notebook('p1', 'foo')
assert n1 == n2
def test_remove_notebook(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
notebooks = assignment.find_assignment('foo').notebooks
for nb in notebooks:
grade_cells = [x for x in nb.grade_cells]
solution_cells = [x for x in nb.solution_cells]
source_cells = [x for x in nb.source_cells]
assignment.remove_notebook(nb.name, 'foo')
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade_cell in grade_cells:
assert assignment.db.query(api.GradeCell).filter(api.GradeCell.id == grade_cell.id).all() == []
for solution_cell in solution_cells:
assert assignment.db.query(api.SolutionCell).filter(api.SolutionCell.id == solution_cell.id).all() == []
for source_cell in source_cells:
assert assignment.db.query(api.SourceCell).filter(api.SourceCell.id == source_cell.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_notebook(nb.name, 'foo')
#### Test grade cells
def test_add_grade_cell(gradebook):
gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
gc = gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown')
assert gc.name == 'test1'
assert gc.max_score == 2
assert gc.cell_type == 'markdown'
assert n.grade_cells == [gc]
assert gc.notebook == n
def test_add_grade_cell_with_args(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gc = gradebook.add_grade_cell(
'test1', 'p1', 'foo',
max_score=3, cell_type="code")
assert gc.name == 'test1'
assert gc.max_score == 3
assert gc.cell_type == "code"
def test_create_invalid_grade_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(InvalidEntry):
gradebook.add_grade_cell(
'test1', 'p1', 'foo',
max_score=3, cell_type="something")
def test_add_duplicate_grade_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
with pytest.raises(InvalidEntry):
gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown')
def test_find_grade_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gc1 = gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1
gc2 = gradebook.add_grade_cell('test2', 'p1', 'foo', max_score=2, cell_type='code')
assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1
assert gradebook.find_grade_cell('test2', 'p1', 'foo') == gc2
def test_find_nonexistant_grade_cell(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_grade_cell('test1', 'p1', 'foo')
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_grade_cell('test1', 'p1', 'foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(MissingEntry):
gradebook.find_grade_cell('test1', 'p1', 'foo')
def test_update_or_create_grade_cell(gradebook):
# first test creating it
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gc1 = gradebook.update_or_create_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='code')
assert gc1.max_score == 2
assert gc1.cell_type == 'code'
assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1
# now test finding/updating it
gc2 = gradebook.update_or_create_grade_cell('test1', 'p1', 'foo', max_score=3)
assert gc1 == gc2
assert gc1.max_score == 3
assert gc1.cell_type == 'code'
#### Test solution cells
def test_add_solution_cell(gradebook):
gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
sc = gradebook.add_solution_cell('test1', 'p1', 'foo')
assert sc.name == 'test1'
assert n.solution_cells == [sc]
assert sc.notebook == n
def test_add_duplicate_solution_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_solution_cell('test1', 'p1', 'foo')
with pytest.raises(InvalidEntry):
gradebook.add_solution_cell('test1', 'p1', 'foo')
def test_find_solution_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.add_solution_cell('test1', 'p1', 'foo')
assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1
sc2 = gradebook.add_solution_cell('test2', 'p1', 'foo')
assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1
assert gradebook.find_solution_cell('test2', 'p1', 'foo') == sc2
def test_find_nonexistant_solution_cell(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_solution_cell('test1', 'p1', 'foo')
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_solution_cell('test1', 'p1', 'foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(MissingEntry):
gradebook.find_solution_cell('test1', 'p1', 'foo')
def test_update_or_create_solution_cell(gradebook):
# first test creating it
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.update_or_create_solution_cell('test1', 'p1', 'foo')
assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1
# now test finding/updating it
sc2 = gradebook.update_or_create_solution_cell('test1', 'p1', 'foo')
assert sc1 == sc2
#### Test source cells
def test_add_source_cell(gradebook):
gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
sc = gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
assert sc.name == 'test1'
assert sc.cell_type == 'code'
assert n.source_cells == [sc]
assert sc.notebook == n
def test_add_source_cell_with_args(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc = gradebook.add_source_cell(
'test1', 'p1', 'foo',
source="blah blah blah",
cell_type="code", checksum="abcde")
assert sc.name == 'test1'
assert sc.source == "blah blah blah"
assert sc.cell_type == "code"
assert sc.checksum == "abcde"
def test_create_invalid_source_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(InvalidEntry):
gradebook.add_source_cell(
'test1', 'p1', 'foo',
source="blah blah blah",
cell_type="something", checksum="abcde")
def test_add_duplicate_source_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
with pytest.raises(InvalidEntry):
gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
def test_find_source_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1
sc2 = gradebook.add_source_cell('test2', 'p1', 'foo', cell_type="code")
assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1
assert gradebook.find_source_cell('test2', 'p1', 'foo') == sc2
def test_find_nonexistant_source_cell(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_source_cell('test1', 'p1', 'foo')
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_source_cell('test1', 'p1', 'foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(MissingEntry):
gradebook.find_source_cell('test1', 'p1', 'foo')
def test_update_or_create_source_cell(gradebook):
# first test creating it
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.update_or_create_source_cell('test1', 'p1', 'foo', cell_type='code')
assert sc1.cell_type == 'code'
assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1
# now test finding/updating it
    assert sc1.checksum is None
sc2 = gradebook.update_or_create_source_cell('test1', 'p1', 'foo', checksum="123456")
assert sc1 == sc2
assert sc1.cell_type == 'code'
assert sc1.checksum == "123456"
#### Test submissions
def test_add_submission(assignment):
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
s1 = assignment.add_submission('foo', 'hacker123')
s2 = assignment.add_submission('foo', 'bitdiddle')
assert assignment.assignment_submissions('foo') == [s2, s1]
assert assignment.student_submissions('hacker123') == [s1]
assert assignment.student_submissions('bitdiddle') == [s2]
assert assignment.find_submission('foo', 'hacker123') == s1
assert assignment.find_submission('foo', 'bitdiddle') == s2
def test_add_duplicate_submission(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
with pytest.raises(InvalidEntry):
assignment.add_submission('foo', 'hacker123')
def test_remove_submission(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
submission = assignment.find_submission('foo', 'hacker123')
notebooks = submission.notebooks
grades = [x for nb in notebooks for x in nb.grades]
comments = [x for nb in notebooks for x in nb.comments]
assignment.remove_submission('foo', 'hacker123')
for nb in notebooks:
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade in grades:
assert assignment.db.query(api.Grade).filter(api.Grade.id == grade.id).all() == []
for comment in comments:
assert assignment.db.query(api.Comment).filter(api.Comment.id == comment.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_submission('foo', 'hacker123')
def test_update_or_create_submission(assignment):
assignment.add_student('hacker123')
s1 = assignment.update_or_create_submission('foo', 'hacker123')
assert s1.timestamp is None
s2 = assignment.update_or_create_submission('foo', 'hacker123', timestamp="2015-02-02 14:58:23.948203 PST")
assert s1 == s2
assert s2.timestamp == utils.parse_utc("2015-02-02 14:58:23.948203 PST")
def test_find_submission_notebook(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
with pytest.raises(MissingEntry):
assignment.find_submission_notebook('p2', 'foo', 'hacker123')
n2 = assignment.find_submission_notebook('p1', 'foo', 'hacker123')
assert n1 == n2
def test_find_submission_notebook_by_id(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
with pytest.raises(MissingEntry):
assignment.find_submission_notebook_by_id('12345')
n2 = assignment.find_submission_notebook_by_id(n1.id)
assert n1 == n2
def test_remove_submission_notebook(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
submission = assignment.find_submission('foo', 'hacker123')
notebooks = submission.notebooks
for nb in notebooks:
grades = [x for x in nb.grades]
comments = [x for x in nb.comments]
assignment.remove_submission_notebook(nb.name, 'foo', 'hacker123')
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade in grades:
assert assignment.db.query(api.Grade).filter(api.Grade.id == grade.id).all() == []
for comment in comments:
assert assignment.db.query(api.Comment).filter(api.Comment.id == comment.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_submission_notebook(nb.name, 'foo', 'hacker123')
def test_find_grade(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
grades = n1.grades
for g1 in grades:
g2 = assignment.find_grade(g1.name, 'p1', 'foo', 'hacker123')
assert g1 == g2
with pytest.raises(MissingEntry):
assignment.find_grade('asdf', 'p1', 'foo', 'hacker123')
def test_find_grade_by_id(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
grades = n1.grades
for g1 in grades:
g2 = assignment.find_grade_by_id(g1.id)
assert g1 == g2
with pytest.raises(MissingEntry):
assignment.find_grade_by_id('12345')
def test_find_comment(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
comments = n1.comments
for c1 in comments:
c2 = assignment.find_comment(c1.name, 'p1', 'foo', 'hacker123')
assert c1 == c2
with pytest.raises(MissingEntry):
assignment.find_comment('asdf', 'p1', 'foo', 'hacker123')
def test_find_comment_by_id(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
comments = n1.comments
for c1 in comments:
c2 = assignment.find_comment_by_id(c1.id)
assert c1 == c2
with pytest.raises(MissingEntry):
assignment.find_comment_by_id('12345')
### Test average scores
def test_average_assignment_score(assignment):
assert assignment.average_assignment_score('foo') == 0.0
assert assignment.average_assignment_code_score('foo') == 0.0
assert assignment.average_assignment_written_score('foo') == 0.0
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
assignment.add_submission('foo', 'hacker123')
assignment.add_submission('foo', 'bitdiddle')
assert assignment.average_assignment_score('foo') == 0.0
assert assignment.average_assignment_code_score('foo') == 0.0
assert assignment.average_assignment_written_score('foo') == 0.0
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
assert assignment.average_assignment_score('foo') == 2.25
assert assignment.average_assignment_code_score('foo') == 0.75
assert assignment.average_assignment_written_score('foo') == 1.5
def test_average_notebook_score(assignment):
assert assignment.average_notebook_score('p1', 'foo') == 0
assert assignment.average_notebook_code_score('p1', 'foo') == 0
assert assignment.average_notebook_written_score('p1', 'foo') == 0
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
assignment.add_submission('foo', 'hacker123')
assignment.add_submission('foo', 'bitdiddle')
assert assignment.average_notebook_score('p1', 'foo') == 0.0
assert assignment.average_notebook_code_score('p1', 'foo') == 0.0
assert assignment.average_notebook_written_score('p1', 'foo') == 0.0
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
assert assignment.average_notebook_score('p1', 'foo') == 2.25
assert assignment.average_notebook_code_score('p1', 'foo') == 0.75
assert assignment.average_notebook_written_score('p1', 'foo') == 1.5
## Test mass dictionary queries
def test_student_dicts(assignment):
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
assignment.add_student('louisreasoner')
assignment.add_submission('foo', 'hacker123')
assignment.add_submission('foo', 'bitdiddle')
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
students = assignment.student_dicts()
a = sorted(students, key=lambda x: x["id"])
b = sorted([x.to_dict() for x in assignment.students], key=lambda x: x["id"])
assert a == b
def test_notebook_submission_dicts(assignment):
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
s1 = assignment.add_submission('foo', 'hacker123')
s2 = assignment.add_submission('foo', 'bitdiddle')
s1.flagged = True
s2.flagged = False
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
notebook = assignment.find_notebook("p1", "foo")
submissions = assignment.notebook_submission_dicts("p1", "foo")
a = sorted(submissions, key=lambda x: x["id"])
b = sorted([x.to_dict() for x in notebook.submissions], key=lambda x: x["id"])
assert a == b
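# --- Hedged usage sketch (not one of the original tests) ---
# The tests above exercise the Gradebook API piece by piece. The helper below
# strings the same calls together into a single end-to-end flow, using only
# methods that already appear in this module. It is a sketch for orientation,
# not an additional test, and is never invoked by the test runner.
def _example_gradebook_workflow():
    # An in-memory database, exactly as in the ``gradebook`` fixture above.
    gb = api.Gradebook("sqlite:///:memory:")
    try:
        # Define the assignment structure: one notebook with one graded cell.
        gb.add_assignment('demo')
        gb.add_notebook('p1', 'demo')
        gb.add_grade_cell('test1', 'p1', 'demo', max_score=1, cell_type='code')
        gb.add_source_cell('test1', 'p1', 'demo', cell_type='code')
        # Register a student and record a submission.
        gb.add_student('student1')
        gb.add_submission('demo', 'student1')
        # Manually score the submitted grade cell and persist the change.
        grade = gb.find_grade('test1', 'p1', 'demo', 'student1')
        grade.manual_score = 1
        gb.db.commit()
        # Aggregate scores, as in the average-score tests above.
        return gb.average_assignment_score('demo')
    finally:
        gb.close()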
|
|
import random
import threading
import os
from sqlalchemy.sql.expression import and_, select
from sqlalchemy.sql.functions import func
from gengine.app.model import t_subject_device, t_subject_messages
from gengine.base.model import update_connection
from gengine.base.settings import get_settings
from gengine.base.util import lstrip_word
from gengine.metadata import DBSession
threadlocal = threading.local()
import logging
log = logging.getLogger(__name__)
try:
from apns import APNs, Payload
except ImportError as e:
log.info("tapns3 not installed")
try:
from gcm import GCM
except ImportError as e:
log.info("python-gcm not installed")
def get_prod_apns():
"""
http://stackoverflow.com/questions/1762555/creating-pem-file-for-apns
Step 1: Create Certificate .pem from Certificate .p12
Command: openssl pkcs12 -clcerts -nokeys -out apns-dev-cert.pem -in apns-dev-cert.p12
Step 2: Create Key .pem from Key .p12
Command : openssl pkcs12 -nocerts -out apns-dev-key.pem -in apns-dev-key.p12
Step 3: If you want to remove pass phrase asked in second step
Command : openssl rsa -in apns-dev-key.pem -out apns-dev-key-noenc.pem
"""
if not hasattr(threadlocal, "prod_apns"):
settings = get_settings()
cert_file = os.environ.get("APNS_CERT", settings.get("apns.prod.certificate"))
key_file = os.environ.get("APNS_KEY", settings.get("apns.prod.key"))
sandbox = False # other_helpers.boolify(os.environ.get("APNS_SANDBOX",settings.get("apns.sandbox")))
threadlocal.prod_apns = APNs(use_sandbox=sandbox, cert_file=cert_file, key_file=key_file, enhanced=True)
def response_listener(error_response):
log.debug("client get error-response: " + str(error_response))
threadlocal.prod_apns.gateway_server.register_response_listener(response_listener)
return threadlocal.prod_apns
def get_dev_apns():
"""
http://stackoverflow.com/questions/1762555/creating-pem-file-for-apns
Step 1: Create Certificate .pem from Certificate .p12
Command: openssl pkcs12 -clcerts -nokeys -out apns-dev-cert.pem -in apns-dev-cert.p12
Step 2: Create Key .pem from Key .p12
Command : openssl pkcs12 -nocerts -out apns-dev-key.pem -in apns-dev-key.p12
Step 3: If you want to remove pass phrase asked in second step
Command : openssl rsa -in apns-dev-key.pem -out apns-dev-key-noenc.pem
"""
if not hasattr(threadlocal, "dev_apns"):
settings = get_settings()
cert_file = os.environ.get("APNS_CERT", settings.get("apns.dev.certificate"))
key_file = os.environ.get("APNS_KEY", settings.get("apns.dev.key"))
sandbox = True # other_helpers.boolify(os.environ.get("APNS_SANDBOX",settings.get("apns.sandbox")))
threadlocal.dev_apns = APNs(use_sandbox=sandbox, cert_file=cert_file, key_file=key_file, enhanced=True)
def response_listener(error_response):
log.debug("client get error-response: " + str(error_response))
threadlocal.dev_apns.gateway_server.register_response_listener(response_listener)
return threadlocal.dev_apns
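# --- Hedged configuration sketch (not part of the original module) ---
# get_prod_apns() and get_dev_apns() above read the certificate and key paths
# from the APNS_CERT / APNS_KEY environment variables first and fall back to
# the settings object. The function below only illustrates the settings keys
# involved; the file paths are placeholders, not real deployment values.
def _example_apns_settings():
    return {
        # Fallback keys consulted when the env vars are not set.
        "apns.prod.certificate": "/etc/myapp/apns-prod-cert.pem",  # placeholder path
        "apns.prod.key": "/etc/myapp/apns-prod-key-noenc.pem",     # placeholder path
        "apns.dev.certificate": "/etc/myapp/apns-dev-cert.pem",    # placeholder path
        "apns.dev.key": "/etc/myapp/apns-dev-key-noenc.pem",       # placeholder path
    }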
def get_gcm():
if not hasattr(threadlocal, "gcm"):
settings = get_settings()
# JSON request
API_KEY = os.environ.get("GCM_API_KEY", settings.get("gcm.api_key"))
threadlocal.gcm = GCM(API_KEY)
return threadlocal.gcm
def prod_apns_feedback():
apns_feedback(get_prod_apns(), "prod_")
def dev_apns_feedback():
apns_feedback(get_dev_apns(), "dev_")
def apns_feedback(apns, prefix):
# Get feedback messages.
uS = update_connection()
for (token_hex, fail_time) in apns.feedback_server.items():
try:
if not isinstance(token_hex, str):
token_hex = token_hex.decode("utf8")
token_hex = prefix + token_hex
log.debug("APNS Feedback Entry: %s", token_hex + "_" + str(fail_time))
# do stuff with token_hex and fail_time
q = t_subject_device.select().where(t_subject_device.c.push_id==token_hex)
rows = uS.execute(q).fetchall()
for device in rows:
log.debug("APNSPushID found in Database: %s", token_hex)
if fail_time > device["registered"]:
log.debug("Fail-Time is before Registered-At")
uS.execute(t_subject_device.delete().where(
t_subject_device.c.device_id == device["device_id"],
t_subject_device.c.subject_id == device["subject_id_id"],
))
except:
log.exception("Processing APNS Feedback failed for an entry.")
def gcm_feedback(response):
# Successfully handled registration_ids
if 'success' in response:
for reg_id, success_id in response['success'].items():
log.debug('Successfully sent notification for reg_id {0}'.format(reg_id))
# Handling errors
if 'errors' in response:
for error, reg_ids in response['errors'].items():
# Check for errors and act accordingly
if error in ['NotRegistered', 'InvalidRegistration']:
# Remove reg_ids from database
for reg_id in reg_ids:
q = t_subject_device.delete().where(t_subject_device.c.push_id == reg_id)
DBSession.execute(q)
    # Replace reg_id with canonical_id in the database
if 'canonical' in response:
for reg_id, canonical_id in response['canonical'].items():
if not isinstance(reg_id, str):
reg_id = reg_id.decode("utf8")
log.debug("Replacing reg_id: {0} with canonical_id: {1} in db".format(reg_id, canonical_id))
q = t_subject_device.update().values({
"push_id" : canonical_id
}).where(t_subject_device.c.push_id == reg_id)
DBSession.execute(q)
DBSession.flush()
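# --- Hedged sketch of the response shape handled above (illustrative only) ---
# gcm_feedback() consumes the dict returned by GCM.json_request(); the keys it
# inspects are exactly the ones handled above. The registration and message
# IDs below are placeholders.
def _example_gcm_response():
    return {
        "success": {"reg-id-1": "message-id-1"},          # delivered notifications
        "errors": {"NotRegistered": ["reg-id-2"]},        # push_ids to delete
        "canonical": {"reg-id-3": "canonical-reg-id-3"},  # push_ids to rewrite
    }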
def send_push_message(
subject_id,
text="",
custom_payload={},
title="Gamification-Engine",
android_text=None,
ios_text=None):
message_count = DBSession.execute(select([func.count("*").label("c")],from_obj=t_subject_messages).where(and_(
t_subject_messages.c.subject_id == subject_id,
t_subject_messages.c.is_read == False
))).scalar()
data = dict({"title": title,
"badge": message_count}, **custom_payload)
settings = get_settings()
if not ios_text:
ios_text = text
if not android_text:
android_text = text
rows = DBSession.execute(select([t_subject_device.c.push_id, t_subject_device.c.device_os], from_obj=t_subject_device).distinct().where(t_subject_device.c.subject_id==subject_id)).fetchall()
for device in rows:
if "ios" in device.device_os.lower():
identifier = random.getrandbits(32)
            payload = Payload(alert=ios_text, custom=data, badge=message_count, sound="default")
log.debug("Sending Push message to User (ID: %s)", subject_id)
if device.push_id.startswith("prod_"):
get_prod_apns().gateway_server.send_notification(device.push_id[5:], payload, identifier=identifier)
elif device.push_id.startswith("dev_"):
get_dev_apns().gateway_server.send_notification(device.push_id[4:], payload, identifier=identifier)
if "android" in device.device_os.lower():
log.debug("Sending Push message to User (ID: %s)", subject_id)
push_id = lstrip_word(device.push_id, "dev_")
push_id = lstrip_word(push_id, "prod_")
response = get_gcm().json_request(registration_ids=[push_id, ],
data={"message": android_text, "data": data, "title": title},
restricted_package_name=os.environ.get("GCM_PACKAGE", settings.get("gcm.package","")),
priority='high',
delay_while_idle=False)
if response:
gcm_feedback(response)
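# --- Hedged usage sketch (not part of the original module) ---
# send_push_message() looks up the subject's registered devices and routes the
# message through APNs or GCM depending on device_os. The wrapper below only
# shows the call shape; the subject id and payload values are placeholders.
def _example_send_push(subject_id=42):
    send_push_message(
        subject_id=subject_id,
        text="You earned a new achievement!",
        custom_payload={"achievement_id": 7},  # placeholder application data
        title="Gamification-Engine",
    )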
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import identity_bijector
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"TransformedDistribution",
]
# The following helper functions attempt to statically perform a TF operation.
# These functions make debugging easier since we can do more validation during
# graph construction.
def _static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def _logical_and(*args):
"""Convenience function which attempts to statically `reduce_all`."""
args_ = [_static_value(x) for x in args]
if any(x is not None and not bool(x) for x in args_):
return constant_op.constant(False)
if all(x is not None and bool(x) for x in args_):
return constant_op.constant(True)
if len(args) == 2:
return math_ops.logical_and(*args)
return math_ops.reduce_all(args)
def _logical_equal(x, y):
"""Convenience function which attempts to statically compute `x == y`."""
x_ = _static_value(x)
y_ = _static_value(y)
if x_ is None or y_ is None:
return math_ops.equal(x, y)
return constant_op.constant(np.array_equal(x_, y_))
def _logical_not(x):
"""Convenience function which attempts to statically apply `logical_not`."""
x_ = _static_value(x)
if x_ is None:
return math_ops.logical_not(x)
return constant_op.constant(np.logical_not(x_))
def _concat_vectors(*args):
"""Convenience function which concatenates input vectors."""
args_ = [_static_value(x) for x in args]
if any(x_ is None for x_ in args_):
return array_ops.concat(args, 0)
return constant_op.constant([x_ for vec_ in args_ for x_ in vec_])
def _pick_scalar_condition(pred, cond_true, cond_false):
"""Convenience function which chooses the condition based on the predicate."""
# Note: This function is only valid if all of pred, cond_true, and cond_false
# are scalars. This means its semantics are arguably more like tf.cond than
# tf.select even though we use tf.select to implement it.
pred_ = _static_value(pred)
if pred_ is None:
return array_ops.where(pred, cond_true, cond_false)
return cond_true if pred_ else cond_false
def _ones_like(x):
"""Convenience function attempts to statically construct `ones_like`."""
# Should only be used for small vectors.
if x.get_shape().is_fully_defined():
return array_ops.ones(x.get_shape().as_list(), dtype=x.dtype)
return array_ops.ones_like(x)
def _ndims_from_shape(shape):
"""Returns `Tensor`'s `rank` implied by a `Tensor` shape."""
if shape.get_shape().ndims not in (None, 1):
raise ValueError("input is not a valid shape: not 1D")
if not shape.dtype.is_integer:
raise TypeError("input is not a valid shape: wrong dtype")
if shape.get_shape().is_fully_defined():
return constant_op.constant(shape.get_shape().as_list()[0])
return array_ops.shape(shape)[0]
def _is_scalar_from_shape(shape):
"""Returns `True` `Tensor` if `Tensor` shape implies a scalar."""
return _logical_equal(_ndims_from_shape(shape), 0)
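# --- Hedged illustration (not part of the original module) ---
# As the comment above the helpers explains, each helper folds the operation
# into a constant whenever every input has a static value, and falls back to
# building the graph op otherwise. The function below demonstrates that with
# `_logical_and`; it assumes a TF1-style graph context and is never called by
# this module.
def _example_static_vs_dynamic_logical_and():
  static_true = constant_op.constant(True)
  # Both inputs are statically known, so the result is folded into a constant.
  folded = _logical_and(static_true, constant_op.constant(True))
  # With a value only known at run time, a regular logical_and op is built.
  dynamic = array_ops.placeholder(dtypes.bool, shape=[])
  graph_op = _logical_and(static_true, dynamic)
  return folded, graph_op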
class TransformedDistribution(distribution_lib.Distribution):
"""A Transformed Distribution.
A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
and a deterministic, invertible, differentiable transform, `Y = g(X)`. The
transform is typically an instance of the `Bijector` class and the base
distribution is typically an instance of the `Distribution` class.
A `Bijector` is expected to implement the following functions:
- `forward`,
- `inverse`,
- `inverse_log_det_jacobian`.
The semantics of these functions are outlined in the `Bijector` documentation.
We now describe how a `TransformedDistribution` alters the input/outputs of a
`Distribution` associated with a random variable (rv) `X`.
Write `cdf(Y=y)` for an absolutely continuous cumulative distribution function
of random variable `Y`; write the probability density function `pdf(Y=y) :=
d^k / (dy_1,...,dy_k) cdf(Y=y)` for its derivative wrt to `Y` evaluated at
`y`. Assume that `Y = g(X)` where `g` is a deterministic diffeomorphism,
i.e., a non-random, continuous, differentiable, and invertible function.
Write the inverse of `g` as `X = g^{-1}(Y)` and `(J o g)(x)` for the Jacobian
of `g` evaluated at `x`.
A `TransformedDistribution` implements the following operations:
* `sample`
Mathematically: `Y = g(X)`
Programmatically: `bijector.forward(distribution.sample(...))`
* `log_prob`
Mathematically: `(log o pdf)(Y=y) = (log o pdf o g^{-1})(y)
+ (log o abs o det o J o g^{-1})(y)`
Programmatically: `(distribution.log_prob(bijector.inverse(y))
+ bijector.inverse_log_det_jacobian(y))`
* `log_cdf`
Mathematically: `(log o cdf)(Y=y) = (log o cdf o g^{-1})(y)`
Programmatically: `distribution.log_cdf(bijector.inverse(x))`
* and similarly for: `cdf`, `prob`, `log_survival_function`,
`survival_function`.
A simple example constructing a Log-Normal distribution from a Normal
distribution:
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Exp(),
name="LogNormalTransformedDistribution")
```
A `LogNormal` made from callables:
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
inverse_log_det_jacobian_fn=(
        lambda y: -tf.reduce_sum(tf.log(y), axis=-1))),
name="LogNormalTransformedDistribution")
```
Another example constructing a Normal from a StandardNormal:
```python
ds = tf.contrib.distributions
normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(
shift=-1.,
      scale_identity_multiplier=2.),
name="NormalTransformedDistribution")
```
A `TransformedDistribution`'s batch- and event-shape are implied by the base
distribution unless explicitly overridden by `batch_shape` or `event_shape`
arguments. Specifying an overriding `batch_shape` (`event_shape`) is
permitted only if the base distribution has scalar batch-shape (event-shape).
The bijector is applied to the distribution as if the distribution possessed
the overridden shape(s). The following example demonstrates how to construct a
multivariate Normal as a `TransformedDistribution`.
```python
ds = tf.contrib.distributions
# We will create two MVNs with batch_shape = event_shape = 2.
mean = [[-1., 0], # batch:0
[0., 1]] # batch:1
chol_cov = [[[1., 0],
[0, 1]], # batch:0
[[1, 0],
[2, 2]]] # batch:1
mvn1 = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(shift=mean, scale_tril=chol_cov),
batch_shape=[2], # Valid because base_distribution.batch_shape == [].
event_shape=[2]) # Valid because base_distribution.event_shape == [].
mvn2 = ds.MultivariateNormalTriL(loc=mean, scale_tril=chol_cov)
# mvn1.log_prob(x) == mvn2.log_prob(x)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
distribution,
bijector=None,
batch_shape=None,
event_shape=None,
validate_args=False,
name=None):
"""Construct a Transformed Distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
bijector: The object responsible for calculating the transformation.
Typically an instance of `Bijector`. `None` means `Identity()`.
batch_shape: `integer` vector `Tensor` which overrides `distribution`
`batch_shape`; valid only if `distribution.is_scalar_batch()`.
event_shape: `integer` vector `Tensor` which overrides `distribution`
`event_shape`; valid only if `distribution.is_scalar_event()`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class. Default:
`bijector.name + distribution.name`.
"""
parameters = dict(locals())
name = name or (("" if bijector is None else bijector.name) +
distribution.name)
with ops.name_scope(name, values=[event_shape, batch_shape]) as name:
# For convenience we define some handy constants.
self._zero = constant_op.constant(0, dtype=dtypes.int32, name="zero")
self._empty = constant_op.constant([], dtype=dtypes.int32, name="empty")
if bijector is None:
bijector = identity_bijector.Identity(validate_args=validate_args)
# We will keep track of a static and dynamic version of
# self._is_{batch,event}_override. This way we can do more prior to graph
# execution, including possibly raising Python exceptions.
self._override_batch_shape = self._maybe_validate_shape_override(
batch_shape, distribution.is_scalar_batch(), validate_args,
"batch_shape")
self._is_batch_override = _logical_not(_logical_equal(
_ndims_from_shape(self._override_batch_shape), self._zero))
self._is_maybe_batch_override = bool(
tensor_util.constant_value(self._override_batch_shape) is None or
tensor_util.constant_value(self._override_batch_shape).size != 0)
self._override_event_shape = self._maybe_validate_shape_override(
event_shape, distribution.is_scalar_event(), validate_args,
"event_shape")
self._is_event_override = _logical_not(_logical_equal(
_ndims_from_shape(self._override_event_shape), self._zero))
self._is_maybe_event_override = bool(
tensor_util.constant_value(self._override_event_shape) is None or
tensor_util.constant_value(self._override_event_shape).size != 0)
# To convert a scalar distribution into a multivariate distribution we
# will draw dims from the sample dims, which are otherwise iid. This is
# easy to do except in the case that the base distribution has batch dims
# and we're overriding event shape. When that case happens the event dims
# will incorrectly be to the left of the batch dims. In this case we'll
# cyclically permute left the new dims.
self._needs_rotation = _logical_and(
self._is_event_override,
_logical_not(self._is_batch_override),
_logical_not(distribution.is_scalar_batch()))
override_event_ndims = _ndims_from_shape(self._override_event_shape)
self._rotate_ndims = _pick_scalar_condition(
self._needs_rotation, override_event_ndims, 0)
# We'll be reducing the head dims (if at all), i.e., this will be []
# if we don't need to reduce.
self._reduce_event_indices = math_ops.range(
self._rotate_ndims - override_event_ndims, self._rotate_ndims)
self._distribution = distribution
self._bijector = bijector
super(TransformedDistribution, self).__init__(
dtype=self._distribution.dtype,
reparameterization_type=self._distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
# We let TransformedDistribution access _graph_parents since this class
# is more like a baseclass than derived.
graph_parents=(distribution._graph_parents + # pylint: disable=protected-access
bijector.graph_parents),
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._distribution
@property
def bijector(self):
"""Function transforming x => y."""
return self._bijector
def _event_shape_tensor(self):
return self.bijector.forward_event_shape_tensor(
distribution_util.pick_vector(
self._is_event_override,
self._override_event_shape,
self.distribution.event_shape_tensor()))
def _event_shape(self):
# If there's a chance that the event_shape has been overridden, we return
# what we statically know about the `event_shape_override`. This works
# because: `_is_maybe_event_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `event_shape` or we do.
#
# Since the `bijector` may change the `event_shape`, we then forward what we
# know to the bijector. This allows the `bijector` to have final say in the
# `event_shape`.
static_override = tensor_util.constant_value_as_shape(
self._override_event_shape)
return self.bijector.forward_event_shape(
static_override
if self._is_maybe_event_override
else self.distribution.event_shape)
def _batch_shape_tensor(self):
return distribution_util.pick_vector(
self._is_batch_override,
self._override_batch_shape,
self.distribution.batch_shape_tensor())
def _batch_shape(self):
# If there's a chance that the batch_shape has been overridden, we return
# what we statically know about the `batch_shape_override`. This works
# because: `_is_maybe_batch_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `batch_shape` or we do.
#
# Notice that this implementation parallels the `_event_shape` except that
# the `bijector` doesn't get to alter the `batch_shape`. Recall that
# `batch_shape` is a property of a distribution while `event_shape` is
# shared between both the `distribution` instance and the `bijector`.
static_override = tensor_util.constant_value_as_shape(
self._override_batch_shape)
return (static_override
if self._is_maybe_batch_override
else self.distribution.batch_shape)
def _sample_n(self, n, seed=None):
sample_shape = _concat_vectors(
distribution_util.pick_vector(self._needs_rotation, self._empty, [n]),
self._override_batch_shape,
self._override_event_shape,
distribution_util.pick_vector(self._needs_rotation, [n], self._empty))
x = self.distribution.sample(sample_shape=sample_shape, seed=seed)
x = self._maybe_rotate_dims(x)
# We'll apply the bijector in the `_call_sample_n` function.
return x
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
# We override `_call_sample_n` rather than `_sample_n` so we can ensure that
# the result of `self.bijector.forward` is not modified (and thus caching
# works).
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
# First, generate samples. We will possibly generate extra samples in the
# event that we need to reinterpret the samples as part of the
# event_shape.
x = self._sample_n(n, seed, **kwargs)
# Next, we reshape `x` into its final form. We do this prior to the call
# to the bijector to ensure that the bijector caching works.
batch_event_shape = array_ops.shape(x)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
x = array_ops.reshape(x, final_shape)
# Finally, we apply the bijector's forward transformation. For caching to
# work, it is imperative that this is the last modification to the
# returned result.
y = self.bijector.forward(x, **kwargs)
y = self._set_sample_static_shape(y, sample_shape)
return y
def _log_prob(self, y):
# For caching to work, it is imperative that the bijector is the first to
# modify the input.
x = self.bijector.inverse(y)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_log_prob_for_one_fiber(y, x, ildj, event_ndims)
lp_on_fibers = [
self._finish_log_prob_for_one_fiber(y, x_i, ildj_i, event_ndims)
for x_i, ildj_i in zip(x, ildj)]
return math_ops.reduce_logsumexp(array_ops.stack(lp_on_fibers), axis=0)
def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x)
if self._is_maybe_event_override:
log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)
log_prob += math_ops.cast(ildj, log_prob.dtype)
if self._is_maybe_event_override and isinstance(event_ndims, int):
log_prob.set_shape(
array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-event_ndims],
self.batch_shape))
return log_prob
def _prob(self, y):
x = self.bijector.inverse(y)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_prob_for_one_fiber(y, x, ildj, event_ndims)
prob_on_fibers = [
self._finish_prob_for_one_fiber(y, x_i, ildj_i, event_ndims)
for x_i, ildj_i in zip(x, ildj)]
return sum(prob_on_fibers)
def _finish_prob_for_one_fiber(self, y, x, ildj, event_ndims):
"""Finish computation of prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x)
if self._is_maybe_event_override:
prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
prob *= math_ops.exp(math_ops.cast(ildj, prob.dtype))
if self._is_maybe_event_override and isinstance(event_ndims, int):
prob.set_shape(
array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-event_ndims],
self.batch_shape))
return prob
def _log_cdf(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("log_cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_cdf is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.log_cdf(x)
def _cdf(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("cdf is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.cdf(x)
def _log_survival_function(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("log_survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_survival_function is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.log_survival_function(x)
def _survival_function(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("survival_function is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.survival_function(x)
def _quantile(self, value):
if self._is_maybe_event_override:
raise NotImplementedError("quantile is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("quantile is not implemented when "
"bijector is not injective.")
# x_q is the "qth quantile" of X iff q = P[X <= x_q]. Now, since X =
# g^{-1}(Y), q = P[X <= x_q] = P[g^{-1}(Y) <= x_q] = P[Y <= g(x_q)],
# implies the qth quantile of Y is g(x_q).
inv_cdf = self.distribution.quantile(value)
return self.bijector.forward(inv_cdf)
def _entropy(self):
if not self.bijector.is_constant_jacobian:
raise NotImplementedError("entropy is not implemented")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("entropy is not implemented when "
"bijector is not injective.")
# Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
# can be shown that:
# H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
# If is_constant_jacobian then:
# E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
# where c can by anything.
entropy = self.distribution.entropy()
if self._is_maybe_event_override:
# H[X] = sum_i H[X_i] if X_i are mutually independent.
# This means that a reduce_sum is a simple rescaling.
entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
dtype=entropy.dtype.base_dtype)
if self._is_maybe_batch_override:
new_shape = array_ops.concat([
_ones_like(self._override_batch_shape),
self.distribution.batch_shape_tensor()
], 0)
entropy = array_ops.reshape(entropy, new_shape)
multiples = array_ops.concat([
self._override_batch_shape,
_ones_like(self.distribution.batch_shape_tensor())
], 0)
entropy = array_ops.tile(entropy, multiples)
dummy = array_ops.zeros(
shape=array_ops.concat(
[self.batch_shape_tensor(), self.event_shape_tensor()],
0),
dtype=self.dtype)
event_ndims = (self.event_shape.ndims if self.event_shape.ndims is not None
else array_ops.size(self.event_shape_tensor()))
ildj = self.bijector.inverse_log_det_jacobian(
dummy, event_ndims=event_ndims)
entropy -= math_ops.cast(ildj, entropy.dtype)
entropy.set_shape(self.batch_shape)
return entropy
def _maybe_validate_shape_override(self, override_shape, base_is_scalar,
validate_args, name):
"""Helper to __init__ which ensures override batch/event_shape are valid."""
if override_shape is None:
override_shape = []
override_shape = ops.convert_to_tensor(override_shape, dtype=dtypes.int32,
name=name)
if not override_shape.dtype.is_integer:
raise TypeError("shape override must be an integer")
override_is_scalar = _is_scalar_from_shape(override_shape)
if tensor_util.constant_value(override_is_scalar):
return self._empty
dynamic_assertions = []
if override_shape.get_shape().ndims is not None:
if override_shape.get_shape().ndims != 1:
raise ValueError("shape override must be a vector")
elif validate_args:
dynamic_assertions += [check_ops.assert_rank(
override_shape, 1,
message="shape override must be a vector")]
if tensor_util.constant_value(override_shape) is not None:
if any(s <= 0 for s in tensor_util.constant_value(override_shape)):
raise ValueError("shape override must have positive elements")
elif validate_args:
dynamic_assertions += [check_ops.assert_positive(
override_shape,
message="shape override must have positive elements")]
is_both_nonscalar = _logical_and(_logical_not(base_is_scalar),
_logical_not(override_is_scalar))
if tensor_util.constant_value(is_both_nonscalar) is not None:
if tensor_util.constant_value(is_both_nonscalar):
raise ValueError("base distribution not scalar")
elif validate_args:
dynamic_assertions += [check_ops.assert_equal(
is_both_nonscalar, False,
message="base distribution not scalar")]
if not dynamic_assertions:
return override_shape
return control_flow_ops.with_dependencies(
dynamic_assertions, override_shape)
def _maybe_rotate_dims(self, x, rotate_right=False):
"""Helper which rolls left event_dims left or right event_dims right."""
needs_rotation_const = tensor_util.constant_value(self._needs_rotation)
if needs_rotation_const is not None and not needs_rotation_const:
return x
ndims = array_ops.rank(x)
n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims
return array_ops.transpose(
x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))
def _maybe_get_static_event_ndims(self):
if self.event_shape.ndims is not None:
return self.event_shape.ndims
event_ndims = array_ops.size(self.event_shape_tensor())
event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
if event_ndims_ is not None:
return event_ndims_
return event_ndims
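# --- Hedged usage sketch (not part of the original module) ---
# The class docstring above notes that ``batch_shape``/``event_shape`` may be
# overridden only when the base distribution is scalar. The helper below builds
# such a distribution with the identity bijector so that only the shape
# override is exercised; it assumes the TF1-era ``Normal`` distribution from
# this package and is never called by the module itself.
def _example_batch_shape_override():
  from tensorflow.python.ops.distributions import normal as normal_lib
  base = normal_lib.Normal(loc=0., scale=1.)  # scalar batch and event shape
  # Three i.i.d. copies of the base distribution, transformed by Identity.
  dist = TransformedDistribution(
      distribution=base,
      bijector=identity_bijector.Identity(),
      batch_shape=[3])
  return dist.batch_shape_tensor()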
|
|
# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import logging
import os
import os.path
import time
import uuid
import warnings
from collections import deque
from io import SEEK_END, BytesIO
from typing import (
AnyStr,
Callable,
Dict,
Iterable,
MutableMapping,
Optional,
Tuple,
Type,
Union,
)
from unittest.mock import Mock
import attr
from typing_extensions import Deque
from zope.interface import implementer
from twisted.internet import address, threads, udp
from twisted.internet._resolver import SimpleResolverComplexifier
from twisted.internet.defer import Deferred, fail, maybeDeferred, succeed
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
IAddress,
IHostnameResolver,
IProtocol,
IPullProducer,
IPushProducer,
IReactorPluggableNameResolver,
IReactorTime,
IResolverSimple,
ITransport,
)
from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from twisted.web.http_headers import Headers
from twisted.web.resource import IResource
from twisted.web.server import Request, Site
from synapse.config.database import DatabaseConnectionConfig
from synapse.http.site import SynapseRequest
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.engines import PostgresEngine, create_engine
from synapse.types import JsonDict
from synapse.util import Clock
from tests.utils import (
LEAVE_DB,
POSTGRES_BASE_DB,
POSTGRES_HOST,
POSTGRES_PASSWORD,
POSTGRES_USER,
SQLITE_PERSIST_DB,
USE_POSTGRES_FOR_TESTS,
MockClock,
default_config,
)
logger = logging.getLogger(__name__)
class TimedOutException(Exception):
"""
A web query timed out.
"""
@attr.s
class FakeChannel:
"""
A fake Twisted Web Channel (the part that interfaces with the
wire).
"""
site = attr.ib(type=Union[Site, "FakeSite"])
_reactor = attr.ib()
result = attr.ib(type=dict, default=attr.Factory(dict))
_ip = attr.ib(type=str, default="127.0.0.1")
_producer: Optional[Union[IPullProducer, IPushProducer]] = None
@property
def json_body(self):
return json.loads(self.text_body)
@property
def text_body(self) -> str:
"""The body of the result, utf-8-decoded.
Raises an exception if the request has not yet completed.
"""
        if not self.is_finished():
raise Exception("Request not yet completed")
return self.result["body"].decode("utf8")
def is_finished(self) -> bool:
"""check if the response has been completely received"""
return self.result.get("done", False)
@property
def code(self):
if not self.result:
raise Exception("No result yet.")
return int(self.result["code"])
@property
def headers(self) -> Headers:
if not self.result:
raise Exception("No result yet.")
h = Headers()
for i in self.result["headers"]:
h.addRawHeader(*i)
return h
def writeHeaders(self, version, code, reason, headers):
self.result["version"] = version
self.result["code"] = code
self.result["reason"] = reason
self.result["headers"] = headers
def write(self, content):
assert isinstance(content, bytes), "Should be bytes! " + repr(content)
if "body" not in self.result:
self.result["body"] = b""
self.result["body"] += content
def registerProducer(self, producer, streaming):
self._producer = producer
self.producerStreaming = streaming
def _produce():
if self._producer:
self._producer.resumeProducing()
self._reactor.callLater(0.1, _produce)
if not streaming:
self._reactor.callLater(0.0, _produce)
def unregisterProducer(self):
if self._producer is None:
return
self._producer = None
def requestDone(self, _self):
self.result["done"] = True
def getPeer(self):
# We give an address so that getClientIP returns a non null entry,
# causing us to record the MAU
return address.IPv4Address("TCP", self._ip, 3423)
def getHost(self):
# this is called by Request.__init__ to configure Request.host.
return address.IPv4Address("TCP", "127.0.0.1", 8888)
def isSecure(self):
return False
@property
def transport(self):
return self
def await_result(self, timeout_ms: int = 1000) -> None:
"""
Wait until the request is finished.
"""
end_time = self._reactor.seconds() + timeout_ms / 1000.0
self._reactor.run()
while not self.is_finished():
# If there's a producer, tell it to resume producing so we get content
if self._producer:
self._producer.resumeProducing()
if self._reactor.seconds() > end_time:
raise TimedOutException("Timed out waiting for request to finish.")
self._reactor.advance(0.1)
def extract_cookies(self, cookies: MutableMapping[str, str]) -> None:
"""Process the contents of any Set-Cookie headers in the response
Any cookies found are added to the given dict
"""
headers = self.headers.getRawHeaders("Set-Cookie")
if not headers:
return
for h in headers:
parts = h.split(";")
k, v = parts[0].split("=", maxsplit=1)
cookies[k] = v
class FakeSite:
"""
A fake Twisted Web Site, with mocks of the extra things that
Synapse adds.
"""
server_version_string = b"1"
site_tag = "test"
access_logger = logging.getLogger("synapse.access.http.fake")
def __init__(self, resource: IResource, reactor: IReactorTime):
"""
Args:
resource: the resource to be used for rendering all requests
"""
self._resource = resource
self.reactor = reactor
def getResourceFor(self, request):
return self._resource
def make_request(
reactor,
site: Union[Site, FakeSite],
method: Union[bytes, str],
path: Union[bytes, str],
content: Union[bytes, str, JsonDict] = b"",
access_token: Optional[str] = None,
request: Type[Request] = SynapseRequest,
shorthand: bool = True,
federation_auth_origin: Optional[bytes] = None,
content_is_form: bool = False,
await_result: bool = True,
custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None,
client_ip: str = "127.0.0.1",
) -> FakeChannel:
"""
Make a web request using the given method, path and content, and render it
Returns the fake Channel object which records the response to the request.
Args:
reactor:
site: The twisted Site to use to render the request
method: The HTTP request method ("verb").
path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and such).
content: The body of the request. A dict is JSON-encoded; a str is encoded as UTF-8.
access_token: The access token to add as authorization for the request.
request: The request class to create.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
federation_auth_origin: if set to not-None, we will add a fake
Authorization header pretending to be the given server name.
content_is_form: Whether the content is URL encoded form data. Adds the
'Content-Type': 'application/x-www-form-urlencoded' header.
await_result: whether to wait for the request to complete rendering. If true,
will pump the reactor until the renderer tells the channel the request
is finished.
custom_headers: (name, value) pairs to add as request headers
client_ip: The IP to use as the requesting IP. Useful for testing
ratelimiting.
Returns:
channel
"""
if not isinstance(method, bytes):
method = method.encode("ascii")
if not isinstance(path, bytes):
path = path.encode("ascii")
# Decorate it to be the full path, if we're using shorthand
if (
shorthand
and not path.startswith(b"/_matrix")
and not path.startswith(b"/_synapse")
):
if path.startswith(b"/"):
path = path[1:]
path = b"/_matrix/client/r0/" + path
if not path.startswith(b"/"):
path = b"/" + path
if isinstance(content, dict):
content = json.dumps(content).encode("utf8")
if isinstance(content, str):
content = content.encode("utf8")
channel = FakeChannel(site, reactor, ip=client_ip)
req = request(channel, site)
req.content = BytesIO(content)
# Twisted expects to be at the end of the content when parsing the request.
req.content.seek(0, SEEK_END)
if access_token:
req.requestHeaders.addRawHeader(
b"Authorization", b"Bearer " + access_token.encode("ascii")
)
if federation_auth_origin is not None:
req.requestHeaders.addRawHeader(
b"Authorization",
b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,),
)
if content:
if content_is_form:
req.requestHeaders.addRawHeader(
b"Content-Type", b"application/x-www-form-urlencoded"
)
else:
# Assume the body is JSON
req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
if custom_headers:
for k, v in custom_headers:
req.requestHeaders.addRawHeader(k, v)
req.parseCookies()
req.requestReceived(method, path, b"1.1")
if await_result:
channel.await_result()
return channel
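# Illustrative sketch (not part of the original module): inside a test that already has a
# memory reactor and a Site/FakeSite wired to the homeserver's resources, a request is
# typically driven end-to-end like this; the path and token below are hypothetical.
#
#     channel = make_request(
#         reactor,
#         site,
#         "GET",
#         "/_matrix/client/r0/account/whoami",
#         access_token="some_access_token",
#     )
#     assert channel.code == 200
#     assert "user_id" in channel.json_body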
@implementer(IReactorPluggableNameResolver)
class ThreadedMemoryReactorClock(MemoryReactorClock):
"""
A MemoryReactorClock that supports callFromThread.
"""
def __init__(self):
self.threadpool = ThreadPool(self)
self._tcp_callbacks: Dict[Tuple[str, int], Callable] = {}
self._udp = []
self.lookups: Dict[str, str] = {}
self._thread_callbacks: Deque[Callable[[], None]] = deque()
lookups = self.lookups
@implementer(IResolverSimple)
class FakeResolver:
def getHostByName(self, name, timeout=None):
if name not in lookups:
return fail(DNSLookupError("OH NO: unknown %s" % (name,)))
return succeed(lookups[name])
self.nameResolver = SimpleResolverComplexifier(FakeResolver())
super().__init__()
def installNameResolver(self, resolver: IHostnameResolver) -> IHostnameResolver:
raise NotImplementedError()
def listenUDP(self, port, protocol, interface="", maxPacketSize=8196):
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
self._udp.append(p)
return p
def callFromThread(self, callback, *args, **kwargs):
"""
Make the callback fire in the next reactor iteration.
"""
cb = lambda: callback(*args, **kwargs)
# it's not safe to call callLater() here, so we append the callback to a
# separate queue.
self._thread_callbacks.append(cb)
def getThreadPool(self):
return self.threadpool
def add_tcp_client_callback(self, host: str, port: int, callback: Callable):
"""Add a callback that will be invoked when we receive a connection
attempt to the given IP/port using `connectTCP`.
Note that the callback gets run before we return the connection to the
client, which means callbacks cannot block while waiting for writes.
"""
self._tcp_callbacks[(host, port)] = callback
def connectTCP(self, host: str, port: int, factory, timeout=30, bindAddress=None):
"""Fake L{IReactorTCP.connectTCP}."""
conn = super().connectTCP(
host, port, factory, timeout=timeout, bindAddress=None
)
callback = self._tcp_callbacks.get((host, port))
if callback:
callback()
return conn
def advance(self, amount):
# first advance our reactor's time, and run any "callLater" callbacks that
# have become ready
super().advance(amount)
# now run any "callFromThread" callbacks
while True:
try:
callback = self._thread_callbacks.popleft()
except IndexError:
break
callback()
# check for more "callLater" callbacks added by the thread callback
# This isn't required in a regular reactor, but it ends up meaning that
# our database queries can complete in a single call to `advance` [1] which
# simplifies tests.
#
# [1]: we replace the threadpool backing the db connection pool with a
# mock ThreadPool which doesn't really use threads; but we still use
# reactor.callFromThread to feed results back from the db functions to the
# main thread.
super().advance(0)
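# Illustrative sketch (not part of the original module): tests usually seed DNS lookups on
# this reactor and then advance virtual time to flush both callLater and callFromThread
# work. `do_lookup` below is a hypothetical callable.
#
#     reactor = ThreadedMemoryReactorClock()
#     reactor.lookups["example.com"] = "1.2.3.4"
#     reactor.callLater(5, do_lookup)
#     reactor.advance(5)   # runs the delayed call plus any queued thread callbacks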
class ThreadPool:
"""
Threadless thread pool.
"""
def __init__(self, reactor):
self._reactor = reactor
def start(self):
pass
def stop(self):
pass
def callInThreadWithCallback(self, onResult, function, *args, **kwargs):
def _(res):
if isinstance(res, Failure):
onResult(False, res)
else:
onResult(True, res)
d = Deferred()
d.addCallback(lambda x: function(*args, **kwargs))
d.addBoth(_)
self._reactor.callLater(0, d.callback, True)
return d
def _make_test_homeserver_synchronous(server: HomeServer) -> None:
"""
Make the given test homeserver's database interactions synchronous.
"""
clock = server.get_clock()
for database in server.get_datastores().databases:
pool = database._db_pool
def runWithConnection(func, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runWithConnection,
func,
*args,
**kwargs,
)
def runInteraction(interaction, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runInteraction,
interaction,
*args,
**kwargs,
)
pool.runWithConnection = runWithConnection
pool.runInteraction = runInteraction
# Replace the thread pool with a threadless 'thread' pool
pool.threadpool = ThreadPool(clock._reactor)
pool.running = True
# We've just changed the Databases to run DB transactions on the same
# thread, so we need to disable the dedicated thread behaviour.
server.get_datastores().main.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = False
def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]:
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
return clock, hs_clock
@implementer(ITransport)
@attr.s(cmp=False)
class FakeTransport:
"""
A twisted.internet.interfaces.ITransport implementation which sends all its data
straight into an IProtocol object: it exists to connect two IProtocols together.
To use it, instantiate it with the receiving IProtocol, and then pass it to the
sending IProtocol's makeConnection method:
server = HTTPChannel()
client.makeConnection(FakeTransport(server, self.reactor))
If you want bidirectional communication, you'll need two instances.
"""
other = attr.ib()
"""The Protocol object which will receive any data written to this transport.
:type: twisted.internet.interfaces.IProtocol
"""
_reactor = attr.ib()
"""Test reactor
:type: twisted.internet.interfaces.IReactorTime
"""
_protocol = attr.ib(default=None)
"""The Protocol which is producing data for this transport. Optional, but if set
will get called back for connectionLost() notifications etc.
"""
_peer_address: Optional[IAddress] = attr.ib(default=None)
"""The value to be returend by getPeer"""
disconnecting = False
disconnected = False
connected = True
buffer = attr.ib(default=b"")
producer = attr.ib(default=None)
autoflush = attr.ib(default=True)
def getPeer(self):
return self._peer_address
def getHost(self):
return None
def loseConnection(self, reason=None):
if not self.disconnecting:
logger.info("FakeTransport: loseConnection(%s)", reason)
self.disconnecting = True
if self._protocol:
self._protocol.connectionLost(reason)
# if we still have data to write, delay until that is done
if self.buffer:
logger.info(
"FakeTransport: Delaying disconnect until buffer is flushed"
)
else:
self.connected = False
self.disconnected = True
def abortConnection(self):
logger.info("FakeTransport: abortConnection()")
if not self.disconnecting:
self.disconnecting = True
if self._protocol:
self._protocol.connectionLost(None)
self.disconnected = True
def pauseProducing(self):
if not self.producer:
return
self.producer.pauseProducing()
def resumeProducing(self):
if not self.producer:
return
self.producer.resumeProducing()
def unregisterProducer(self):
if not self.producer:
return
self.producer = None
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerStreaming = streaming
def _produce():
if not self.producer:
# we've been unregistered
return
# some implementations of IProducer (for example, FileSender)
# don't return a deferred.
d = maybeDeferred(self.producer.resumeProducing)
d.addCallback(lambda x: self._reactor.callLater(0.1, _produce))
if not streaming:
self._reactor.callLater(0.0, _produce)
def write(self, byt):
if self.disconnecting:
raise Exception("Writing to disconnecting FakeTransport")
self.buffer = self.buffer + byt
# always actually do the write asynchronously. Some protocols (notably the
# TLSMemoryBIOProtocol) get very confused if a read comes back while they are
# still doing a write. Doing a callLater here breaks the cycle.
if self.autoflush:
self._reactor.callLater(0.0, self.flush)
def writeSequence(self, seq):
for x in seq:
self.write(x)
def flush(self, maxbytes=None):
if not self.buffer:
# nothing to do. Don't write empty buffers: it upsets the
# TLSMemoryBIOProtocol
return
if self.disconnected:
return
if maxbytes is not None:
to_write = self.buffer[:maxbytes]
else:
to_write = self.buffer
logger.info("%s->%s: %s", self._protocol, self.other, to_write)
try:
self.other.dataReceived(to_write)
except Exception as e:
logger.exception("Exception writing to protocol: %s", e)
return
self.buffer = self.buffer[len(to_write) :]
if self.buffer and self.autoflush:
self._reactor.callLater(0.0, self.flush)
if not self.buffer and self.disconnecting:
logger.info("FakeTransport: Buffer now empty, completing disconnect")
self.disconnected = True
def connect_client(
reactor: ThreadedMemoryReactorClock, client_id: int
) -> Tuple[IProtocol, AccumulatingProtocol]:
"""
Connect a client to a fake TCP transport.
Args:
reactor
client_id: The index into ``reactor.tcpClients`` of the pending client connection to complete.
"""
factory = reactor.tcpClients.pop(client_id)[2]
client = factory.buildProtocol(None)
server = AccumulatingProtocol()
server.makeConnection(FakeTransport(client, reactor))
client.makeConnection(FakeTransport(server, reactor))
return client, server
class TestHomeServer(HomeServer):
DATASTORE_CLASS = DataStore
def setup_test_homeserver(
cleanup_func,
name="test",
config=None,
reactor=None,
homeserver_to_use: Type[HomeServer] = TestHomeServer,
**kwargs,
):
"""
Set up a homeserver suitable for running tests against. Keyword arguments
are passed to the Homeserver constructor.
If no datastore is supplied, one is created and given to the homeserver.
Args:
cleanup_func: The function used to register a cleanup routine to run
after the test.
Calling this method directly is deprecated: you should instead derive from
HomeserverTestCase.
"""
if reactor is None:
from twisted.internet import reactor
if config is None:
config = default_config(name, parse=True)
config.ldap_enabled = False
if "clock" not in kwargs:
kwargs["clock"] = MockClock()
if USE_POSTGRES_FOR_TESTS:
test_db = "synapse_test_%s" % uuid.uuid4().hex
database_config = {
"name": "psycopg2",
"args": {
"database": test_db,
"host": POSTGRES_HOST,
"password": POSTGRES_PASSWORD,
"user": POSTGRES_USER,
"cp_min": 1,
"cp_max": 5,
},
}
else:
if SQLITE_PERSIST_DB:
# The current working directory is in _trial_temp, so this gets created within that directory.
test_db_location = os.path.abspath("test.db")
logger.debug("Will persist db to %s", test_db_location)
# Ensure each test gets a clean database.
try:
os.remove(test_db_location)
except FileNotFoundError:
pass
else:
logger.debug("Removed existing DB at %s", test_db_location)
else:
test_db_location = ":memory:"
database_config = {
"name": "sqlite3",
"args": {"database": test_db_location, "cp_min": 1, "cp_max": 1},
}
if "db_txn_limit" in kwargs:
database_config["txn_limit"] = kwargs["db_txn_limit"]
database = DatabaseConnectionConfig("master", database_config)
config.database.databases = [database]
db_engine = create_engine(database.config)
# Create the database before we actually try and connect to it, based off
# the template database we generate in setupdb()
if isinstance(db_engine, PostgresEngine):
db_conn = db_engine.module.connect(
database=POSTGRES_BASE_DB,
user=POSTGRES_USER,
host=POSTGRES_HOST,
password=POSTGRES_PASSWORD,
)
db_conn.autocommit = True
cur = db_conn.cursor()
cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
cur.execute(
"CREATE DATABASE %s WITH TEMPLATE %s;" % (test_db, POSTGRES_BASE_DB)
)
cur.close()
db_conn.close()
hs = homeserver_to_use(
name,
config=config,
version_string="Synapse/tests",
reactor=reactor,
)
# Install @cache_in_self attributes
for key, val in kwargs.items():
setattr(hs, "_" + key, val)
# Mock TLS
hs.tls_server_context_factory = Mock()
hs.tls_client_options_factory = Mock()
hs.setup()
if homeserver_to_use == TestHomeServer:
hs.setup_background_tasks()
if isinstance(db_engine, PostgresEngine):
database = hs.get_datastores().databases[0]
# We need to do cleanup on PostgreSQL
def cleanup():
import psycopg2
# Close all the db pools
database._db_pool.close()
dropped = False
# Drop the test database
db_conn = db_engine.module.connect(
database=POSTGRES_BASE_DB,
user=POSTGRES_USER,
host=POSTGRES_HOST,
password=POSTGRES_PASSWORD,
)
db_conn.autocommit = True
cur = db_conn.cursor()
# Try a few times to drop the DB. Some things may hold on to the
# database for a few more seconds due to flakiness, preventing
# us from dropping it when the test is over. If we can't drop
# it, warn and move on.
for _ in range(5):
try:
cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
db_conn.commit()
dropped = True
except psycopg2.OperationalError as e:
warnings.warn(
"Couldn't drop old db: " + str(e), category=UserWarning
)
time.sleep(0.5)
cur.close()
db_conn.close()
if not dropped:
warnings.warn("Failed to drop old DB.", category=UserWarning)
if not LEAVE_DB:
# Register the cleanup hook
cleanup_func(cleanup)
# bcrypt is far too slow to be doing in unit tests
# Need to let the HS build an auth handler and then mess with it
# because AuthHandler's constructor requires the HS, so we can't make one
# beforehand and pass it in to the HS's constructor (chicken / egg)
async def hash(p):
return hashlib.md5(p.encode("utf8")).hexdigest()
hs.get_auth_handler().hash = hash
async def validate_hash(p, h):
return hashlib.md5(p.encode("utf8")).hexdigest() == h
hs.get_auth_handler().validate_hash = validate_hash
# Make the threadpool and database transactions synchronous for testing.
_make_test_homeserver_synchronous(hs)
return hs
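# Illustrative sketch (not part of the original module): tests normally obtain a homeserver
# by subclassing HomeserverTestCase (assumed to live elsewhere in this test suite, e.g.
# tests.unittest) rather than calling setup_test_homeserver directly:
#
#     class MyTestCase(HomeserverTestCase):
#         def make_homeserver(self, reactor, clock):
#             return self.setup_test_homeserver(name="test")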
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpSuccess(object):
"""HttpSuccess operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def head200(
self, custom_headers=None, raw=False, **operation_config):
"""Return 200 status code if successful.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/200'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get200(
self, custom_headers=None, raw=False, **operation_config):
"""Get 200 success.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/200'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put200(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Put boolean value true returning 200 success.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/200'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch200(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Patch true Boolean value in request returning 200.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/200'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post200(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Post bollean value true in request that returns a 200.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/200'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete200(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Delete simple boolean value true returns 200.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/200'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put201(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Put true Boolean value in request returns 201.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/201'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post201(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Post true Boolean value in request returns 201 (Created).
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/201'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put202(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Put true Boolean value in request returns 202 (Accepted).
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/202'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [202]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch202(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Patch true Boolean value in request returns 202.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/202'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [202]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post202(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Post true Boolean value in request returns 202 (Accepted).
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/202'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [202]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete202(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Delete true Boolean value in request returns 202 (accepted).
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/202'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [202]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def head204(
self, custom_headers=None, raw=False, **operation_config):
"""Return 204 status code if successful.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/204'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put204(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Put true Boolean value in request returns 204 (no content).
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/204'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch204(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Patch true Boolean value in request returns 204 (no content).
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/204'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post204(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Post true Boolean value in request returns 204 (no content).
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/204'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete204(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Delete true Boolean value in request returns 204 (no content).
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/204'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def head404(
self, custom_headers=None, raw=False, **operation_config):
"""Return 404 status code.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/success/404'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204, 404]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
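# Illustrative sketch (not part of the generated file): these operations are reached through
# the generated service client rather than constructed directly. The client class name,
# attribute name and base URL below are assumptions based on the usual AutoRest test
# server setup:
#
#     client = AutoRestHttpInfrastructureTestService(base_url="http://localhost:3000")
#     client.http_success.head200()              # raises ErrorException on failure
#     assert client.http_success.get200() is True
#     client.http_success.put200(boolean_value=True)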
|
|
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import sys
from . import cv_types as cv
@contextmanager
def fwopen(filename=None):
f = open(filename, 'w') if filename else sys.stdout
yield f
if filename:
f.close()
class CVML():
def __new__(cls, file_name=None, markup=None):
if not hasattr(cls, '_CVML__ML') or len(cls.__ML) == 0:
raise CVMLException('No markup formatter has been registered.')
if markup:
if isinstance(markup, CVML):
ml = type(markup)
else:
ml = cls.__ML.get(markup.lower())
elif file_name:
ml = cls.__ML.get(file_name.rsplit('.', 1)[-1].lower())
else:
ml = cls
if ml:
return super().__new__(ml)
else:
raise CVWriterException('Markup not found')
@classmethod
def register(cls, names, markup):
if not issubclass(markup, CVML):
return
if not hasattr(cls, '_CVML__ML'):  # __ML is name-mangled to _CVML__ML
cls.__ML = {}
if isinstance(names, str):
nms = [names.lower()]
else:
nms = [n.lower() for n in names]
for n in nms:
cls.__ML[n] = markup
def __getattr__(self, name):
wlist = ['title', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'strong', 'p']
if name.lower() in wlist:
return self.__write
else:
raise AttributeError(name)
def __write(self, text):
return '%s\n' % text
def ul(self, items):
if isinstance(items, str):
return '- {0}\n'.format(items)
else:
return ''.join([self.ul(i) for i in items])
def fl(self, items):
if isinstance(items, str) or len(items) < 2:
return self.ul(items)
if items[0]:
return '- {0} | {1}'.format(items[0], items[1])
return '%s\n' % items[1]
def fd(self, text):
return self.fl(text)
class CVReST(CVML):
def __str__(self):
return 'RestructuredText'
def __getattr__(self, name):
hl = dict(h1='=', h2='-', h3='~', h4='"', h5="'", h6='`')
if name in hl:
return self.__h(hl[name])
else:
raise AttributeError(name)
def title(self, text):
s = '#' * len(text)
return '{0}\n{1}\n{0}\n\n'.format(s, text)
def __h(self, hx):
def __hx(text):
s = hx * len(text)
return '{0}\n{1}\n\n'.format(text, s)
return __hx
def ul(self, items, idx=0):
s = ['-', '*', '+'][idx % 3]
if isinstance(items, str):
return '{1} {0}\n'.format(items, s)
else:
return '{0}\n'.format(''.join([self.ul(i, idx+1) for i in items]))
def fl(self, items):
if isinstance(items, str) or len(items) < 2:
return self.ul(items)
if items[0]:
return self.ul('**{0}** {1}'.format(*items))
return self.ul('%s' % items[1])
def fd(self, item):
return ':{0}: {1}\n'.format(*item)
def strong(self, text):
return '**{0}**\n\n'.format(text)
def p(self, text):
return '{0}\n\n'.format(text)
class CVWriter():
def __init__(self, person):
self.p = person
def write(self, file_name=None, markup=None):
ml = CVML(file_name, markup)
lwrote = []
with fwopen(file_name) as f:
f.write(ml.title(self.p.full_name))
f.write(ml.h1(self.p.aim))
lwrote.append(self.__write_NamedFields('summary', f, ml))
lwrote.append(self.__write_WorkExperience(f, ml))
lwrote.append(self.__write_Education(f, ml))
lwrote.append(self.__write_NamedFields('lang', f, ml))
for i in [s for s in self.p if s[0] not in lwrote]:
self.__write_FieldList(f, ml, i)
def __write_Education(self, f, ml):
ed = self.p.get('education') or self.p.get('academic')
if ed:
f.write(ml.h2('\n%s' % ed[0]))
for e in ed[1]:
if e.end:
s1 = '{0} - {1}. {2}'.format(e.start, e.end, e.name)
else:
s1 = '{0}. {1}'.format(e.start, e.name)
f.write(ml.strong(s1))
if e.dep:
f.write(ml.p(e.dep))
if e.grad:
f.write(ml.p('Graduate: {0}'.format(e.grad)))
return ed[0]
return None
def __write_WorkExperience(self, f, ml):
we = self.p['experience']
f.write(ml.h2('\n%s' % we[0]))
for w in we[1]:
f.write(ml.h3('{0} -- {1}'.format(w.start.strf(), w.end.strf()
if w.end else 'now')))
f.write(ml.h4(w.company))
f.write(ml.h4(w.position))
for (k, v) in w:
f.write(ml.h5('%s:' % k))
f.write(ml.ul(v))
return we[0]
def __write_FieldList(self, f, ml, itm):
f.write(ml.h2('\n%s' % itm[0]))
for s in itm[1]:
if isinstance(s, str):
f.write('%s\n' % s)
else:
try:
ff = [ml.ul, ml.fd, ml.fl][s[2]]
except Exception:
ff = ml.ul
f.write('%s' % ff(s))
return itm[0]
def __write_NamedFields(self, name, f, ml):
itm = self.p[name]
return self.__write_FieldList(f, ml, itm)
class CVMLException(cv.CVException):
pass
class CVWriterException(cv.CVException):
pass
CVML.register(['rest', 'rst'], CVReST)
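# Illustrative sketch (not part of the original module): given a `person` object exposing
# what CVWriter expects (full_name, aim, item access and iteration over CV sections), the
# markup is chosen from the file extension or requested explicitly:
#
#     writer = CVWriter(person)
#     writer.write('cv.rst')          # picks CVReST via the 'rst' registration above
#     writer.write(markup='rest')     # same formatter, written to stdout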
|
|
#-*- coding: utf-8 -*-
import numpy as np
import matplotlib
#matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.image import NonUniformImage
import matplotlib.colors as colo
from dear.spectrum import cqt, dft, auditory, SpectrogramFile
from dear.analysis import MFCCs
def plot_spectrogram(spec, Xd=(0,1), Yd=(0,1), norm=colo.LogNorm(vmin=0.000001), figname=None):
#
x_min, x_max = Xd
y_min, y_max = Yd
#
fig = plt.figure(num=figname)
nf = len(spec)
for ch, data in enumerate(spec):
#print ch, data.shape
x = np.linspace(x_min, x_max, data.shape[0])
y = np.linspace(y_min, y_max, data.shape[1])
#print x[0],x[-1],y[0],y[-1]
ax = fig.add_subplot(nf*100+11+ch)
im = NonUniformImage(ax, interpolation='bilinear', cmap=cm.gray_r,
norm=norm)
im.set_data(x, y, data.T)
ax.images.append(im)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_title('Channel %d' % ch)
#ax.set_xlabel('timeline')
ax.set_ylabel('frequency')
print 'Statistics: max<%.3f> min<%.3f> mean<%.3f> median<%.3f>' % (data.max(), data.min(), data.mean(), np.median(data))
#
plt.show()
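# Illustrative sketch (not part of the original script): plot_spectrogram expects a sequence
# of 2-D arrays, one per channel, with time along axis 0 and frequency along axis 1. A
# synthetic example:
#
#     demo = np.abs(np.random.randn(1, 200, 64))     # 1 channel, 200 frames, 64 bins
#     plot_spectrogram(demo, Xd=(0, 200), Yd=(0, 64), norm=colo.Normalize())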
if __name__ == '__main__':
import getopt, sys
def exit_with_usage():
print """Usage: $ python -m dear.spectrogram <options> /path/to/song
Options:
[-s] start time in second, default 0
[-t] end time, default is duration of song
[-o] output file
[-g] type of spectrogram, default dft:
dft --- Discrete Fourier Transform
[-w] window size, default 2048
[-h] step size, default 512
cqt --- Constant-Q Transform
[-q] Q, default 34
[-h] hop in second, default 0.02
cnt --- Constant-N Transform
[-n] N, default 24
[-h] hop in second, default 0.02
[-r] resize window size of each of the bands if specified.
gmt --- Gammatone Wavelet Transform
[-n] N, default 64
[-w] combine length in second, default 0.025
[-h] hop in second, default 0.01
[-f] frequency boundary, default (110, 4435)
Y1...Y5 --- Auditory Spectrograms
[-n] N, default 64
[-f] frequency boundary, default (110, 4435)
[-c] Combine frames by -w and -h.
[-w] combine length in second, default 0.025
[-h] hop in second, default 0.01
mfcc --- MFCCs Spectrogram
[-n] number of bands, default 20
[-w] window size, default 2048
[-h] step size, default 1024
[-f] frequency boundary, default (0, 7040)
"""
exit()
try:
opts, args = getopt.getopt(sys.argv[1:], "g:s:t:o:h:w:q:n:f:b:rc")
except getopt.GetoptError as ex:
print ex
exit_with_usage()
if len(args) != 1:
#print args
exit_with_usage()
import dear.io as io
decoder = io.get_decoder(name='audioread')
audio = decoder.Audio(args[0])
print "SampleRate: %d Hz\nChannel(s): %d\nDuration: %d sec"\
% (audio.samplerate, audio.channels, audio.duration)
graph = 'dft'
st = 0
to = None
outfile = None
norm=colo.LogNorm(vmin=0.000001)
for o, a in opts:
if o == '-s':
st = float(a)
elif o == '-t':
to = float(a)
elif o == '-g':
graph = a
assert graph in ('dft','cqt','cnt','gmt','Y1','Y2','Y3','Y4','Y5','mfcc')
elif o == '-o':
outfile = a
if to is None or to > audio.duration:
r_to = audio.duration
else:
r_to = to
if graph == 'dft':
win = 2048
hop = 512
for o, a in opts:
if o == '-w':
win = int(a)
elif o == '-h':
hop = int(a)
spec = [[]]
gram = dft.PowerSpectrum(audio)
for freqs in gram.walk(win, hop, start=st, end=to, join_channels=True):
spec[0].append(freqs)
#
elif graph == 'mfcc':
N = 20
fmin, fmax = 0., 7040.
win = 2048
hop = 1024
for o, a in opts:
if o == '-w':
win = int(a)
elif o == '-h':
hop = int(a)
elif o == '-n':
N = int(a)
elif o == '-f':
fmin, fmax = [float(f) for f in a.split(',',1)]
spec = [[]]
gram = MFCCs(audio)
for freqs in gram.walk(N, fmin, fmax, win, hop, st, to):
spec[0].append(freqs)
norm = colo.Normalize()
#
elif graph == 'cqt':
Q = 34
hop = 0.02
for o, a in opts:
if o == '-q':
Q = int(a)
elif o == '-h':
hop = float(a)
spec = [[]]
gram = cqt.CQTPowerSpectrum(audio)
print 'total:', int((r_to-st)/hop)
for t,freqs in enumerate(gram.walk(Q=Q, freq_base=55., freq_max=7040, hop=hop, start=st, end=to, join_channels=True)):
if t%100 == 0:
sys.stdout.write('%d...' % t)
sys.stdout.flush()
spec[0].append(freqs)
#
elif graph == 'cnt':
N = 24
hop = 0.02
rw = False
for o, a in opts:
if o == '-n':
N = int(a)
elif o == '-h':
hop = float(a)
elif o == '-r':
rw = True
spec = [[]]
gram = cqt.CNTPowerSpectrum(audio)
print 'total:', int((r_to-st)/hop)
for t, freqs in enumerate(
gram.walk(N=N, freq_base=55., freq_max=7040, hop=hop, start=st, end=to, resize_win=rw)):
if t%100==0:
sys.stdout.write('%d...' % t)
sys.stdout.flush()
spec[0].append(freqs)
print ""
#
elif graph == 'gmt':
N = 64
win = 0.025
hop = 0.010
freqs = [110., 4435.]
combine=False
for o, a in opts:
if o == '-n':
N = int(a)
elif o == '-h':
hop = float(a)
elif o == '-w':
win = float(a)
elif o == '-f':
freqs = [float(f) for f in a.split(',',1)]
elif o == '-c':
combine=True
spec = [[]]
gram = auditory.GammatoneSpectrum(audio)
print 'total:', int((r_to-st)/hop)
for t, freqs in enumerate(
gram.walk(N=N, freq_base=freqs[0], freq_max=freqs[1],
start=st, end=to, combine=combine, twin=win, thop=hop)):
if t%100==0:
sys.stdout.write('%d...' % t)
sys.stdout.flush()
spec[0].append(freqs)
print ""
#
elif graph in ('Y1','Y2','Y3','Y4','Y5'):
N = 64
win = 0.025
hop = 0.010
freqs = [110., 4435.]
combine = False
for o, a in opts:
if o == '-n':
N = int(a)
elif o == '-h':
hop = float(a)
elif o == '-w':
win = float(a)
elif o == '-f':
freqs = [float(f) for f in a.split(',',1)]
elif o == '-c':
combine=True
spec = [[]]
gram = getattr(auditory,graph)
gram = gram(audio)
print 'total:', int((r_to-st)/hop)
for t, freqs in enumerate(
gram.walk(N=N, freq_base=freqs[0], freq_max=freqs[1],
start=st, end=to, combine=combine, twin=win, thop=hop)):
if t%100==0:
sys.stdout.write('%d...' % t)
sys.stdout.flush()
spec[0].append(freqs)
print ""
# to dB scale
dBmax, dBmin = -15., -70.
if graph in ('dft','cqt','cnt'):
magmin = 10**(dBmin/20)
for g in spec:
for i,frame in enumerate(g):
g[i] = 20*np.log10(np.maximum(frame/20.,magmin))
norm = colo.Normalize(vmin=dBmin, vmax=dBmax)
elif graph in ('gmt','Y1','Y2','Y3','Y4','Y5'):
magmin = 10**(dBmin/20)
for g in spec:
for i,frame in enumerate(g):
g[i] = 20*np.log10(np.maximum(frame,magmin))
norm = colo.Normalize(vmin=dBmin, vmax=dBmax)
figname = "%s - %s" % (graph, audio.path)
plot_spectrogram(np.array(spec), (0,len(spec[0])), (0,len(spec[0][0])), norm=norm, figname=figname)
if outfile:
out = SpectrogramFile(outfile, 'w')
out.dump(spec[0])
out.close()
|
|
"""
A library written in CUDA Python for generating reduction kernels
"""
from numba.np.numpy_support import from_dtype
_WARPSIZE = 32
_NUMWARPS = 4
def _gpu_reduce_factory(fn, nbtype):
from numba import cuda
reduce_op = cuda.jit(device=True)(fn)
inner_sm_size = _WARPSIZE + 1 # plus one to avoid SM collision
max_blocksize = _NUMWARPS * _WARPSIZE
@cuda.jit(device=True)
def inner_warp_reduction(sm_partials, init):
"""
Compute reduction within a single warp
"""
tid = cuda.threadIdx.x
warpid = tid // _WARPSIZE
laneid = tid % _WARPSIZE
sm_this = sm_partials[warpid, :]
sm_this[laneid] = init
# XXX expect warp synchronization
width = _WARPSIZE // 2
while width:
if laneid < width:
old = sm_this[laneid]
sm_this[laneid] = reduce_op(old, sm_this[laneid + width])
width //= 2
# XXX expect warp synchronization
@cuda.jit(device=True)
def device_reduce_full_block(arr, partials, sm_partials):
"""
Partially reduce `arr` into `partials` using `sm_partials` as working
space. The algorithm works as follows:
array chunks of 128: | 0 | 128 | 256 | 384 | 512 |
block-0: | x | | | x | |
block-1: | | x | | | x |
block-2: | | | x | | |
The array is divided into chunks of 128 (size of a threadblock).
The threadblocks consume the chunks in round-robin scheduling.
First, a threadblock loads a chunk into temp memory. Then, all
subsequent chunks are combined into the temp memory.
Once all chunks are processed, an inner-block reduction is performed
on the temp memory, so that there is just one scalar result
per block. The result from each block is stored to `partials` at
the dedicated slot.
"""
tid = cuda.threadIdx.x
blkid = cuda.blockIdx.x
blksz = cuda.blockDim.x
gridsz = cuda.gridDim.x
# block strided loop to compute the reduction
start = tid + blksz * blkid
stop = arr.size
step = blksz * gridsz
# load first value
tmp = arr[start]
# loop over all values in block-stride
for i in range(start + step, stop, step):
tmp = reduce_op(tmp, arr[i])
cuda.syncthreads()
# inner-warp reduction
inner_warp_reduction(sm_partials, tmp)
cuda.syncthreads()
# at this point, only the first slot for each warp in sm_partials
# is valid.
# finish up block reduction
# warning: this is assuming 4 warps.
# assert numwarps == 4
if tid < 2:
sm_partials[tid, 0] = reduce_op(sm_partials[tid, 0],
sm_partials[tid + 2, 0])
if tid == 0:
partials[blkid] = reduce_op(sm_partials[0, 0], sm_partials[1, 0])
@cuda.jit(device=True)
def device_reduce_partial_block(arr, partials, sm_partials):
"""
This computes reduction on `arr`.
This device function must be used by 1 threadblock only.
The blocksize must match `arr.size` and must not be greater than 128.
"""
tid = cuda.threadIdx.x
blkid = cuda.blockIdx.x
blksz = cuda.blockDim.x
warpid = tid // _WARPSIZE
laneid = tid % _WARPSIZE
size = arr.size
# load first value
tid = cuda.threadIdx.x
value = arr[tid]
sm_partials[warpid, laneid] = value
cuda.syncthreads()
if (warpid + 1) * _WARPSIZE < size:
# fully populated warps
inner_warp_reduction(sm_partials, value)
else:
# partially populated warps
# NOTE: this uses a very inefficient sequential algorithm
if laneid == 0:
sm_this = sm_partials[warpid, :]
base = warpid * _WARPSIZE
for i in range(1, size - base):
sm_this[0] = reduce_op(sm_this[0], sm_this[i])
cuda.syncthreads()
# finish up
if tid == 0:
num_active_warps = (blksz + _WARPSIZE - 1) // _WARPSIZE
result = sm_partials[0, 0]
for i in range(1, num_active_warps):
result = reduce_op(result, sm_partials[i, 0])
partials[blkid] = result
def gpu_reduce_block_strided(arr, partials, init, use_init):
"""
Perform reductions on *arr*, writing the partial reduction results
into *partials*. The length of *partials* is determined by the
number of threadblocks. The initial value is set with *init*.
Launch config:
Blocksize must be a multiple of the warpsize, and it is limited to 4 warps.
"""
tid = cuda.threadIdx.x
sm_partials = cuda.shared.array((_NUMWARPS, inner_sm_size),
dtype=nbtype)
if cuda.blockDim.x == max_blocksize:
device_reduce_full_block(arr, partials, sm_partials)
else:
device_reduce_partial_block(arr, partials, sm_partials)
# deal with the initializer
if use_init and tid == 0 and cuda.blockIdx.x == 0:
partials[0] = reduce_op(partials[0], init)
return cuda.jit(gpu_reduce_block_strided)
class Reduce(object):
"""Create a reduction object that reduces values using a given binary
function. The binary function is compiled once and cached inside this
object. Keeping this object alive will prevent re-compilation.
"""
_cache = {}
def __init__(self, functor):
"""
:param functor: A function implementing a binary operation for
reduction. It will be compiled as a CUDA device
function using ``cuda.jit(device=True)``.
"""
self._functor = functor
def _compile(self, dtype):
key = self._functor, dtype
if key in self._cache:
kernel = self._cache[key]
else:
kernel = _gpu_reduce_factory(self._functor, from_dtype(dtype))
self._cache[key] = kernel
return kernel
def __call__(self, arr, size=None, res=None, init=0, stream=0):
"""Performs a full reduction.
:param arr: A host or device array.
:param size: Optional integer specifying the number of elements in
``arr`` to reduce. If this parameter is not specified, the
entire array is reduced.
:param res: Optional device array into which to write the reduction
result. The result is written into the first element of
this array. If this parameter is specified, then no
communication of the reduction output takes place from the
device to the host.
:param init: Optional initial value for the reduction, the type of which
must match ``arr.dtype``.
:param stream: Optional CUDA stream in which to perform the reduction.
If no stream is specified, the default stream of 0 is
used.
:return: If ``res`` is specified, ``None`` is returned. Otherwise, the
result of the reduction is returned.
"""
from numba import cuda
# ensure 1d array
if arr.ndim != 1:
raise TypeError("only support 1D array")
# adjust array size
if size is not None:
arr = arr[:size]
init = arr.dtype.type(init) # ensure the right type
# return `init` if `arr` is empty
if arr.size < 1:
return init
kernel = self._compile(arr.dtype)
# Perform the reduction on the GPU
blocksize = _NUMWARPS * _WARPSIZE
size_full = (arr.size // blocksize) * blocksize
size_partial = arr.size - size_full
full_blockct = min(size_full // blocksize, _WARPSIZE * 2)
# allocate size of partials array
partials_size = full_blockct
if size_partial:
partials_size += 1
partials = cuda.device_array(shape=partials_size, dtype=arr.dtype)
if size_full:
# kernel for the fully populated threadblocks
kernel[full_blockct, blocksize, stream](arr[:size_full],
partials[:full_blockct],
init,
True)
if size_partial:
# kernel for partially populated threadblocks
kernel[1, size_partial, stream](arr[size_full:],
partials[full_blockct:],
init,
not full_blockct)
if partials.size > 1:
# finish up
kernel[1, partials_size, stream](partials, partials, init, False)
# handle return value
if res is not None:
res[:1].copy_to_device(partials[:1], stream=stream)
return
else:
return partials[0]
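# Illustrative usage sketch (added; not part of the original module). It
# assumes a CUDA-capable device and that this module's Reduce class is used
# as defined above; the data values are arbitrary. The second call reuses
# the cached kernel because the (functor, dtype) key is already in
# Reduce._cache.
if __name__ == '__main__':
    import numpy as np
    sum_reduce = Reduce(lambda a, b: a + b)
    data = np.arange(1, 1025, dtype=np.float64)
    print(sum_reduce(data))             # full reduction, returns a scalar
    print(sum_reduce(data, init=10.0))  # same kernel, with an initial value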
|
|
# Copyright 1999-2000 by Jeffrey Chang. All rights reserved.
# Copyright 2008 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Provides code to access NCBI over the WWW.
The main Entrez web page is available at:
http://www.ncbi.nlm.nih.gov/Entrez/
A list of the Entrez utilities is available at:
http://www.ncbi.nlm.nih.gov/entrez/utils/utils_index.html
Variables:
email Set the Entrez email parameter (default is not set).
tool Set the Entrez tool parameter (default is biopython).
Functions:
efetch Retrieves records in the requested format from a list of one or
more primary IDs or from the user's environment
epost Posts a file containing a list of primary IDs for future use in
the user's environment to use with subsequent search strategies
esearch Searches and retrieves primary IDs (for use in EFetch, ELink,
and ESummary) and term translations and optionally retains
results for future use in the user's environment.
elink Checks for the existence of an external or Related Articles link
from a list of one or more primary IDs. Retrieves primary IDs
and relevancy scores for links to Entrez databases or Related
Articles; creates a hyperlink to the primary LinkOut provider
for a specific ID and database, or lists LinkOut URLs
and Attributes for multiple IDs.
einfo Provides field index term counts, last update, and available
links for each database.
esummary Retrieves document summaries from a list of primary IDs or from
the user's environment.
egquery Provides Entrez database counts in XML for a single search
using Global Query.
espell Retrieves spelling suggestions.
read Parses the XML results returned by any of the above functions.
Typical usage is:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.einfo() # or esearch, efetch, ...
>>> record = Entrez.read(handle)
>>> handle.close()
where record is now a Python dictionary or list.
parse Parses the XML results returned by those of the above functions
which can return multiple records - such as efetch, esummary
and elink. Typical usage is:
>>> handle = Entrez.efetch("pubmed", id="19304878,14630660", retmode="xml")
>>> records = Entrez.parse(handle)
>>> for record in records:
... # each record is a Python dictionary or list.
... print record['MedlineCitation']['Article']['ArticleTitle']
Biopython: freely available Python tools for computational molecular biology and bioinformatics.
PDB file parser and structure class implemented in Python.
>>> handle.close()
This function is appropriate only if the XML file contains
multiple records, and is particularly useful for large files.
_open Internally used function.
"""
import urllib
import urllib2
import time
import warnings
#import os.path
from entrez.parser import DataHandler
#from Bio._py3k import _binary_to_string_handle, _as_bytes
email = None
tool = "biopython"
# XXX retmode?
def epost(db, **keywds):
"""Post a file of identifiers for future use.
Posts a file containing a list of UIs for future use in the user's
environment to use with subsequent search strategies.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/entrez/query/static/epost_help.html
Return a handle to the results.
Raises an IOError exception if there's a network error.
"""
cgi = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/epost.fcgi'
variables = {'db': db}
variables.update(keywds)
return _open(cgi, variables, post=True)
def efetch(db, **keywords):
"""Fetches Entrez results which are returned as a handle.
EFetch retrieves records in the requested format from a list of one or
more UIs or from user's environment.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/entrez/query/static/efetch_help.html
Return a handle to the results.
Raises an IOError exception if there's a network error.
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.efetch(db="nucleotide", id="57240072", rettype="gb", retmode="text")
>>> print handle.readline().strip()
LOCUS AY851612 892 bp DNA linear PLN 10-APR-2007
>>> handle.close()
Warning: The NCBI changed the default retmode in Feb 2012, so many
databases which previously returned text output now give XML.
"""
cgi = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
variables = {'db': db}
variables.update(keywords)
post = False
try:
ids = variables["id"]
except KeyError:
pass
else:
if isinstance(ids, list):
ids = ",".join(ids)
variables["id"] = ids
if ids.count(",") >= 200:
# NCBI prefers an HTTP POST instead of an HTTP GET if there are
# more than about 200 IDs
post = True
return _open(cgi, variables, post)
def esearch(db, term, **keywds):
"""ESearch runs an Entrez search and returns a handle to the results.
ESearch searches and retrieves primary IDs (for use in EFetch, ELink
and ESummary) and term translations, and optionally retains results
for future use in the user's environment.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/entrez/query/static/esearch_help.html
Return a handle to the results which are always in XML format.
Raises an IOError exception if there's a network error.
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.esearch(db="nucleotide", retmax=10, term="opuntia[ORGN] accD")
>>> record = Entrez.read(handle)
>>> handle.close()
>>> record["Count"] >= 2
True
>>> "156535671" in record["IdList"]
True
>>> "156535673" in record["IdList"]
True
"""
cgi = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
variables = {'db': db,
'term': term}
variables.update(keywds)
return _open(cgi, variables)
def elink(**keywds):
"""ELink checks for linked external articles and returns a handle.
ELink checks for the existence of an external or Related Articles link
from a list of one or more primary IDs; retrieves IDs and relevancy
scores for links to Entrez databases or Related Articles; creates a
hyperlink to the primary LinkOut provider for a specific ID and
database, or lists LinkOut URLs and attributes for multiple IDs.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/entrez/query/static/elink_help.html
Return a handle to the results, by default in XML format.
Raises an IOError exception if there's a network error.
This example finds articles related to the Biopython application
note's entry in the PubMed database:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> pmid = "19304878"
>>> handle = Entrez.elink(dbfrom="pubmed", id=pmid, linkname="pubmed_pubmed")
>>> record = Entrez.read(handle)
>>> handle.close()
>>> print record[0]["LinkSetDb"][0]["LinkName"]
pubmed_pubmed
>>> linked = [link["Id"] for link in record[0]["LinkSetDb"][0]["Link"]]
>>> "17121776" in linked
True
This is explained in much more detail in the Biopython Tutorial.
"""
cgi = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi'
variables = {}
variables.update(keywds)
return _open(cgi, variables)
def einfo(**keywds):
"""EInfo returns a summary of the Entez databases as a results handle.
EInfo provides field names, index term counts, last update, and
available links for each Entrez database.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/entrez/query/static/einfo_help.html
Return a handle to the results, by default in XML format.
Raises an IOError exception if there's a network error.
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> record = Entrez.read(Entrez.einfo())
>>> 'pubmed' in record['DbList']
True
"""
cgi = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/einfo.fcgi'
variables = {}
variables.update(keywds)
return _open(cgi, variables)
def esummary(**keywds):
"""ESummary retrieves document summaries as a results handle.
ESummary retrieves document summaries from a list of primary IDs or
from the user's environment.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/entrez/query/static/esummary_help.html
Return a handle to the results, by default in XML format.
Raises an IOError exception if there's a network error.
This example discovers more about entry 30367 in the journals database:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.esummary(db="journals", id="30367")
>>> record = Entrez.read(handle)
>>> handle.close()
>>> print record[0]["Id"]
30367
>>> print record[0]["Title"]
Computational biology and chemistry
"""
cgi = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi'
variables = {}
variables.update(keywds)
return _open(cgi, variables)
def egquery(**keywds):
"""EGQuery provides Entrez database counts for a global search.
EGQuery provides Entrez database counts in XML for a single search
using Global Query.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/entrez/query/static/egquery_help.html
Return a handle to the results in XML format.
Raises an IOError exception if there's a network error.
This quick example based on a longer version from the Biopython
Tutorial just checks there are over 60 matches for 'Biopython'
in PubMedCentral:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.egquery(term="biopython")
>>> record = Entrez.read(handle)
>>> handle.close()
>>> for row in record["eGQueryResult"]:
... if "pmc" in row["DbName"]:
... print row["Count"] > 60
True
"""
cgi = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/egquery.fcgi'
variables = {}
variables.update(keywds)
return _open(cgi, variables)
def espell(**keywds):
"""ESpell retrieves spelling suggestions, returned in a results handle.
ESpell retrieves spelling suggestions, if available.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/entrez/query/static/espell_help.html
Return a handle to the results, by default in XML format.
Raises an IOError exception if there's a network error.
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> record = Entrez.read(Entrez.espell(term="biopythooon"))
>>> print record["Query"]
biopythooon
>>> print record["CorrectedQuery"]
biopython
"""
cgi = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/espell.fcgi'
variables = {}
variables.update(keywds)
return _open(cgi, variables)
def read(handle, validate=True):
"""Parses an XML file from the NCBI Entrez Utilities into python objects.
This function parses an XML file created by NCBI's Entrez Utilities,
returning a multilevel data structure of Python lists and dictionaries.
Most XML files returned by NCBI's Entrez Utilities can be parsed by
this function, provided its DTD is available. Biopython includes the
DTDs for most commonly used Entrez Utilities.
If validate is True (default), the parser will validate the XML file
against the DTD, and raise an error if the XML file contains tags that
are not represented in the DTD. If validate is False, the parser will
simply skip such tags.
Whereas the data structure seems to consist of generic Python lists,
dictionaries, strings, and so on, each of these is actually a class
derived from the base type. This allows us to store the attributes
(if any) of each element in a dictionary my_element.attributes, and
the tag name in my_element.tag.
"""
#from Parser import DataHandler
handler = DataHandler(validate)
record = handler.read(handle)
return record
def parse(handle, validate=True):
"""Parses an XML file from the NCBI Entrez Utilities into python objects.
This function parses an XML file created by NCBI's Entrez Utilities,
returning a multilevel data structure of Python lists and dictionaries.
This function is suitable for XML files that (in Python) can be represented
as a list of individual records. Whereas 'read' reads the complete file
and returns a single Python list, 'parse' is a generator function that
returns the records one by one. This function is therefore particularly
useful for parsing large files.
Most XML files returned by NCBI's Entrez Utilities can be parsed by
this function, provided its DTD is available. Biopython includes the
DTDs for most commonly used Entrez Utilities.
If validate is True (default), the parser will validate the XML file
against the DTD, and raise an error if the XML file contains tags that
are not represented in the DTD. If validate is False, the parser will
simply skip such tags.
Whereas the data structure seems to consist of generic Python lists,
dictionaries, strings, and so on, each of these is actually a class
derived from the base type. This allows us to store the attributes
(if any) of each element in a dictionary my_element.attributes, and
the tag name in my_element.tag.
"""
#from Parser import DataHandler
handler = DataHandler(validate)
records = handler.parse(handle)
return records
def _open(cgi, params={}, post=False):
"""Helper function to build the URL and open a handle to it (PRIVATE).
Open a handle to Entrez. cgi is the URL for the cgi script to access.
params is a dictionary with the options to pass to it. Does some
simple error checking, and will raise an IOError if it encounters one.
This function also enforces the "up to three queries per second rule"
to avoid abusing the NCBI servers.
"""
# NCBI requirement: At most three queries per second.
# Equivalently, at least a third of a second between queries
delay = 0.333333334
current = time.time()
wait = _open.previous + delay - current
if wait > 0:
time.sleep(wait)
_open.previous = current + wait
else:
_open.previous = current
# Remove None values from the parameters
for key, value in params.items():
if value is None:
del params[key]
# Tell Entrez that we are using Biopython (or whatever the user has
# specified explicitly in the parameters or by changing the default)
if not "tool" in params:
params["tool"] = tool
# Tell Entrez who we are
if not "email" in params:
if email is not None:
params["email"] = email
else:
warnings.warn("""
Email address is not specified.
To make use of NCBI's E-utilities, NCBI strongly recommends you to specify
your email address with each request. From June 1, 2010, this will be
mandatory. As an example, if your email address is [email protected], you
can specify it as follows:
from Bio import Entrez
Entrez.email = '[email protected]'
In case of excessive usage of the E-utilities, NCBI will attempt to contact
a user at the email address provided before blocking access to the
E-utilities.""", UserWarning)
# Open a handle to Entrez.
options = urllib.urlencode(params, doseq=True)
#print cgi + "?" + options
try:
if post:
#HTTP POST
#handle = urllib2.urlopen(cgi, data=_as_bytes(options))
handle = urllib2.urlopen(cgi, data=options)
else:
#HTTP GET
cgi += "?" + options
handle = urllib2.urlopen(cgi)
except urllib2.HTTPError, exception:
raise exception
#return _binary_to_string_handle(handle)
return handle
_open.previous = 0
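# Illustrative usage sketch (added; not part of the original module). It
# performs live NCBI requests, so it is wrapped in a helper instead of
# running at import time; the e-mail address and search term are arbitrary
# placeholders.
def _example_search_and_fetch():
    global email
    email = "[email protected]"
    handle = esearch(db="nucleotide", term="opuntia[ORGN] accD", retmax=3)
    record = read(handle)
    handle.close()
    if record["IdList"]:
        handle = efetch(db="nucleotide", id=record["IdList"],
                        rettype="gb", retmode="text")
        print handle.readline().strip()
        handle.close()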
|
|
''' This example creates plots for all silicon properties available in Scarce.
'''
import numpy as np
import matplotlib.pylab as plt
from scarce import silicon
def plot_depletion_depth():
V_bias = np.linspace(0, 100., 1000.)
plt.clf()
# Plot depletion depth for different bias voltages
for n_eff in [1, 10, 100]:
for temperature in [300]:
depletion_depth = silicon.get_depletion_depth(
V_bias=V_bias,
n_eff=n_eff,
temperature=temperature)
plt.plot(V_bias,
depletion_depth,
linewidth=2.,
label=r'$\mathrm{N_{eff}=%d\cdot10^{12}/cm^3}$, $T=%d$'
% (n_eff, temperature))
plt.title('Depletion depth in silicon')
plt.xlabel('Bias voltage [$\mathrm{V}$]')
plt.ylabel('Depletion depth [$\mathrm{um}$]')
plt.legend(loc=0)
plt.grid()
plt.savefig('DepletionDepth.pdf', layout='tight')
def plot_depletion_depth_res():
# Plot depletion depth as a function of the resistivity
temperature = 300 # [K]
e_field = 1e3 # [V/cm]
V_bias = 100 # [V]
n_eff = np.logspace(0, 3, 1000.)
resistivity = silicon.get_resistivity(n_eff,
is_n_type=False,
temperature=temperature,
e_field=e_field)
depletion_depth = silicon.get_depletion_depth(
V_bias=V_bias,
n_eff=n_eff,
temperature=temperature)
plt.plot(resistivity,
depletion_depth,
linewidth=2.,
label='$\mathrm{p-type, V_{bias}=%d\ V}$, $T=%d$'
% (V_bias, temperature))
plt.plot(resistivity,
0.3 * np.sqrt(V_bias * resistivity),
'--',
linewidth=2.,
label=r'$0.3\sqrt{V_{bias}\ \mathrm{[V]}\cdot\rho\ \mathrm{[\Omega - cm]}}$')
resistivity = silicon.get_resistivity(n_eff,
is_n_type=True,
temperature=temperature,
e_field=e_field)
depletion_depth = silicon.get_depletion_depth(
V_bias=V_bias,
n_eff=n_eff,
temperature=temperature)
plt.plot(resistivity,
depletion_depth,
linewidth=2.,
label='$\mathrm{n-type, V_{bias}=%d\ V}$, $T=%d$'
% (V_bias, temperature))
plt.title('Depletion depth in silicon')
plt.xlabel('Resistivity [$\mathrm{\Omega - cm}$]')
plt.ylabel('Depletion depth [$\mathrm{um}$]')
plt.legend(loc=0)
plt.grid()
plt.savefig('DepletionDepthResistivity.pdf', layout='tight')
def plot_depletion_voltage():
n_eff = np.linspace(0.1, 100., 1000.)
plt.clf()
# Plot depletion voltage
for distance in [100, 150, 200, 250]:
plt.plot(n_eff, silicon.get_depletion_voltage(
n_eff, distance=distance), linewidth=2.,
label='Electrode distance = %d um' % distance)
plt.title(
'Full depletion voltage in silicon')
plt.xlabel('Effective doping concentration [$\mathrm{10^{12} / cm^3}$]')
plt.ylabel('Depletion voltage [$\mathrm{V}$]')
plt.xscale('log')
plt.yscale('log')
plt.legend(loc=0)
plt.grid()
plt.savefig('DepletionVoltage.pdf', layout='tight')
def plot_diffusion_potential():
n_eff = np.linspace(0.1, 100., 1000.)
plt.clf()
# Plot diffusion potential
for temperature in [200, 250, 300, 350]:
plt.plot(n_eff, silicon.get_diffusion_potential(
n_eff, temperature=temperature), linewidth=2.,
label='T = %d' % temperature)
plt.title(
'Diffusion potential at thermal equilibrium in silicon')
plt.xlabel('Effective doping concentration [$\mathrm{10^{12} / cm^3}$]')
plt.ylabel('Diffusion potential [$\mathrm{V}$]')
plt.legend(loc=0)
plt.grid()
plt.savefig('DiffusionPotential.pdf', layout='tight')
def plot_eff_acceptor_concentration():
fluence = np.logspace(0., 4., 1000.)
plt.clf()
# Plot effective acceptor concentration
eff_acc_concentration = silicon.get_eff_acceptor_concentration(
fluence, n_eff_0=1.8, is_ntype=False, is_oxygenated=False)
plt.plot(fluence, eff_acc_concentration, '-', linewidth=2.,
label='p-type')
eff_acc_concentration = silicon.get_eff_acceptor_concentration(
fluence, n_eff_0=1.8, is_ntype=False, is_oxygenated=True)
plt.plot(fluence, eff_acc_concentration, '-.', linewidth=2.,
label='oxygenated p-type')
eff_acc_concentration = silicon.get_eff_acceptor_concentration(
fluence, n_eff_0=1.8, is_ntype=True, is_oxygenated=False)
plt.plot(fluence, eff_acc_concentration, ':', linewidth=2.,
label='n-type')
eff_acc_concentration = silicon.get_eff_acceptor_concentration(
fluence, n_eff_0=1.8, is_ntype=True, is_oxygenated=True)
plt.plot(fluence, eff_acc_concentration, '--', linewidth=2.,
label='oxygenated n-type')
plt.title('Effective acceptor concentration')
plt.xlabel('Fluence [$\mathrm{10^{12}\ N_{eq}/cm^2}$]')
plt.ylabel('Acceptor concentration $\mathrm{N_{eff}}$\
[$\mathrm{10^{12} cm^{-3}}$]')
plt.legend(loc=0)
plt.xscale('log')
plt.yscale('log')
plt.grid()
plt.savefig('EffectiveAcceptorConcentration.pdf', layout='tight')
def plot_free_path():
fluence = np.logspace(12., 15., 1000.)
plt.clf()
# Plot charge carrier mean free path
s_e = silicon.get_free_path(fluence, e_field=1e6, temperature=250,
is_electron=True)
s_h = silicon.get_free_path(fluence, e_field=1e6, temperature=250,
is_electron=False)
plt.plot(fluence, s_e, linewidth=2., color='blue', linestyle='-',
label='Electrons, T = 250')
plt.plot(fluence, s_h, linewidth=2., color='red', linestyle='-',
label='Holes, T = 250')
plt.title('Charge carrier mean free path in irradiated silicon\nat saturation\
velocity ($\mathrm{E=10^6\ V/cm}$)')
plt.xlabel('Fluence [$\mathrm{N_{eq}/cm^2}$]')
plt.ylabel('Mean free path [$\mathrm{um}$]')
plt.legend(loc=0)
plt.xscale('log')
plt.yscale('log')
plt.grid()
plt.savefig('MeanFreePath.pdf', layout='tight')
def plot_mobility():
e_field = np.logspace(3., 5., 1000.)
plt.clf()
# Plot mobility mu
mu0_e = silicon.get_mobility(e_field, temperature=250., is_electron=True)
mu1_e = silicon.get_mobility(e_field, temperature=300., is_electron=True)
mu0_h = silicon.get_mobility(e_field, temperature=250., is_electron=False)
mu1_h = silicon.get_mobility(e_field, temperature=300., is_electron=False)
plt.loglog(e_field, mu0_e, linewidth=2., color='blue', linestyle='--', label='Electrons, T = 250K')
plt.loglog(e_field, mu1_e, linewidth=2., color='blue', linestyle='-', label='Electrons, T = 300K')
plt.loglog(e_field, mu0_h, linewidth=2., color='red', linestyle='--', label='Holes, T = 250K')
plt.loglog(e_field, mu1_h, linewidth=2., color='red', linestyle='-', label='Holes, T = 300K')
plt.title('Charge carrier mobility in silicon')
plt.xlabel('Electric field [$\mathrm{V/cm}$]')
plt.ylabel('Electron / hole mobility [$\mathrm{cm^2/Vs}$]')
plt.legend(loc=0)
plt.grid()
plt.savefig('Mobility.pdf', layout='tight')
def plot_velocity():
# Plot velocity: v = mu * E
e_field = np.logspace(3., 5., 1000.)
plt.clf()
v0_e = silicon.get_mobility(e_field, temperature=250., is_electron=True) * e_field
v1_e = silicon.get_mobility(e_field, temperature=300., is_electron=True) * e_field
v0_h = silicon.get_mobility(e_field, temperature=250., is_electron=False) * e_field
v1_h = silicon.get_mobility(e_field, temperature=300., is_electron=False) * e_field
plt.plot(e_field, v0_e, linewidth=2., color='blue', linestyle='--', label='Electrons, T = 250K')
plt.plot(e_field, v1_e, linewidth=2., color='blue', linestyle='-', label='Electrons, T = 300K')
plt.plot(e_field, v0_h, linewidth=2., color='red', linestyle='--', label='Holes, T = 250K')
plt.plot(e_field, v1_h, linewidth=2., color='red', linestyle='-', label='Holes, T = 300K')
plt.title('Charge carrier velocity in silicon')
plt.xlabel('Electric field [$\mathrm{V/cm}$]')
plt.ylabel('Electron / hole velocity [$\mathrm{cm/s}$]')
plt.plot([5000, 5000], plt.ylim(), '--', color='black', linewidth=2)
plt.text(6000, 10000000, r'$\frac{\mathrm{100\ V}}{\mathrm{200 \mu m}}$', fontsize=25)
plt.plot([50000, 50000], plt.ylim(), '--', color='black', linewidth=2)
plt.text(51000, 10000000, r'$\frac{\mathrm{1000\ V}}{\mathrm{200 \mu m}}$', fontsize=25)
plt.xlim((1000, 60000))
plt.legend(loc=0)
plt.grid()
plt.gca().set_aspect(1. / plt.gca().get_data_ratio() / 1.618)
plt.savefig('Velocity.pdf', layout='tight')
def plot_resistivity():
n_eff = np.logspace(11., 15., 1000.)
plt.clf()
# Plot resistivity
plt.plot(n_eff, silicon.get_resistivity(n_eff / 1e12, is_n_type=True), label='n-type')
plt.plot(n_eff, silicon.get_resistivity(n_eff / 1e12, is_n_type=False), label='p-type')
plt.title('Resistivity of silicon (low e-field approximation)')
plt.xlabel('Effective doping concentration [$\mathrm{cm^{-3}}$]')
plt.ylabel('Resistivity [$\mathrm{\Omega - cm}$]')
plt.legend(loc=0)
plt.xscale('log')
plt.yscale('log')
plt.grid()
plt.savefig('Resistivity.pdf', layout='tight')
def plot_trapping():
fluence = np.logspace(12., 15., 1000.)
# Plot trapping time
tr_e = silicon.get_trapping(fluence, is_electron=True, paper=1)
tr_h = silicon.get_trapping(fluence, is_electron=False, paper=1)
plt.clf()
plt.plot(fluence, tr_e, linewidth=2., color='blue', linestyle='-', label='Electrons')
plt.plot(fluence, tr_h, linewidth=2., color='red', linestyle='-', label='Holes')
plt.title('Charge carrier trapping time in irradiated silicon')
plt.xlabel('Fluence [$\mathrm{N_{eq}/cm^2}$]')
plt.ylabel('Trapping time [$\mathrm{ns}$]')
plt.legend(loc=0)
plt.xscale('log')
plt.yscale('log')
plt.grid()
plt.savefig('TrappingTime.pdf', layout='tight')
def plot_thermal_velocity():
temperature = np.linspace(100, 500, 100)
v_th_e = silicon.get_thermal_velocity(temperature, is_electron=True)
v_th_h = silicon.get_thermal_velocity(temperature, is_electron=False)
plt.clf()
plt.plot(temperature, v_th_e, linewidth=2., color='blue', linestyle='-',
label='Electrons')
plt.plot(temperature, v_th_h, linewidth=2., color='red', linestyle='-',
label='Holes')
plt.title('Thermal velocity of charge carriers in silicon')
plt.xlabel('Temperature [K]')
plt.ylabel('Thermal velocity [cm/s]')
plt.legend(loc=0)
plt.grid()
plt.savefig('ThermalVelocity.pdf', layout='tight')
def create_plots():
plot_depletion_depth()
plot_depletion_depth_res()
plot_depletion_voltage()
plot_diffusion_potential()
plot_eff_acceptor_concentration()
plot_free_path()
plot_mobility()
plot_velocity()
plot_resistivity()
plot_trapping()
plot_thermal_velocity()
if __name__ == '__main__':
create_plots()
|
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSAuthConnection
from boto.exception import DynamoDBResponseError
from boto.provider import Provider
from boto.dynamodb import exceptions as dynamodb_exceptions
import time
try:
import simplejson as json
except ImportError:
import json
#
# To get full debug output, uncomment the following line and set the
# value of Debug to be 2
#
#boto.set_stream_logger('dynamodb')
Debug = 0
class Layer1(AWSAuthConnection):
"""
This is the lowest-level interface to DynamoDB. Methods at this
layer map directly to API requests and parameters to the methods
are either simple, scalar values or they are the Python equivalent
of the JSON input as defined in the DynamoDB Developer's Guide.
All responses are direct decoding of the JSON response bodies to
Python data structures via the json or simplejson modules.
:ivar throughput_exceeded_events: An integer variable that
keeps a running total of the number of ThroughputExceeded
responses this connection has received from Amazon DynamoDB.
"""
DefaultRegionName = 'us-east-1'
"""The default region name for DynamoDB API."""
ServiceName = 'DynamoDB'
"""The name of the Service"""
Version = '20111205'
"""DynamoDB API version."""
ThruputError = "ProvisionedThroughputExceededException"
"""The error response returned when provisioned throughput is exceeded"""
SessionExpiredError = 'com.amazon.coral.service#ExpiredTokenException'
"""The error response returned when session token has expired"""
ConditionalCheckFailedError = 'ConditionalCheckFailedException'
"""The error response returned when a conditional check fails"""
ValidationError = 'ValidationException'
"""The error response returned when an item is invalid in some way"""
ResponseError = DynamoDBResponseError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
debug=0, session_token=None, region=None):
if not region:
region_name = boto.config.get('DynamoDB', 'region',
self.DefaultRegionName)
for reg in boto.dynamodb.regions():
if reg.name == region_name:
region = reg
break
self.region = region
self._passed_access_key = aws_access_key_id
self._passed_secret_key = aws_secret_access_key
if not session_token:
session_token = self._get_session_token()
self.creds = session_token
self.throughput_exceeded_events = 0
self.request_id = None
self.instrumentation = {'times': [], 'ids': []}
self.do_instrumentation = False
AWSAuthConnection.__init__(self, self.region.endpoint,
self.creds.access_key,
self.creds.secret_key,
is_secure, port, proxy, proxy_port,
debug=debug,
security_token=self.creds.session_token)
def _update_provider(self):
self.provider = Provider('aws',
self.creds.access_key,
self.creds.secret_key,
self.creds.session_token)
self._auth_handler.update_provider(self.provider)
def _get_session_token(self):
boto.log.debug('Creating new Session Token')
sts = boto.connect_sts(self._passed_access_key,
self._passed_secret_key)
return sts.get_session_token()
def _required_auth_capability(self):
return ['hmac-v3-http']
def make_request(self, action, body='', object_hook=None):
"""
:raises: ``DynamoDBExpiredTokenError`` if the security token expires.
"""
headers = {'X-Amz-Target': '%s_%s.%s' % (self.ServiceName,
self.Version, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.0',
'Content-Length': str(len(body))}
http_request = self.build_base_http_request('POST', '/', '/',
{}, headers, body, None)
if self.do_instrumentation:
start = time.time()
response = self._mexe(http_request, sender=None,
override_num_retries=10,
retry_handler=self._retry_handler)
self.request_id = response.getheader('x-amzn-RequestId')
boto.log.debug('RequestId: %s' % self.request_id)
if self.do_instrumentation:
self.instrumentation['times'].append(time.time() - start)
self.instrumentation['ids'].append(self.request_id)
response_body = response.read()
boto.log.debug(response_body)
return json.loads(response_body, object_hook=object_hook)
def _retry_handler(self, response, i, next_sleep):
status = None
if response.status == 400:
response_body = response.read()
boto.log.debug(response_body)
data = json.loads(response_body)
if self.ThruputError in data.get('__type'):
self.throughput_exceeded_events += 1
msg = "%s, retry attempt %s" % (self.ThruputError, i)
if i == 0:
next_sleep = 0
else:
next_sleep = 0.05 * (2 ** i)
i += 1
status = (msg, i, next_sleep)
elif self.SessionExpiredError in data.get('__type'):
msg = 'Renewing Session Token'
self.creds = self._get_session_token()
self._update_provider()
status = (msg, i + self.num_retries - 1, 0)
elif self.ConditionalCheckFailedError in data.get('__type'):
raise dynamodb_exceptions.DynamoDBConditionalCheckFailedError(
response.status, response.reason, data)
elif self.ValidationError in data.get('__type'):
raise dynamodb_exceptions.DynamoDBValidationError(
response.status, response.reason, data)
else:
raise self.ResponseError(response.status, response.reason,
data)
return status
def list_tables(self, limit=None, start_table=None):
"""
Returns a dictionary of results. The dictionary contains
a **TableNames** key whose value is a list of the table names.
The dictionary could also contain a **LastEvaluatedTableName**
key whose value would be the last table name returned if
the complete list of table names was not returned. This
value would then be passed as the ``start_table`` parameter on
a subsequent call to this method.
:type limit: int
:param limit: The maximum number of tables to return.
:type start_table: str
:param start_table: The name of the table that starts the
list. If you ran a previous list_tables and not
all results were returned, the response dict would
include a LastEvaluatedTableName attribute. Use
that value here to continue the listing.
"""
data = {}
if limit:
data['Limit'] = limit
if start_table:
data['ExclusiveStartTableName'] = start_table
json_input = json.dumps(data)
return self.make_request('ListTables', json_input)
def describe_table(self, table_name):
"""
Returns information about the table including current
state of the table, primary key schema and when the
table was created.
:type table_name: str
:param table_name: The name of the table to describe.
"""
data = {'TableName': table_name}
json_input = json.dumps(data)
return self.make_request('DescribeTable', json_input)
def create_table(self, table_name, schema, provisioned_throughput):
"""
Add a new table to your account. The table name must be unique
among those associated with the account issuing the request.
This request triggers an asynchronous workflow to begin creating
the table. When the workflow is complete, the state of the
table will be ACTIVE.
:type table_name: str
:param table_name: The name of the table to create.
:type schema: dict
:param schema: A Python version of the KeySchema data structure
as defined by DynamoDB
:type provisioned_throughput: dict
:param provisioned_throughput: A Python version of the
ProvisionedThroughput data structure defined by
DynamoDB.
"""
data = {'TableName': table_name,
'KeySchema': schema,
'ProvisionedThroughput': provisioned_throughput}
json_input = json.dumps(data)
response_dict = self.make_request('CreateTable', json_input)
return response_dict
def update_table(self, table_name, provisioned_throughput):
"""
Updates the provisioned throughput for a given table.
:type table_name: str
:param table_name: The name of the table to update.
:type provisioned_throughput: dict
:param provisioned_throughput: A Python version of the
ProvisionedThroughput data structure defined by
DynamoDB.
"""
data = {'TableName': table_name,
'ProvisionedThroughput': provisioned_throughput}
json_input = json.dumps(data)
return self.make_request('UpdateTable', json_input)
def delete_table(self, table_name):
"""
Deletes the table and all of its data. After this request
the table will be in the DELETING state until DynamoDB
completes the delete operation.
:type table_name: str
:param table_name: The name of the table to delete.
"""
data = {'TableName': table_name}
json_input = json.dumps(data)
return self.make_request('DeleteTable', json_input)
def get_item(self, table_name, key, attributes_to_get=None,
consistent_read=False, object_hook=None):
"""
Return a set of attributes for an item that matches
the supplied key.
:type table_name: str
:param table_name: The name of the table containing the item.
:type key: dict
:param key: A Python version of the Key data structure
defined by DynamoDB.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
"""
data = {'TableName': table_name,
'Key': key}
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if consistent_read:
data['ConsistentRead'] = True
json_input = json.dumps(data)
response = self.make_request('GetItem', json_input,
object_hook=object_hook)
if 'Item' not in response:
raise dynamodb_exceptions.DynamoDBKeyNotFoundError(
"Key does not exist."
)
return response
def batch_get_item(self, request_items, object_hook=None):
"""
Return a set of attributes for multiple items in
multiple tables using their primary keys.
:type request_items: dict
:param request_items: A Python version of the RequestItems
data structure defined by DynamoDB.
"""
data = {'RequestItems': request_items}
json_input = json.dumps(data)
return self.make_request('BatchGetItem', json_input,
object_hook=object_hook)
def batch_write_item(self, request_items, object_hook=None):
"""
This operation enables you to put or delete several items
across multiple tables in a single API call.
:type request_items: dict
:param request_items: A Python version of the RequestItems
data structure defined by DynamoDB.
"""
data = {'RequestItems': request_items}
json_input = json.dumps(data)
return self.make_request('BatchWriteItem', json_input,
object_hook=object_hook)
def put_item(self, table_name, item,
expected=None, return_values=None,
object_hook=None):
"""
Create a new item or replace an old item with a new
item (including all attributes). If an item already
exists in the specified table with the same primary
key, the new item will completely replace the old item.
You can perform a conditional put by specifying an
expected rule.
:type table_name: str
:param table_name: The name of the table in which to put the item.
:type item: dict
:param item: A Python version of the Item data structure
defined by DynamoDB.
:type expected: dict
:param expected: A Python version of the Expected
data structure defined by DynamoDB.
:type return_values: str
:param return_values: Controls the return of attribute
name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
data = {'TableName': table_name,
'Item': item}
if expected:
data['Expected'] = expected
if return_values:
data['ReturnValues'] = return_values
json_input = json.dumps(data)
return self.make_request('PutItem', json_input,
object_hook=object_hook)
def update_item(self, table_name, key, attribute_updates,
expected=None, return_values=None,
object_hook=None):
"""
Edits an existing item's attributes. You can perform a conditional
update (insert a new attribute name-value pair if it doesn't exist,
or replace an existing name-value pair if it has certain expected
attribute values).
:type table_name: str
:param table_name: The name of the table.
:type key: dict
:param key: A Python version of the Key data structure
defined by DynamoDB which identifies the item to be updated.
:type attribute_updates: dict
:param attribute_updates: A Python version of the AttributeUpdates
data structure defined by DynamoDB.
:type expected: dict
:param expected: A Python version of the Expected
data structure defined by DynamoDB.
:type return_values: str
:param return_values: Controls the return of attribute
name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
data = {'TableName': table_name,
'Key': key,
'AttributeUpdates': attribute_updates}
if expected:
data['Expected'] = expected
if return_values:
data['ReturnValues'] = return_values
json_input = json.dumps(data)
return self.make_request('UpdateItem', json_input,
object_hook=object_hook)
def delete_item(self, table_name, key,
expected=None, return_values=None,
object_hook=None):
"""
Delete an item and all of its attributes by primary key.
You can perform a conditional delete by specifying an
expected rule.
:type table_name: str
:param table_name: The name of the table containing the item.
:type key: dict
:param key: A Python version of the Key data structure
defined by DynamoDB.
:type expected: dict
:param expected: A Python version of the Expected
data structure defined by DynamoDB.
:type return_values: str
:param return_values: Controls the return of attribute
name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
data = {'TableName': table_name,
'Key': key}
if expected:
data['Expected'] = expected
if return_values:
data['ReturnValues'] = return_values
json_input = json.dumps(data)
return self.make_request('DeleteItem', json_input,
object_hook=object_hook)
def query(self, table_name, hash_key_value, range_key_conditions=None,
attributes_to_get=None, limit=None, consistent_read=False,
scan_index_forward=True, exclusive_start_key=None,
object_hook=None):
"""
Perform a query of DynamoDB. The structured parameters below must
already be in the form defined by the DynamoDB API; they are
assembled into the JSON request body and passed as is to DynamoDB.
:type table_name: str
:param table_name: The name of the table to query.
:type hash_key_value: dict
:param hash_key_value: A DynamoDB-style HashKeyValue.
:type range_key_conditions: dict
:param range_key_conditions: A Python version of the
RangeKeyConditions data structure.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type limit: int
:param limit: The maximum number of items to return.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
:param scan_index_forward: Specifies forward or backward
traversal of the index. Default is forward (True).
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
"""
data = {'TableName': table_name,
'HashKeyValue': hash_key_value}
if range_key_conditions:
data['RangeKeyCondition'] = range_key_conditions
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if limit:
data['Limit'] = limit
if consistent_read:
data['ConsistentRead'] = True
if scan_index_forward:
data['ScanIndexForward'] = True
else:
data['ScanIndexForward'] = False
if exclusive_start_key:
data['ExclusiveStartKey'] = exclusive_start_key
json_input = json.dumps(data)
return self.make_request('Query', json_input,
object_hook=object_hook)
def scan(self, table_name, scan_filter=None,
attributes_to_get=None, limit=None,
count=False, exclusive_start_key=None,
object_hook=None):
"""
Perform a scan of DynamoDB. The structured parameters below must
already be in the form defined by the DynamoDB API; they are
assembled into the JSON request body and passed as is to DynamoDB.
:type table_name: str
:param table_name: The name of the table to scan.
:type scan_filter: dict
:param scan_filter: A Python version of the
ScanFilter data structure.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type limit: int
:param limit: The maximum number of items to return.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Scan operation, even if the
operation has no matching items for the assigned filter.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
"""
data = {'TableName': table_name}
if scan_filter:
data['ScanFilter'] = scan_filter
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if limit:
data['Limit'] = limit
if count:
data['Count'] = True
if exclusive_start_key:
data['ExclusiveStartKey'] = exclusive_start_key
json_input = json.dumps(data)
return self.make_request('Scan', json_input, object_hook=object_hook)
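# Illustrative usage sketch (added; not part of the original module). It
# requires valid AWS credentials (picked up by boto) and a pre-existing
# table; the table name, key and attribute values are arbitrary placeholders
# written in the 2011-12-05 wire format that Layer1 passes through verbatim.
def _example_layer1_usage():
    conn = Layer1()
    print conn.list_tables(limit=5)
    item = {'id': {'S': 'example-id'}, 'visits': {'N': '1'}}
    conn.put_item('example-table', item)
    response = conn.get_item('example-table',
                             key={'HashKeyElement': {'S': 'example-id'}},
                             consistent_read=True)
    print response['Item']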
|
|
from __future__ import absolute_import
from __future__ import with_statement
from mock import Mock
from celery import abstract
from celery.tests.utils import AppCase, Case
class test_Component(Case):
class Def(abstract.Component):
name = "test_Component.Def"
def test_components_must_be_named(self):
with self.assertRaises(NotImplementedError):
class X(abstract.Component):
pass
class Y(abstract.Component):
abstract = True
def test_namespace_name(self, ns="test_namespace_name"):
class X(abstract.Component):
namespace = ns
name = "X"
self.assertEqual(X.namespace, ns)
self.assertEqual(X.name, "X")
class Y(abstract.Component):
name = "%s.Y" % (ns, )
self.assertEqual(Y.namespace, ns)
self.assertEqual(Y.name, "Y")
def test_init(self):
self.assertTrue(self.Def(self))
def test_create(self):
self.Def(self).create(self)
def test_include_if(self):
x = self.Def(self)
x.enabled = True
self.assertTrue(x.include_if(self))
x.enabled = False
self.assertFalse(x.include_if(self))
def test_instantiate(self):
self.assertIsInstance(self.Def(self).instantiate(self.Def, self),
self.Def)
def test_include_when_enabled(self):
x = self.Def(self)
x.create = Mock()
x.create.return_value = "George"
self.assertTrue(x.include(self))
self.assertEqual(x.obj, "George")
x.create.assert_called_with(self)
def test_include_when_disabled(self):
x = self.Def(self)
x.enabled = False
x.create = Mock()
self.assertFalse(x.include(self))
self.assertFalse(x.create.call_count)
class test_StartStopComponent(Case):
class Def(abstract.StartStopComponent):
name = "test_StartStopComponent.Def"
def setUp(self):
self.components = []
def test_start__stop(self):
x = self.Def(self)
x.create = Mock()
# include creates the underlying object and sets
# its x.obj attribute to it, as well as appending
# it to the parent.components list.
x.include(self)
self.assertTrue(self.components)
self.assertIs(self.components[0], x.obj)
x.start()
x.obj.start.assert_called_with()
x.stop()
x.obj.stop.assert_called_with()
def test_include_when_disabled(self):
x = self.Def(self)
x.enabled = False
x.include(self)
self.assertFalse(self.components)
def test_terminate_when_terminable(self):
x = self.Def(self)
x.terminable = True
x.create = Mock()
x.include(self)
x.terminate()
x.obj.terminate.assert_called_with()
self.assertFalse(x.obj.stop.call_count)
def test_terminate_calls_stop_when_not_terminable(self):
x = self.Def(self)
x.terminable = False
x.create = Mock()
x.include(self)
x.terminate()
x.obj.stop.assert_called_with()
self.assertFalse(x.obj.terminate.call_count)
class test_Namespace(AppCase):
class NS(abstract.Namespace):
name = "test_Namespace"
class ImportingNS(abstract.Namespace):
def __init__(self, *args, **kwargs):
abstract.Namespace.__init__(self, *args, **kwargs)
self.imported = []
def modules(self):
return ["A", "B", "C"]
def import_module(self, module):
self.imported.append(module)
def test_components_added_to_unclaimed(self):
class tnA(abstract.Component):
name = "test_Namespace.A"
class tnB(abstract.Component):
name = "test_Namespace.B"
class xxA(abstract.Component):
name = "xx.A"
self.assertIn("A", self.NS._unclaimed["test_Namespace"])
self.assertIn("B", self.NS._unclaimed["test_Namespace"])
self.assertIn("A", self.NS._unclaimed["xx"])
self.assertNotIn("B", self.NS._unclaimed["xx"])
def test_init(self):
ns = self.NS(app=self.app)
self.assertIs(ns.app, self.app)
self.assertEqual(ns.name, "test_Namespace")
self.assertTrue(ns.logger)
self.assertFalse(ns.services)
def test_interface_modules(self):
self.NS(app=self.app).modules()
def test_load_modules(self):
x = self.ImportingNS(app=self.app)
x.load_modules()
self.assertListEqual(x.imported, ["A", "B", "C"])
def test_apply(self):
class MyNS(abstract.Namespace):
name = "test_apply"
def modules(self):
return ["A", "B"]
class A(abstract.Component):
name = "test_apply.A"
requires = ["C"]
class B(abstract.Component):
name = "test_apply.B"
class C(abstract.Component):
name = "test_apply.C"
requires = ["B"]
class D(abstract.Component):
name = "test_apply.D"
last = True
x = MyNS(app=self.app)
x.import_module = Mock()
x.apply(self)
self.assertItemsEqual(x.components.values(), [A, B, C, D])
self.assertTrue(x.import_module.call_count)
for boot_step in x.boot_steps:
self.assertEqual(boot_step.namespace, x)
self.assertIsInstance(x.boot_steps[0], B)
self.assertIsInstance(x.boot_steps[1], C)
self.assertIsInstance(x.boot_steps[2], A)
self.assertIsInstance(x.boot_steps[3], D)
self.assertIs(x["A"], A)
def test_import_module(self):
x = self.NS(app=self.app)
import os
self.assertIs(x.import_module("os"), os)
def test_find_last_but_no_components(self):
class MyNS(abstract.Namespace):
name = "qwejwioqjewoqiej"
x = MyNS(app=self.app)
x.apply(self)
self.assertIsNone(x._find_last())
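# Illustrative sketch (added; not part of the original test module). A
# minimal StartStopComponent of the kind these tests exercise: `name` claims
# it for the "example" namespace and `create()` returns the object whose
# start()/stop() methods the namespace drives. All names are arbitrary.
class _ExampleComponent(abstract.StartStopComponent):
    name = "example.heartbeat"

    def create(self, parent):
        # include() stores the returned object on self.obj and appends it
        # to parent.components, as asserted in test_start__stop above.
        return Mock(name="heartbeat")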
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration Manager."""
from flask import request
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.datastore import data_types
from handlers import base_handler
from libs import form
from libs import handler
from libs import helpers
USER_PERMISSION_ENTITY_KINDS = [
{
'name': 'fuzzer',
'value': data_types.PermissionEntityKind.FUZZER,
},
{
'name': 'job',
'value': data_types.PermissionEntityKind.JOB,
},
{
'name': 'uploader',
'value': data_types.PermissionEntityKind.UPLOADER,
},
]
USER_PERMISSION_AUTO_CC_TYPES = [
{
'name': 'none',
'value': data_types.AutoCCType.NONE,
},
{
'name': 'all',
'value': data_types.AutoCCType.ALL,
},
{
'name': 'security',
'value': data_types.AutoCCType.SECURITY,
},
]
def get_value_by_name(item_list, name):
"""Return value for entry whose name matches the one in item list."""
for item in item_list:
if item['name'] == name:
return item['value']
return None
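# Illustrative sketch (added; not part of the original handler). Shows how
# the handlers below resolve submitted form values to datastore enum values;
# 'job' and 'security' are example entries from the lists above.
def _example_resolve_permission_values():
  entity_kind = get_value_by_name(USER_PERMISSION_ENTITY_KINDS, 'job')
  auto_cc = get_value_by_name(USER_PERMISSION_AUTO_CC_TYPES, 'security')
  # Unknown names resolve to None, which the handlers reject as invalid.
  unknown = get_value_by_name(USER_PERMISSION_ENTITY_KINDS, 'nonexistent')
  return entity_kind, auto_cc, unknown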
class Handler(base_handler.Handler):
"""Configuration manager."""
@handler.get(handler.HTML)
@handler.check_admin_access
def get(self):
"""Handle a get request."""
external_user_permissions = list(
data_types.ExternalUserPermission.query().order(
data_types.ExternalUserPermission.entity_kind,
data_types.ExternalUserPermission.entity_name,
data_types.ExternalUserPermission.email))
template_values = {
'config': db_config.get(),
'permissions': external_user_permissions,
'fieldValues': {
'csrf_token': form.generate_csrf_token(),
'user_permission_entity_kinds': USER_PERMISSION_ENTITY_KINDS,
'user_permission_auto_cc_types': USER_PERMISSION_AUTO_CC_TYPES,
'add_permission_url': '/add-external-user-permission',
'delete_permission_url': '/delete-external-user-permission',
}
}
helpers.log('Configuration', helpers.VIEW_OPERATION)
return self.render('configuration.html', template_values)
@handler.post(handler.FORM, handler.HTML)
@handler.check_admin_access
@handler.require_csrf_token
def post(self):
"""Handle a post request."""
config = db_config.get()
if not config:
config = data_types.Config()
previous_hash = request.get('previous_hash')
if config.previous_hash and config.previous_hash != previous_hash:
raise helpers.EarlyExitException(
'Your change conflicts with another configuration update. '
'Please refresh and try again.', 500)
build_apiary_service_account_private_key = request.get(
'build_apiary_service_account_private_key')
bug_report_url = request.get('bug_report_url')
client_credentials = request.get('client_credentials')
jira_url = request.get('jira_url')
jira_credentials = request.get('jira_credentials')
component_repository_mappings = request.get('component_repository_mappings')
contact_string = request.get('contact_string')
documentation_url = request.get('documentation_url')
github_credentials = request.get('github_credentials')
platform_group_mappings = request.get('platform_group_mappings')
privileged_users = request.get('privileged_users')
blacklisted_users = request.get('blacklisted_users')
relax_security_bug_restrictions = request.get(
'relax_security_bug_restrictions')
relax_testcase_restrictions = request.get('relax_testcase_restrictions')
reproduce_tool_client_id = request.get('reproduce_tool_client_id')
reproduce_tool_client_secret = request.get('reproduce_tool_client_secret')
reproduction_help_url = request.get('reproduction_help_url')
test_account_email = request.get('test_account_email')
test_account_password = request.get('test_account_password')
wifi_ssid = request.get('wifi_ssid')
wifi_password = request.get('wifi_password')
sendgrid_api_key = request.get('sendgrid_api_key')
sendgrid_sender = request.get('sendgrid_sender')
config.build_apiary_service_account_private_key = (
build_apiary_service_account_private_key)
config.bug_report_url = bug_report_url
config.client_credentials = client_credentials
config.component_repository_mappings = component_repository_mappings
config.contact_string = contact_string
config.documentation_url = documentation_url
config.github_credentials = github_credentials
config.jira_credentials = jira_credentials
config.jira_url = jira_url
config.platform_group_mappings = platform_group_mappings
config.privileged_users = privileged_users
config.blacklisted_users = blacklisted_users
config.relax_security_bug_restrictions = bool(
relax_security_bug_restrictions)
config.relax_testcase_restrictions = bool(relax_testcase_restrictions)
config.reproduce_tool_client_id = reproduce_tool_client_id
config.reproduce_tool_client_secret = reproduce_tool_client_secret
config.reproduction_help_url = reproduction_help_url
config.test_account_email = test_account_email
config.test_account_password = test_account_password
config.wifi_ssid = wifi_ssid
config.wifi_password = wifi_password
config.sendgrid_api_key = sendgrid_api_key
config.sendgrid_sender = sendgrid_sender
helpers.log('Configuration', helpers.MODIFY_OPERATION)
# Before hashing the entity, we must put it so that the internal maps are
# updated.
config.put()
config.previous_hash = utils.entity_hash(config)
config.put()
template_values = {
'title':
'Success',
'message': ('Configuration is successfully updated. '
'Redirecting to the configuration page...'),
'redirect_url':
'/configuration',
}
return self.render('message.html', template_values)
class AddExternalUserPermission(base_handler.Handler):
"""Handles adding a new ExternalUserPermission."""
@handler.post(handler.FORM, handler.HTML)
@handler.check_admin_access
@handler.require_csrf_token
def post(self):
"""Handle a post request."""
email = utils.normalize_email(request.get('email'))
entity_kind = request.get('entity_kind')
entity_name = request.get('entity_name')
is_prefix = request.get('is_prefix')
auto_cc = request.get('auto_cc')
if not email:
raise helpers.EarlyExitException('No email provided.', 400)
if not entity_kind or entity_kind == 'undefined':
raise helpers.EarlyExitException('No entity_kind provided.', 400)
entity_kind = get_value_by_name(USER_PERMISSION_ENTITY_KINDS, entity_kind)
if entity_kind is None:
raise helpers.EarlyExitException('Invalid entity_kind provided.', 400)
if entity_kind == data_types.PermissionEntityKind.UPLOADER:
# Enforce null values for entity name and auto-cc when uploader is chosen.
entity_name = None
auto_cc = data_types.AutoCCType.NONE
else:
if not entity_name:
raise helpers.EarlyExitException('No entity_name provided.', 400)
if not auto_cc or auto_cc == 'undefined':
raise helpers.EarlyExitException('No auto_cc provided.', 400)
auto_cc = get_value_by_name(USER_PERMISSION_AUTO_CC_TYPES, auto_cc)
if auto_cc is None:
raise helpers.EarlyExitException('Invalid auto_cc provided.', 400)
# Check for existing permission.
query = data_types.ExternalUserPermission.query(
data_types.ExternalUserPermission.email == email,
data_types.ExternalUserPermission.entity_kind == entity_kind,
data_types.ExternalUserPermission.entity_name == entity_name)
permission = query.get()
if not permission:
# Doesn't exist, create new one.
permission = data_types.ExternalUserPermission(
email=email, entity_kind=entity_kind, entity_name=entity_name)
permission.is_prefix = bool(is_prefix)
permission.auto_cc = auto_cc
permission.put()
helpers.log('Configuration', helpers.MODIFY_OPERATION)
template_values = {
'title':
'Success',
'message':
('User %s permission for entity %s is successfully added. '
'Redirecting to the configuration page...') % (email, entity_name),
'redirect_url':
'/configuration',
}
return self.render('message.html', template_values)
class DeleteExternalUserPermission(base_handler.Handler):
"""Handles deleting an ExternalUserPermission."""
@handler.post(handler.FORM, handler.HTML)
@handler.check_admin_access
@handler.require_csrf_token
def post(self):
"""Handle a post request."""
email = request.get('email')
entity_kind = request.get('entity_kind')
entity_name = request.get('entity_name')
if not email:
raise helpers.EarlyExitException('No email provided.', 400)
if not entity_kind or entity_kind == 'undefined':
raise helpers.EarlyExitException('No entity_kind provided.', 400)
entity_kind = get_value_by_name(USER_PERMISSION_ENTITY_KINDS, entity_kind)
if entity_kind is None:
raise helpers.EarlyExitException('Invalid entity_kind provided.', 400)
if entity_kind == data_types.PermissionEntityKind.UPLOADER:
entity_name = None
else:
if not entity_name:
raise helpers.EarlyExitException('No entity_name provided.', 400)
# Check for existing permission.
permission = data_types.ExternalUserPermission.query(
data_types.ExternalUserPermission.email == email,
data_types.ExternalUserPermission.entity_kind == entity_kind,
data_types.ExternalUserPermission.entity_name == entity_name).get()
if not permission:
raise helpers.EarlyExitException('Permission does not exist.', 400)
permission.key.delete()
helpers.log('Configuration', helpers.MODIFY_OPERATION)
template_values = {
'title':
'Success',
'message':
('User %s permission for entity %s is successfully deleted. '
'Redirecting to the configuration page...') % (email, entity_name),
'redirect_url':
'/configuration',
}
return self.render('message.html', template_values)
|
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import shutil
import mock
from twisted.trial import unittest
from twisted.internet import defer, reactor, task
from twisted.python import failure, log
from buildslave.test.util import command, compat
from buildslave.test.fake.remote import FakeRemote
from buildslave.test.fake.runprocess import Expect
import buildslave
from buildslave import bot
class TestBot(unittest.TestCase):
def setUp(self):
self.basedir = os.path.abspath("basedir")
if os.path.exists(self.basedir):
shutil.rmtree(self.basedir)
os.makedirs(self.basedir)
self.real_bot = bot.Bot(self.basedir, False)
self.real_bot.startService()
self.bot = FakeRemote(self.real_bot)
def tearDown(self):
d = defer.succeed(None)
if self.real_bot and self.real_bot.running:
d.addCallback(lambda _ : self.real_bot.stopService())
if os.path.exists(self.basedir):
shutil.rmtree(self.basedir)
return d
def test_getCommands(self):
d = self.bot.callRemote("getCommands")
def check(cmds):
# just check that 'shell' is present..
self.assertTrue('shell' in cmds)
d.addCallback(check)
return d
def test_getVersion(self):
d = self.bot.callRemote("getVersion")
def check(vers):
self.assertEqual(vers, buildslave.version)
d.addCallback(check)
return d
def test_getSlaveInfo(self):
infodir = os.path.join(self.basedir, "info")
os.makedirs(infodir)
open(os.path.join(infodir, "admin"), "w").write("testy!")
open(os.path.join(infodir, "foo"), "w").write("bar")
open(os.path.join(infodir, "environ"), "w").write("something else")
d = self.bot.callRemote("getSlaveInfo")
def check(info):
self.assertEqual(info, dict(admin='testy!', foo='bar', environ=os.environ, system=os.name, basedir=self.basedir))
d.addCallback(check)
return d
def test_getSlaveInfo_nodir(self):
d = self.bot.callRemote("getSlaveInfo")
def check(info):
self.assertEqual(set(info.keys()), set(['environ','system','basedir']))
d.addCallback(check)
return d
def test_setBuilderList_empty(self):
d = self.bot.callRemote("setBuilderList", [])
def check(builders):
self.assertEqual(builders, {})
d.addCallback(check)
return d
def test_setBuilderList_single(self):
d = self.bot.callRemote("setBuilderList", [ ('mybld', 'myblddir') ])
def check(builders):
self.assertEqual(builders.keys(), ['mybld'])
self.assertTrue(os.path.exists(os.path.join(self.basedir, 'myblddir')))
# note that we test the SlaveBuilder instance below
d.addCallback(check)
return d
def test_setBuilderList_updates(self):
d = defer.succeed(None)
slavebuilders = {}
def add_my(_):
d = self.bot.callRemote("setBuilderList", [
('mybld', 'myblddir') ])
def check(builders):
self.assertEqual(builders.keys(), ['mybld'])
self.assertTrue(os.path.exists(os.path.join(self.basedir, 'myblddir')))
slavebuilders['my'] = builders['mybld']
d.addCallback(check)
return d
d.addCallback(add_my)
def add_your(_):
d = self.bot.callRemote("setBuilderList", [
('mybld', 'myblddir'), ('yourbld', 'yourblddir') ])
def check(builders):
self.assertEqual(sorted(builders.keys()), sorted(['mybld', 'yourbld']))
self.assertTrue(os.path.exists(os.path.join(self.basedir, 'myblddir')))
self.assertTrue(os.path.exists(os.path.join(self.basedir, 'yourblddir')))
# 'my' should still be the same slavebuilder object
self.assertEqual(id(slavebuilders['my']), id(builders['mybld']))
slavebuilders['your'] = builders['yourbld']
d.addCallback(check)
return d
d.addCallback(add_your)
def remove_my(_):
d = self.bot.callRemote("setBuilderList", [
('yourbld', 'yourblddir2') ]) # note new builddir
def check(builders):
self.assertEqual(sorted(builders.keys()), sorted(['yourbld']))
# note that build dirs are not deleted..
self.assertTrue(os.path.exists(os.path.join(self.basedir, 'myblddir')))
self.assertTrue(os.path.exists(os.path.join(self.basedir, 'yourblddir')))
self.assertTrue(os.path.exists(os.path.join(self.basedir, 'yourblddir2')))
# 'your' should still be the same slavebuilder object
self.assertEqual(id(slavebuilders['your']), id(builders['yourbld']))
d.addCallback(check)
return d
d.addCallback(remove_my)
def add_and_remove(_):
d = self.bot.callRemote("setBuilderList", [
('theirbld', 'theirblddir') ])
def check(builders):
self.assertEqual(sorted(builders.keys()), sorted(['theirbld']))
self.assertTrue(os.path.exists(os.path.join(self.basedir, 'myblddir')))
self.assertTrue(os.path.exists(os.path.join(self.basedir, 'yourblddir')))
self.assertTrue(os.path.exists(os.path.join(self.basedir, 'theirblddir')))
d.addCallback(check)
return d
d.addCallback(add_and_remove)
return d
def test_shutdown(self):
d1 = defer.Deferred()
self.patch(reactor, "stop", lambda : d1.callback(None))
d2 = self.bot.callRemote("shutdown")
# don't return until both the shutdown method has returned, and
# reactor.stop has been called
return defer.gatherResults([d1, d2])
class FakeStep(object):
"A fake master-side BuildStep that records its activities."
def __init__(self):
self.finished_d = defer.Deferred()
self.actions = []
def wait_for_finish(self):
return self.finished_d
def remote_update(self, updates):
for update in updates:
if 'elapsed' in update[0]:
update[0]['elapsed'] = 1
self.actions.append(["update", updates])
def remote_complete(self, f):
self.actions.append(["complete", f])
self.finished_d.callback(None)
class TestSlaveBuilder(command.CommandTestMixin, unittest.TestCase):
def setUp(self):
self.basedir = os.path.abspath("basedir")
if os.path.exists(self.basedir):
shutil.rmtree(self.basedir)
os.makedirs(self.basedir)
self.bot = bot.Bot(self.basedir, False)
self.bot.startService()
# get a SlaveBuilder object from the bot and wrap it as a fake remote
builders = self.bot.remote_setBuilderList([('sb', 'sb')])
self.sb = FakeRemote(builders['sb'])
self.setUpCommand()
def tearDown(self):
self.tearDownCommand()
d = defer.succeed(None)
if self.bot and self.bot.running:
d.addCallback(lambda _ : self.bot.stopService())
if os.path.exists(self.basedir):
shutil.rmtree(self.basedir)
return d
def test_print(self):
return self.sb.callRemote("print", "Hello, SlaveBuilder.")
def test_setMaster(self):
# not much to check here - what the SlaveBuilder does with the
# master is not part of the interface (and, in fact, it does very little)
return self.sb.callRemote("setMaster", mock.Mock())
def test_shutdown(self):
# don't *actually* shut down the reactor - that would be silly
stop = mock.Mock()
self.patch(reactor, "stop", stop)
d = self.sb.callRemote("shutdown")
def check(_):
self.assertTrue(stop.called)
d.addCallback(check)
return d
def test_startBuild(self):
return self.sb.callRemote("startBuild")
def test_startCommand(self):
# set up a fake step to receive updates
st = FakeStep()
# patch runprocess to handle the 'echo', below
self.patch_runprocess(
Expect([ 'echo', 'hello' ], os.path.join(self.basedir, 'sb', 'workdir'))
+ { 'hdr' : 'headers' } + { 'stdout' : 'hello\n' } + { 'rc' : 0 }
+ 0,
)
d = defer.succeed(None)
def do_start(_):
return self.sb.callRemote("startCommand", FakeRemote(st),
"13", "shell", dict(
command=[ 'echo', 'hello' ],
workdir='workdir',
))
d.addCallback(do_start)
d.addCallback(lambda _ : st.wait_for_finish())
def check(_):
self.assertEqual(st.actions, [
['update', [[{'hdr': 'headers'}, 0]]],
['update', [[{'stdout': 'hello\n'}, 0]]],
['update', [[{'rc': 0}, 0]]],
['update', [[{'elapsed': 1}, 0]]],
['complete', None],
])
d.addCallback(check)
return d
def test_startCommand_interruptCommand(self):
# set up a fake step to receive updates
st = FakeStep()
# patch runprocess to pretend to sleep (it will really just hang forever,
# except that we interrupt it)
self.patch_runprocess(
Expect([ 'sleep', '10' ], os.path.join(self.basedir, 'sb', 'workdir'))
+ { 'hdr' : 'headers' }
+ { 'wait' : True }
)
d = defer.succeed(None)
def do_start(_):
return self.sb.callRemote("startCommand", FakeRemote(st),
"13", "shell", dict(
command=[ 'sleep', '10' ],
workdir='workdir',
))
d.addCallback(do_start)
# wait a jiffy..
def do_wait(_):
d = defer.Deferred()
reactor.callLater(0.01, d.callback, None)
return d
d.addCallback(do_wait)
# and then interrupt the step
def do_interrupt(_):
return self.sb.callRemote("interruptCommand", "13", "tl/dr")
d.addCallback(do_interrupt)
d.addCallback(lambda _ : st.wait_for_finish())
def check(_):
self.assertEqual(st.actions, [
['update', [[{'hdr': 'headers'}, 0]]],
['update', [[{'hdr': 'killing'}, 0]]],
['update', [[{'rc': -1}, 0]]],
['complete', None],
])
d.addCallback(check)
return d
def test_startCommand_failure(self):
# similar to test_startCommand, but leave out some args so the slave
# generates a failure
# set up a fake step to receive updates
st = FakeStep()
# patch the log.err, otherwise trial will think something *actually* failed
self.patch(log, "err", lambda f : None)
d = defer.succeed(None)
def do_start(_):
return self.sb.callRemote("startCommand", FakeRemote(st),
"13", "shell", dict(
workdir='workdir',
))
d.addCallback(do_start)
d.addCallback(lambda _ : st.wait_for_finish())
def check(_):
self.assertEqual(st.actions[1][0], 'complete')
self.assertTrue(isinstance(st.actions[1][1], failure.Failure))
d.addCallback(check)
return d
class TestBotFactory(unittest.TestCase):
def setUp(self):
self.bf = bot.BotFactory('mstr', 9010, 35, 200)
# tests
def test_timers(self):
clock = self.bf._reactor = task.Clock()
calls = []
def callRemote(method):
calls.append(clock.seconds())
self.assertEqual(method, 'keepalive')
# simulate the response taking a few seconds
d = defer.Deferred()
clock.callLater(5, d.callback, None)
return d
self.bf.perspective = mock.Mock()
self.bf.perspective.callRemote = callRemote
self.bf.startTimers()
clock.callLater(100, self.bf.stopTimers)
clock.pump(( 1 for _ in xrange(150)))
self.assertEqual(calls, [ 35, 70 ])
@compat.usesFlushLoggedErrors
def test_timers_exception(self):
clock = self.bf._reactor = task.Clock()
self.bf.perspective = mock.Mock()
def callRemote(method):
return defer.fail(RuntimeError("oh noes"))
self.bf.perspective.callRemote = callRemote
self.bf.startTimers()
clock.advance(35)
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
# note that the BuildSlave class is tested in test_bot_BuildSlave
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the log service
:author: Thomas Calmant
"""
# Pelix
import pelix.framework
import pelix.misc
from pelix.ipopo.constants import use_ipopo
from pelix.misc.log import LOG_DEBUG, LOG_INFO, LOG_WARNING, LOG_ERROR
# Standard library
import logging
import sys
import time
try:
import unittest2 as unittest
except ImportError:
import unittest
# ------------------------------------------------------------------------------
__version__ = "1.0.0"
# ------------------------------------------------------------------------------
class LogServiceTest(unittest.TestCase):
"""
Tests the log service
"""
def setUp(self):
"""
Prepares a framework and registers a service to export
"""
# Create the framework
self.framework = pelix.framework.create_framework(
('pelix.ipopo.core', 'pelix.misc.log'))
self.framework.start()
# Get the service
self.service = self._get_service()
def tearDown(self):
"""
Cleans up for next test
"""
# Stop the framework
pelix.framework.FrameworkFactory.delete_framework(self.framework)
self.service = None
self.framework = None
def _get_service(self):
"""
Returns the log service
"""
context = self.framework.get_bundle_context()
ref = context.get_service_reference(pelix.misc.LOG_SERVICE)
return context.get_service(ref)
def test_log(self):
"""
Basic tests for the log service
"""
# Try to log at various log levels
prev_logs = []
for level, osgi_level in (
(logging.DEBUG, LOG_DEBUG), (logging.INFO, LOG_INFO),
(logging.WARNING, LOG_WARNING), (logging.ERROR, LOG_ERROR),
(logging.CRITICAL, LOG_ERROR)):
# Log at the expected level
self.service.log(level, logging.getLevelName(level))
# Get new logs
new_logs = self.service.get_log()
latest = new_logs[-1]
# Check time stamp
self.assertLessEqual(
latest.time, time.time() + .5, "Log in future")
self.assertGreaterEqual(
latest.time, time.time() - 10, "Log too far in past")
# Check stored info
self.assertEqual(latest.level, level, "Wrong log level")
self.assertEqual(latest.osgi_level, osgi_level,
"Wrong OSGi log level")
self.assertEqual(latest.message, logging.getLevelName(level),
"Wrong log message")
self.assertIsNone(latest.bundle, "Unexpected bundle info")
self.assertIsNone(latest.exception, "Unexpected exception data")
self.assertIsNone(latest.reference, "Unexpected reference data")
# Compare list (not tuples)
new_logs = list(new_logs)
self.assertListEqual(new_logs, prev_logs + [latest],
"Logs list changed")
prev_logs = new_logs
def test_logging(self):
"""
Tests that logs made with the logging module are handled
"""
# Debug logs aren't taken into account
logging.debug("Some log message at %s",
logging.getLevelName(logging.DEBUG))
self.assertListEqual(
list(self.service.get_log()), [], "Debug message logged")
# Try to log at various log levels
prev_logs = []
for level, osgi_level in (
(logging.INFO, LOG_INFO), (logging.WARNING, LOG_WARNING),
(logging.ERROR, LOG_ERROR), (logging.CRITICAL, LOG_ERROR)):
# Log at the expected level
logging.log(level, "Some log message at %s",
logging.getLevelName(level))
# Get new logs
new_logs = self.service.get_log()
latest = new_logs[-1]
# Check time stamp
self.assertLessEqual(
latest.time, time.time() + .5, "Log in future")
self.assertGreaterEqual(
latest.time, time.time() - 10, "Log too far in past")
# Check stored info
self.assertEqual(latest.level, level, "Wrong log level")
self.assertEqual(latest.osgi_level, osgi_level,
"Wrong OSGi log level")
self.assertIn(logging.getLevelName(level), latest.message,
"Wrong log message")
self.assertIsNone(latest.bundle, "Unexpected bundle info")
self.assertIsNone(latest.exception, "Unexpected exception data")
self.assertIsNone(latest.reference, "Unexpected reference data")
# Check string representation
self.assertIn(logging.getLevelName(level), str(latest))
# Compare list (not tuples)
new_logs = list(new_logs)
self.assertListEqual(new_logs, prev_logs + [latest],
"Logs list changed")
prev_logs = new_logs
def test_logging_filter_level(self):
"""
Tests the change of filter for the logging handler
"""
for filter_level in (logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR):
for int_level in (True, False):
# Restart the framework
self.tearDown()
self.setUp()
# Change the framework property and reload the log service
if int_level:
self.framework.add_property(
pelix.misc.PROPERTY_LOG_LEVEL, filter_level)
else:
self.framework.add_property(
pelix.misc.PROPERTY_LOG_LEVEL,
logging.getLevelName(filter_level))
self.framework.get_bundle_by_name("pelix.misc.log").update()
self.service = self._get_service()
# Log for each level
for level in (logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR):
# Log something
logging.log(level, "Some log at %s",
logging.getLevelName(level))
try:
latest = self.service.get_log()[-1]
if level >= filter_level:
self.assertIn(logging.getLevelName(level),
latest.message)
except IndexError:
if level >= filter_level:
self.fail("Missing a log matching the filter")
# Try with invalid levels, default level is INFO
filter_level = logging.INFO
for invalid in (None, "", "deb", "ug", "foobar", {1: 2}, [1, 2]):
# Restart the framework
self.tearDown()
self.setUp()
# Change the framework property and reload the log service
self.framework.add_property(pelix.misc.PROPERTY_LOG_LEVEL, invalid)
self.framework.get_bundle_by_name("pelix.misc.log").update()
self.service = self._get_service()
# Log for each level
for level in (logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR):
# Log something
logging.log(level, "Some log at %s",
logging.getLevelName(level))
try:
latest = self.service.get_log()[-1]
if level >= filter_level:
self.assertIn(logging.getLevelName(level),
latest.message)
except IndexError:
if level >= filter_level:
self.fail("Missing a log matching the filter")
def test_listener(self):
"""
Tests when log listeners are notified
"""
entries = []
# Prepare the listener
class Listener:
@staticmethod
def logged(entry):
entries.append(entry)
listener = Listener()
# Register it twice
self.service.add_log_listener(listener)
self.service.add_log_listener(listener)
# Also, check with a null log listener
self.service.add_log_listener(None)
# Log something
self.service.log(logging.WARNING, "Some log")
# Get the log entry through the service
latest = self.service.get_log()[-1]
# Compare with what we stored
self.assertListEqual(entries, [latest], "Bad content for the listener")
# Clean up
del entries[:]
# Unregister the listener once
self.service.remove_log_listener(listener)
# Log something
self.service.log(logging.WARNING, "Some log")
# Nothing must have been logged
self.assertListEqual(entries, [], "Something has been logged")
# Nothing must happen if we unregister the listener twice
self.service.remove_log_listener(listener)
self.service.remove_log_listener(None)
def test_bad_listener(self):
"""
Tests a listener raising an exception
"""
# Prepare the listener
class GoodListener:
def __init__(self):
self.entries = []
def logged(self, entry):
self.entries.append(entry)
class BadListener(GoodListener):
def logged(self, entry):
super(BadListener, self).logged(entry)
raise OSError("Something went wrong")
good1 = GoodListener()
bad = BadListener()
good2 = GoodListener()
# Register listeners
self.service.add_log_listener(good1)
self.service.add_log_listener(bad)
self.service.add_log_listener(good2)
# Log something
self.service.log(logging.WARNING, "Some log")
# Get the log entry through the service
latest = self.service.get_log()[-1]
self.assertEqual(latest.level, logging.WARNING)
for listener in (good1, bad, good2):
self.assertIs(latest, listener.entries[-1], "Entry not kept")
def test_reference(self):
"""
Tests the service reference handling in logs
"""
# Register a service, with the Framework context
context = self.framework.get_bundle_context()
svc_reg = context.register_service("test.svc", object(), {})
svc_ref = svc_reg.get_reference()
# Log something
self.service.log(logging.WARNING, "Some text", reference=svc_ref)
# Check what has been stored
latest = self.service.get_log()[-1]
self.assertIs(latest.reference, svc_ref, "Wrong service reference")
self.assertIs(latest.bundle, self.framework, "Wrong bundle found")
# Log with wrong references
for wrong_ref in (None, object(), svc_reg):
self.service.log(logging.WARNING, "Some text", reference=wrong_ref)
latest = self.service.get_log()[-1]
self.assertIsNone(latest.reference, "Non-None service reference")
self.assertIsNone(latest.bundle, "Non-None bundle found")
def test_bundle(self):
"""
Tests the detection of the calling bundle
"""
# Install a test bundle
context = self.framework.get_bundle_context()
bnd = context.install_bundle("tests.misc.log_bundle")
module = bnd.get_module()
bnd.start()
# Instantiate a test component
with use_ipopo(context) as ipopo:
comp = ipopo.instantiate(module.SIMPLE_FACTORY, "test.log", {})
# Log something
comp.log(logging.WARNING, "Some log")
# Check the bundle
latest = self.service.get_log()[-1]
self.assertIs(latest.bundle, bnd, "Wrong bundle found")
# Check if the bundle in the string representation
self.assertIn(bnd.get_symbolic_name(), str(latest))
# Remove the name of the module
comp.remove_name()
# Log something
comp.log(logging.WARNING, "Some log")
# Check the bundle
latest = self.service.get_log()[-1]
self.assertIsNone(latest.bundle, "Wrong bundle found")
def test_exception(self):
"""
Tests the exception information
"""
try:
raise ValueError("Some error")
except ValueError:
self.service.log(logging.ERROR, "Error !", sys.exc_info())
latest = self.service.get_log()[-1]
self.assertTrue(isinstance(latest.exception, str),
"Exception info must be a string")
self.assertIn(__file__, latest.exception, "Incomplete exception info")
# Check if the exception in the string representation
self.assertIn(latest.exception, str(latest))
# Check invalid exception info
for invalid in ([], [1, 2], (4, 5, 6)):
self.service.log(logging.ERROR, "Error !", invalid)
latest = self.service.get_log()[-1]
self.assertEqual(latest.exception, '<Invalid exc_info>')
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_server_request(
resource_group_name: str,
server_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateLinkResources')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
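# Illustration (hypothetical values, not part of the generated code): with
# resource_group_name="my-rg", server_name="my-server" and
# subscription_id="0000-0000", build_list_by_server_request produces a GET
# request whose URL is
#   /subscriptions/0000-0000/resourceGroups/my-rg/providers/Microsoft.DBforMySQL/servers/my-server/privateLinkResources
# with the query parameter api-version=2018-06-01 and an
# Accept: application/json header.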
def build_get_request(
resource_group_name: str,
server_name: str,
group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateLinkResources/{groupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"groupName": _SERIALIZER.url("group_name", group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class PrivateLinkResourcesOperations(object):
"""PrivateLinkResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mysql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_server(
self,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> Iterable["_models.PrivateLinkResourceListResult"]:
"""Gets the private link resources for MySQL server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkResourceListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.mysql.models.PrivateLinkResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_server.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateLinkResources'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
server_name: str,
group_name: str,
**kwargs: Any
) -> "_models.PrivateLinkResource":
"""Gets a private link resource for MySQL server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param group_name: The name of the private link resource.
:type group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResource, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mysql.models.PrivateLinkResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
group_name=group_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateLinkResources/{groupName}'} # type: ignore
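# Hedged usage sketch (assumes the usual entry points of the enclosing
# azure-mgmt-rdbms package; the client and credential classes below are not
# defined in this module):
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.rdbms.mysql import MySQLManagementClient
#
#   client = MySQLManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   for resource in client.private_link_resources.list_by_server("my-rg", "my-server"):
#       print(resource.name)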
|
|
#!/usr/bin/env python
#
# Copyright (c) 2010-2013, GhostBSD. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistribution's of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistribution's in binary form must reproduce the above
# copyright notice,this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither then name of GhostBSD Project nor the names of its
# contributors maybe used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# root.py v 1.4 Friday, January 17 2014 Eric Turgeon
#
# root.py sets the root password.
from gi.repository import Gtk
import os
import re
from subprocess import Popen
import pickle
# Directories used by the installer.
tmp = "/tmp/.gbi/"
installer = "/usr/local/lib/gbi/"
if not os.path.exists(tmp):
os.makedirs(tmp)
# Check whether the password contains only lower case letters
def lowerCase(strg, search=re.compile(r'[^a-z]').search):
return not bool(search(strg))
# Check whether the password contains only upper case letters
def upperCase(strg, search=re.compile(r'[^A-Z]').search):
return not bool(search(strg))
# Check whether the password contains only lower case letters and digits
def lowerandNunber(strg, search=re.compile(r'[^a-z0-9]').search):
return not bool(search(strg))
# Check whether the password contains only upper case letters and digits
def upperandNunber(strg, search=re.compile(r'[^A-Z0-9]').search):
return not bool(search(strg))
# Check whether the password contains only lower and upper case letters
def lowerUpperCase(strg, search=re.compile(r'[^a-zA-Z]').search):
return not bool(search(strg))
# Check whether the password contains only letters and digits
def lowerUpperNumber(strg, search=re.compile(r'[^a-zA-Z0-9]').search):
return not bool(search(strg))
# Check whether the password contains only letters, digits and allowed punctuation
def allCharacter(strg, search=re.compile(r'[^a-zA-Z0-9~\!@#\$%\^&\*_\+":;\'\-]').search):
return not bool(search(strg))
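# Illustration only (not used by the installer): each helper returns True
# when the password consists exclusively of the named character classes; it
# does not require every class to be present. For example:
#   lowerCase("abcdef")        -> True   (lower case letters only)
#   lowerandNunber("abc123")   -> True   (lower case letters and digits)
#   lowerUpperNumber("Abc123") -> True   (mixed case letters and digits)
#   allCharacter("Abc123!")    -> True   (letters, digits and allowed symbols)
#   lowerCase("abc1")          -> False  (the digit falls outside the class)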
class RootUser:
def save_selection(self):
if self.password.get_text() == self.repassword.get_text():
f = open('%sroot' % tmp, 'wb')
rp = self.password.get_text()
ul = [rp]
pickle.dump(ul, f)
f.close()
def __init__(self, button3):
self.box1 = Gtk.VBox(False, 0)
self.box1.show()
box2 = Gtk.VBox(False, 0)
box2.set_border_width(10)
self.box1.pack_start(box2, False, False, 10)
box2.show()
# title.
ttext = "Administrator(root) Password"
Title = Gtk.Label("<b><span size='xx-large'>%s</span></b>" % ttext)
Title.set_use_markup(True)
box2.pack_start(Title, False, False, 10)
# password for root.
label = Gtk.Label('<b>Administrator (root) Password</b>')
label.set_use_markup(True)
label.set_alignment(.4, .2)
table = Gtk.Table(1, 3, True)
table.set_row_spacings(10)
label1 = Gtk.Label("Password")
self.password = Gtk.Entry()
self.password.set_visibility(False)
self.password.connect("changed", self.passwdstrength)
label2 = Gtk.Label("Verify Password")
self.repassword = Gtk.Entry()
self.repassword.set_visibility(False)
self.repassword.connect("changed", self.passwdVerification, button3)
self.label3 = Gtk.Label()
self.img = Gtk.Image()
table.attach(label1, 0, 1, 1, 2)
table.attach(self.password, 1, 2, 1, 2)
table.attach(self.label3, 2, 3, 1, 2)
table.attach(label2, 0, 1, 2, 3)
table.attach(self.repassword, 1, 2, 2, 3)
table.attach(self.img, 2, 3, 2, 3)
box2.pack_start(table, False, False, 10)
def get_model(self):
return self.box1
def passwdstrength(self, widget):
passwd = self.password.get_text()
if len(passwd) <= 4:
self.label3.set_text("Super Weak")
elif len(passwd) <= 8:
if lowerCase(passwd) or upperCase(passwd) or passwd.isdigit():
self.label3.set_text("Super Weak")
elif lowerandNunber(passwd):
self.label3.set_text("Very Weak")
elif upperandNunber(passwd):
self.label3.set_text("Very Weak")
elif lowerUpperCase(passwd):
self.label3.set_text("Very Weak")
elif lowerUpperNumber(passwd):
self.label3.set_text("Fairly Weak")
elif allCharacter(passwd):
self.label3.set_text("Weak")
elif len(passwd) <= 12:
if lowerCase(passwd) or upperCase(passwd) or passwd.isdigit():
self.label3.set_text("Very Weak")
elif lowerandNunber(passwd):
self.label3.set_text("Fairly Weak")
elif upperandNunber(passwd):
self.label3.set_text("Fairly Weak")
elif lowerUpperCase(passwd):
self.label3.set_text("Fairly Weak")
elif lowerUpperNumber(passwd):
self.label3.set_text("Weak")
elif allCharacter(passwd):
self.label3.set_text("Strong")
elif len(passwd) <= 16:
if lowerCase(passwd) or upperCase(passwd) or passwd.isdigit():
self.label3.set_text("Fairly Weak")
elif lowerandNunber(passwd):
self.label3.set_text("Weak")
elif upperandNunber(passwd):
self.label3.set_text("Weak")
elif lowerUpperCase(passwd):
self.label3.set_text("Weak")
elif lowerUpperNumber(passwd):
self.label3.set_text("Strong")
elif allCharacter(passwd):
self.label3.set_text("Fairly Strong")
elif len(passwd) <= 20:
if lowerCase(passwd) or upperCase(passwd) or passwd.isdigit():
self.label3.set_text("Weak")
elif lowerandNunber(passwd):
self.label3.set_text("Strong")
elif upperandNunber(passwd):
self.label3.set_text("Strong")
elif lowerUpperCase(passwd):
self.label3.set_text("Strong")
elif lowerUpperNumber(passwd):
self.label3.set_text("Fairly Strong")
elif allCharacter(passwd):
self.label3.set_text("Very Strong")
elif len(passwd) <= 24:
if lowerCase(passwd) or upperCase(passwd) or passwd.isdigit():
self.label3.set_text("Strong")
elif lowerandNunber(passwd):
self.label3.set_text("Fairly Strong")
elif upperandNunber(passwd):
self.label3.set_text("Fairly Strong")
elif lowerUpperCase(passwd):
self.label3.set_text("Fairly Strong")
elif lowerUpperNumber(passwd):
self.label3.set_text("Very Strong")
elif allCharacter(passwd):
self.label3.set_text("Super Strong")
elif len(passwd) > 24:
if lowerCase(passwd) or upperCase(passwd) or passwd.isdigit():
self.label3.set_text("Fairly Strong")
else:
self.label3.set_text("Super Strong")
def passwdVerification(self, widget, button3):
if self.password.get_text() == self.repassword.get_text() and self.password.get_text() != "":
self.img.set_from_stock(Gtk.STOCK_YES, 5)
button3.set_sensitive(True)
else:
self.img.set_from_stock(Gtk.STOCK_NO, 5)
button3.set_sensitive(False)
|
|
import pandas as pd
import numpy as np
import CGAT.IOTools as IOTools
import random
import itertools
from CGATPipelines.Pipeline import cluster_runnable
import json
import decimal
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.patches as mpatches
@cluster_runnable
def MakeSNPFreqDict(infiles, outfiles, rs):
'''
Generates a random set of SNPs to use to characterise the ancestry
and relatedness of the samples.
1. Finds all SNPs which have a known genotype frequency for all 11
hapmap ancestries
2. Picks a random 50000 of these SNPs
3. Stores a list of these SNPs as randomsnps.tsv
4. Builds a dictionary of dictionaries of dictionaries where:
At Level 1 each key is a HapMap ancestry ID
each of these keys leads to another dictionary - level 2.
At Level 2 each key is a dbSNP SNP ID
each of these keys leads to another dictionary - level 3.
At Level 3 each key is a genotype:
the values of these keys are the genotype frequency of this
genotype at this SNP in this ancestry.
e.g. snpdict['ASW']['rs000000001']['CT'] -->
frequency of the CT genotype at the rs0000001 SNP in the
ASW population
5. Stores this dictionary in json format as randomsnps.json
'''
# list all chromosomes
chroms = np.array([f.split("/")[-1].split("_")[2] for f in infiles])
uchroms = set(chroms)
infiles = np.array(infiles)
snpdict = dict()
for chrom in uchroms:
# get all the files for this chromosome
c = np.where(chroms == chrom)[0]
thischrom = infiles[c]
snpidsets = []
# make a set of all the snp ids of known frequency for
# this chromosome for each ancestry
for f in thischrom:
snpids = []
anc = f.split("/")[-1].split("_")[3]
with IOTools.openFile(f) as inp:
for line in inp:
line = line.strip().split(" ")
snpids.append(line[0])
snpids = set(snpids)
snpidsets.append(snpids)
# find the snp ids where frequency is known in all ancestries
thischromsnps = set.intersection(*snpidsets)
snpdict[chrom] = thischromsnps
vals = list(snpdict.values())
# set of all the SNPs genotyped in all ancestries on all chroms
pooled = set.union(*vals)
# take a random sample of snps from this set
random.seed(rs)
sam = set(random.sample(pooled, 50000))
# make a new dictionary of just the sampled snps for each chrom
snpdictsam = dict()
for key in snpdict:
i = snpdict[key] & sam
snpdictsam[key] = i
# generate all possible genotypes as list of strings
# (e.g. CC CT CG CA TT etc)
res = ["%s%s" % (g[0], g[1])
for g in itertools.permutations("CGAT", 2)]
res += ['CC', 'GG', 'TT', 'AA']
freqdict = dict()
ancs = set([l.split("/")[-1].split("_")[3] for l in infiles])
# build an empty dictionary of dictionaries of dictionaries
# to store genotype freqs for each ancestry
for anc in ancs:
freqdict.setdefault(anc, dict())
for s in sam:
freqdict[anc].setdefault(s, dict())
for r in res:
freqdict[anc][s][r] = 0.0
# read genotype freqs from the input file and store in the dictionary
for f in infiles:
bits = f.split("/")[-1].split("_")
anc = bits[3]
chrom = bits[2]
snpset = snpdictsam[chrom]
with IOTools.openFile(f) as input:
for line in input:
line = line.strip().split(" ")
if line[0] in snpset:
G1 = "%s%s" % (line[10][0], line[10][-1])
try:
F1 = float(line[11])
G2 = "%s%s" % (line[13][0], line[13][-1])
F2 = float(line[14])
G3 = "%s%s" % (line[16][0], line[16][-1])
F3 = float(line[17])
except (ValueError, IndexError):
# occasionally the genotype frequencies are not numeric
continue
freqdict[anc][line[0]][G1] = F1
freqdict[anc][line[0]][G2] = F2
freqdict[anc][line[0]][G3] = F3
# dump dictionary in a json file to resurrect later
outf = IOTools.openFile(outfiles[0], "w")
json.dump(freqdict, outf)
outf.close()
# store the sampled list of snps
out = IOTools.openFile(outfiles[1], "w")
for snp in sam:
out.write("%s\n" % snp)
out.close()
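# Sketch of how the stored output can be used downstream (the paths are the
# outfiles written above; the lookup mirrors the dictionary layout described
# in the docstring):
#
#   with IOTools.openFile(outfiles[0]) as inf:
#       freqs = json.load(inf)
#   freqs["ASW"]["rs0000001"]["CT"]   # frequency of genotype CT at this SNP
#                                     # in the ASW HapMap population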
@cluster_runnable
def GenotypeSNPs(infile, snplist, outfile):
'''
Fetches the genotype from the variant tables for all samples
for SNPs in the hapmap sample from makeRandomSNPSet.
Complex sites are ignored (as simple SNPs are sufficient for these
calculations).
These are:
Sites which failed QC (column 3 in the variant table is not PASS)
Sites with more than 2 alleles defined (column 6 in the variant table
contains more than one alternative allele)
SNPs with more than one ID
Indels
'''
out = IOTools.openFile(outfile, "w")
with IOTools.openFile(infile) as inf:
for line in inf:
line = line.strip().split()
# if the variant passed QC
if line[4] == "PASS":
genotype = line[7]
# if the genotype looks normal e.g. 1/1
if len(genotype) == 3:
# get the actual genotype (rather than the index)
if genotype[0] != ".":
ind1 = int(genotype[0])
else:
ind1 = 0
if genotype[2] != ".":
ind2 = int(genotype[2])
else:
ind2 = 0
A1 = line[5]
A2 = line[6].split(",")
AS = [A1] + A2
if len(AS) <= 2:
GT = "%s%s" % (AS[ind1], AS[ind2])
refGT = "%s%s" % (A1, A1)
if len(GT) == 2:
if line[3][0:2] == "rs" and len(
line[3].split(";")) == 1:
snpid = line[3]
chrom = line[0]
pos = line[1]
if snpid in snplist:
out.write("%s\t%s\t%s\t%s\t%s\n"
% (snpid, chrom, pos, GT,
refGT))
out.close()
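# Worked example of the genotype decoding above (values are illustrative):
# for a variant line with REF "A" (line[5]), ALT "G" (line[6]) and genotype
# field "0/1" (line[7]), AS becomes ["A", "G"], so GT is written as "AG"
# while refGT is "AA"; a "1/1" call at the same site would give GT "GG".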
@cluster_runnable
def CalculateAncestry(infile, calledsnps, snpdict, outfiles):
'''
Takes the data stored in MakeRandomSNPSet and the genotype of each sample
at each site in calledsnps.tsv and tabulates the frequency of this
genotype in each of the HapMap ancestry categories.
The overall probability of each ancestry is then calculated as the
product of these frequencies. These can only be used in comparison to
each other - to show which of the 11 ancestries is most probable.
'''
# List the SNPs in the sample where a variant has been called
# Record the reference genotype at each of these SNPs
called = set()
refs = dict()
for line in IOTools.openFile(calledsnps).readlines():
line = line.strip().split("\t")
refs[line[0]] = line[1]
called.add(line[0])
# Record the genotype of the current sample at each SNP
currents = dict()
for line in IOTools.openFile(infile).readlines():
line = line.strip().split("\t")
currents[line[0]] = line[3]
# Regenerate the genotype frequency dictionary made earlier
s = IOTools.openFile(snpdict)
snpdict = json.load(s)
s.close()
ancs = list(snpdict.keys())
# Build a table of the frequency of the genotype in this sample
# in each of the hapmap ancestries
out = IOTools.openFile(outfiles[0], "w")
out.write("snp\tgenotype\t%s\n" % "\t".join(ancs))
for snp in called:
# where a variant hasn't been called in the current sample assume
# the reference genotype
if snp in currents:
geno = currents[snp]
else:
geno = refs[snp]
res = []
for anc in ancs:
D = snpdict[anc]
sub = D[snp]
freq = sub[geno]
res.append(freq)
res = [str(r) for r in res]
out.write("%s\t%s\t%s\n" % (snp, geno, "\t".join(res)))
out.close()
out = IOTools.openFile(outfiles[1], "w")
# read the output of the previous step into pandas
res = pd.read_csv(outfiles[0], sep="\t")
# calculate the probability of each ancestry as the product of all the
# genotype frequencies
# SNPs where a genotype not recorded in hapmap has been called are
# skipped
# decimal package is used because the results are very small floats
res = res.replace(0, float('nan'))
for anc in set(ancs):
r = res[anc][np.invert(res[anc].isnull())]
r = [decimal.Decimal(d) for d in r]
out.write("%s\t%s\n" % (anc, np.product(r)))
out.close()
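# Why decimal is used above: the product of tens of thousands of genotype
# frequencies underflows ordinary floats, e.g.
#
#   np.product([1e-4] * 100)                     # -> 0.0 (float underflow)
#   np.product([decimal.Decimal("1e-4")] * 100)  # -> Decimal('1E-400')
#
# so only the Decimal products remain comparable between ancestries.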
@cluster_runnable
def MakePEDFile(infiles, outfiles):
'''
Generates the required input for the Plink and King software packages.
- PED file - columns are SNPs and rows are samples, each cell is the
genotype of the sample at this SNP
- MAP file - rows are SNPs in the same order as the columns in the ped
file, each row shows chromosome, snp id and position.
'''
# Record the chromosome and position of each SNP
chromposdict = dict()
for line in IOTools.openFile(infiles[-1]).readlines():
line = line.strip().split("\t")
chromposdict[line[0]] = ((line[2], line[3]))
# Generate the PED file - takes the genotype column of the table from
# the CalculateAncestry step and transposes it across a row
out = IOTools.openFile(outfiles[0], "w")
for inf in infiles[0:-1]:
inf = inf[0]
df = pd.read_csv(inf, sep="\t")
geno = df['genotype']
genolist = []
for g in geno:
genolist.append(" ".join(list(g)))
out.write("%s\t%s\n" % (inf.split("/")[-1], "\t".join(genolist)))
out.close()
# Generate the MAP file - chromosome and position of each SNP in the same
# order as the PED file
mapf = IOTools.openFile(outfiles[1], "w")
slist = df['snp']
for snp in slist:
mapf.write("%s\t%s\t%s\n" % (chromposdict[snp][0], snp,
chromposdict[snp][1]))
mapf.close()
def CalculateFamily(infile, outfile):
'''
Translates and filters the output from King.
Pairs of related samples are written to the output file along with the
degree of relatedness. Degrees are decided using thresholds from
the King documentation, here
http://people.virginia.edu/~wc9c/KING/manual.html
'''
out = IOTools.openFile(outfile, "w")
input = open(infile).readlines()[1:]
for line in input:
line = line.strip().split("\t")
score = float(line[14])
# if there is a significant relatedness score
if score >= 0.0442:
ID1 = line[0]
ID2 = line[2]
# 2nd degree relatives
if score <= 0.177:
degree = 2
# 1st degree relatives
elif score <= 0.354:
degree = 1
# 0th degree relatives (identical twins or duplicate samples)
else:
degree = 0
out.write("%s\t%s\t%s\n" % (ID1, ID2, degree))
out.close()
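# Example of the threshold mapping above (kinship scores follow the King
# manual cited in the docstring): a score of 0.40 is written as degree 0
# (duplicate or identical twin), 0.20 as degree 1, 0.10 as degree 2, and a
# score below 0.0442 is not reported at all.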
@cluster_runnable
def CalculateSex(infiles, outfile):
'''
Approximates the sex of the sample using the data in the variant table.
Basic estimate based on homozygosity on the X chromosome - genotypes
are taken for all called variants on X passing QC and the fraction of
homozygous genotypes is calculated.
This tends to produce two clear populations so Kmeans clustering
is used to split the data into two - male and female. Samples which are
unclear are marked in the output.
'''
percs = []
for f in infiles:
homs = 0
tot = 0
# Count the homozygous and total variants called on the
# X chromosome in each input file (after QC)
with IOTools.openFile(f) as inf:
for line in inf:
line = line.strip().split("\t")
if line[0] == "chrX" and line[4] == "PASS":
geno = line[7]
s = set(list(geno))
if len(s) == 2:
homs += 1
tot += 1
perc = float(homs) / float(tot)
percs.append(perc)
# impose a grid on the data so clustering is possible
ests = np.array(percs)
ones = np.array([1] * len(ests))
grid = np.column_stack((ones, ests))
# divide data into two clusters
means = KMeans(n_clusters=2)
fit1 = means.fit(grid)
res = fit1.predict(grid)
# find the position of the cluster centres on the y axis
ccs = fit1.cluster_centers_
cc1 = ccs[0, 1]
cc2 = ccs[1, 1]
ccs = [cc1, cc2]
# pick out the scores from each cluster and calculate the standard dev
s1 = ests[res == 0]
s2 = ests[res == 1]
sds = [np.std(s1), np.std(s2)]
# males will be more homozygous - the bigger value represents males
if cc1 > cc2:
sexes = ["male", "female"]
else:
sexes = ["female", "male"]
x = 0
out = IOTools.openFile(outfile, "w")
for f in infiles:
# for each input file take the group, the score, the standard deviation
# of this group, the distance from the cluster centre and the sex
group = res[x]
est = ests[x]
std = sds[group]
dist = est - ccs[group]
sex = sexes[group]
# is the value within 3 SDs of the mean for this sex?
if sex == "male":
if ((3 * std) - abs(dist) >= 0 or dist >= 0):
sig = "*"
else:
sig = "-"
else:
if ((3 * std) - abs(dist) >= 0 or dist <= 0):
sig = "*"
else:
sig = "-"
out.write("%s\t%f\t%f\t%s\t%s\n" % (f, est, dist, sexes[group], sig))
x += 1
out.close()
# plot the output - blue dots male, red dots female
# lines represent 3 standard deviations from the cluster centre
p = plt.figure()
a = p.add_subplot('111')
a.axis([0, len(infiles),
(min([min(s1), min(s2)]) - 0.1), (max([max(s1), max(s2)]) + 0.1)])
dist1 = np.linspace(0, len(infiles), len(s1))
dist2 = np.linspace(0, len(infiles), len(s2))
a.plot(dist1, s1, 'b.')
a.plot(dist2, s2, 'r.')
top = ccs[0] + (3 * sds[0])
bottom = ccs[1] - (3 * sds[1])
a.plot([0, len(infiles)], [top, top], 'b-')
a.plot([0, len(infiles)], [bottom, bottom], 'r-')
a.tick_params(axis='x', labelbottom='off', bottom='off')
a.yaxis.set_label_text('percentage homozygous X variants')
fig = outfile.replace(".tsv", ".png")
p.savefig(fig)
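# Illustrative output row (tab separated, hypothetical values): the input
# file name, its homozygous fraction, the distance from its cluster centre,
# the assigned sex, and a flag ("*" when the value lies within 3 standard
# deviations of the cluster centre or on the expected side of it, "-"
# otherwise), e.g.
#   sample1.variants.tsv    0.912340    0.004210    male    *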
def PlotAncestry(infile):
'''
Draws a plot showing the score (probability) for each individual
for their assigned ancestry and the second closest match.
x = individual
y = score
large diamonds represent the best match and small triangles the second
best match
'''
ancestry = pd.read_csv(infile, sep="\t", header=None, dtype="str")
# score using the exponent of the probability, e.g. 5e-1000 becomes -1000
ancestry[2] = [int(a[1]) * -1 for a in ancestry[2].str.split("-")]
ancestry[4] = [int(a[1]) * -1 for a in ancestry[4].str.split("-")]
# sort by assigned ancestry
ancestry = ancestry.sort_values([1])
ancs = list(set(ancestry[1].append(ancestry[3])))
# assign colours to points by ancestry
ancestry[5] = ancestry[1].apply(lambda x: ancs.index(x), 1)
ancestry[6] = ancestry[3].apply(lambda x: ancs.index(x), 1)
floats = np.linspace(0, 1, len(set(ancs)))
colours = cm.jet(floats)
f = plt.figure(figsize=(30, 15))
a = f.add_subplot('111')
ymin = min(ancestry[2]) - 100
ymax = max(ancestry[4]) + 100
a.axis([-5, len(ancestry) + 5, ymin, ymax])
# plot best scoring ancestry
a.scatter(list(range(len(ancestry))),
ancestry[2], c=colours[ancestry[5].values],
s=500, marker='D', edgecolors='None')
# plot second best scoring ancestry
a.scatter(list(range(len(ancestry))),
ancestry[4], c=colours[ancestry[6].values],
s=150, marker="^", edgecolors='None')
sancs = set(ancestry[1])
i = 20
# draw lines between individuals assigned to different ancestries
for anc in sancs:
p = max(np.where(ancestry[1] == anc)[0]) + 0.5
p2 = min(np.where(ancestry[1] == anc)[0]) + 0.5
a.plot([p, p],
[ymin, ymax], 'k-')
a.text(p2, ymax - i, anc, ha='center')
i += 10
# legend
ps = [mpatches.Patch(color=col) for col in colours]
a.legend(handles=ps, labels=ancs, loc=3)
f.savefig(infile.replace(".tsv", ".png"))
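# Example of the score transformation above: an ancestry probability written
# as the string "5e-1000" splits on "-" into ["5e", "1000"], so the plotted
# score is int("1000") * -1 == -1000 (larger, i.e. less negative, is better).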
|
|
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A class for managing the Linux cgroup subsystem."""
from __future__ import print_function
import errno
import os
import signal
import time
from chromite.lib import cros_build_lib
from chromite.lib import locking
from chromite.lib import osutils
from chromite.lib import signals
from chromite.lib import sudo
# Rough hierarchy sketch:
# - all cgroup aware cros code should nest here.
# - No cros code should modify this namespace; it is user/system configurable
# - only. A release_agent can be specified, although we won't use it.
# cros/
#
# - cbuildbot instances land here only when they're cleaning their task pool.
# - this root namespace is *not* auto-removed; it's left so that user/system
# - configuration is persistent.
# cros/%(process-name)s/
# cros/cbuildbot/
#
# - a cbuildbot job pool, owned by pid. These are autocleaned.
# cros/cbuildbot/%(pid)i/
#
# - a job pool using process that was invoked by cbuildbot.
# - for example, cros/cbuildbot/42/cros_sdk:34
# - this pattern continues arbitrarily deep, and is autocleaned.
# cros/cbuildbot/%(pid1)i/%(basename_of_pid2)s:%(pid2)i/
#
# An example for cros_sdk (pid 552) would be:
# cros/cros_sdk/552/
# and it's children would be accessible in 552/tasks, or
# would create their own namespace w/in and assign themselves to it.
class _GroupWasRemoved(Exception):
"""Exception representing when a group was unexpectedly removed.
By design, this should only be possible when instantiating a new
pool while the parent pool has been removed; effectively it means that
we're supposed to shut down (either we've been sigterm'd and ignored it,
or shutdown is imminent).
"""
def _FileContains(filename, strings):
"""Greps a group of expressions, returns whether all were found."""
contents = osutils.ReadFile(filename)
return all(s in contents for s in strings)
def EnsureInitialized(functor):
"""Decorator for Cgroup methods to ensure the method is ran only if inited"""
def f(self, *args, **kwargs):
# pylint: disable=W0212
self.Instantiate()
return functor(self, *args, **kwargs)
# Dummy up our wrapper to make it look like what we're wrapping,
# and expose the underlying docstrings.
f.__name__ = functor.__name__
f.__doc__ = functor.__doc__
f.__module__ = functor.__module__
return f
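# Hypothetical usage sketch (illustration only): a Cgroup method decorated
# with EnsureInitialized can assume the group directory already exists,
# because Instantiate() is invoked before the wrapped body runs, e.g.
#
#   class ExampleGroup(Cgroup):
#       @EnsureInitialized
#       def AddTask(self, pid):
#           # self.path is guaranteed to exist at this point
#           ...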
class Cgroup(object):
"""Class representing a group in cgroups hierarchy.
Note the instance may not exist on disk; it will be created as necessary.
Additionally, because cgroups is kernel maintained (and mutated on the fly
by processes using it), chunks of this class are /explicitly/ designed to
always go back to disk and recalculate values.
Attributes:
path: Absolute on disk pathway to the cgroup directory.
tasks: Pids contained in this immediate cgroup, and the owning pids of
any first level groups nested w/in us.
all_tasks: All Pids, and owners of nested groups w/in this point in
the hierarchy.
nested_groups: The immediate cgroups nested w/in this one. If this
cgroup is 'cbuildbot/buildbot', 'cbuildbot' would have a nested_groups
of [Cgroup('cbuildbot/buildbot')] for example.
all_nested_groups: All cgroups nested w/in this one, regardless of depth.
pid_owner: Which pid owns this cgroup, if the cgroup is following cros
conventions for group naming.
"""
NEEDED_SUBSYSTEMS = ('cpuset',)
PROC_PATH = '/proc/cgroups'
_MOUNT_ROOT_POTENTIALS = ('/sys/fs/cgroup/cpuset', '/sys/fs/cgroup')
_MOUNT_ROOT_FALLBACK = '/dev/cgroup'
CGROUP_ROOT = None
MOUNT_ROOT = None
# Whether or not the cgroup implementation does auto inheritance via
# cgroup.clone_children
_SUPPORTS_AUTOINHERIT = False
@classmethod
@cros_build_lib.MemoizedSingleCall
def InitSystem(cls):
"""If cgroups are supported, initialize the system state"""
if not cls.IsSupported():
return False
def _EnsureMounted(mnt, args):
for mtab in osutils.IterateMountPoints():
if mtab.destination == mnt:
return True
# Grab a lock so in the off chance we have multiple programs (like two
# cros_sdk launched in parallel) running this init logic, we don't end
# up mounting multiple times.
lock_path = '/tmp/.chromite.cgroups.lock'
with locking.FileLock(lock_path, 'cgroup lock') as lock:
lock.write_lock()
for mtab in osutils.IterateMountPoints():
if mtab.destination == mnt:
return True
# Not all distros mount cgroup_root to sysfs.
osutils.SafeMakedirs(mnt, sudo=True)
cros_build_lib.SudoRunCommand(['mount'] + args + [mnt], print_cmd=False)
return True
mount_root_args = ['-t', 'tmpfs', 'cgroup_root']
opts = ','.join(cls.NEEDED_SUBSYSTEMS)
cgroup_root_args = ['-t', 'cgroup', '-o', opts, 'cros']
return _EnsureMounted(cls.MOUNT_ROOT, mount_root_args) and \
_EnsureMounted(cls.CGROUP_ROOT, cgroup_root_args)
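# For reference, the two mounts performed above are roughly equivalent to the
# following shell commands (illustrative sketch; assumes MOUNT_ROOT resolved
# to /sys/fs/cgroup and the default cpuset-only NEEDED_SUBSYSTEMS):
#   mount -t tmpfs cgroup_root /sys/fs/cgroup
#   mount -t cgroup -o cpuset cros /sys/fs/cgroup/cros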
@classmethod
@cros_build_lib.MemoizedSingleCall
def IsUsable(cls):
"""Function to sanity check if everything is setup to use cgroups"""
if not cls.InitSystem():
return False
cls._SUPPORTS_AUTOINHERIT = os.path.exists(
os.path.join(cls.CGROUP_ROOT, 'cgroup.clone_children'))
return True
@classmethod
@cros_build_lib.MemoizedSingleCall
def IsSupported(cls):
"""Sanity check as to whether or not cgroups are supported."""
# Is the cgroup subsystem even enabled?
if not os.path.exists(cls.PROC_PATH):
return False
# Does it support the subsystems we want?
if not _FileContains(cls.PROC_PATH, cls.NEEDED_SUBSYSTEMS):
return False
for potential in cls._MOUNT_ROOT_POTENTIALS:
if os.path.exists(potential):
cls.MOUNT_ROOT = potential
break
else:
cls.MOUNT_ROOT = cls._MOUNT_ROOT_FALLBACK
cls.MOUNT_ROOT = os.path.realpath(cls.MOUNT_ROOT)
cls.CGROUP_ROOT = os.path.join(cls.MOUNT_ROOT, 'cros')
return True
def __init__(self, namespace, autoclean=True, lazy_init=False, parent=None,
_is_root=False, _overwrite=True):
"""Initalize a cgroup instance.
Args:
namespace: What cgroup namespace is this in? cbuildbot/1823 for example.
autoclean: Should this cgroup be removed once unused?
lazy_init: Should we create the cgroup immediately, or when needed?
parent: A Cgroup instance; if the namespace is cbuildbot/1823, then the
parent *must* be the cgroup instance for namespace cbuildbot.
_is_root: Internal option, shouldn't be used by consuming code.
_overwrite: Internal option, shouldn't be used by consuming code.
"""
self._inited = None
self._overwrite = bool(_overwrite)
if _is_root:
namespace = '.'
self._inited = True
else:
namespace = os.path.normpath(namespace)
if parent is None:
raise ValueError("Either _is_root must be set to True, or parent must "
"be non null")
if namespace in ('.', ''):
raise ValueError("Invalid namespace %r was given" % (namespace,))
self.namespace = namespace
self.autoclean = autoclean
self.parent = parent
if not lazy_init:
self.Instantiate()
def _LimitName(self, name, for_path=False, multilevel=False):
"""Translation function doing sanity checks on derivative namespaces
If you're extending this class, you should be using this for any namespace
operations that pass through a nested group.
"""
# We use a fake pathway here, and this code must do so. To calculate the
# real pathway requires knowing CGROUP_ROOT, which requires sudo
# potentially. Since this code may be invoked just by loading the module,
# no execution/sudo should occur. However, if for_path is set, we *do*
# require CGROUP_ROOT- which is fine, since we sort that on the way out.
fake_path = os.path.normpath(os.path.join('/fake-path', self.namespace))
path = os.path.normpath(os.path.join(fake_path, name))
# Ensure that the requested pathway isn't trying to sidestep what we
# expect, and in the process it does internal validation checks.
if not path.startswith(fake_path + '/'):
raise ValueError("Name %s tried descending through this namespace into"
" another; this isn't allowed." % (name,))
elif path == self.namespace:
raise ValueError("Empty name %s" % (name,))
elif os.path.dirname(path) != fake_path and not multilevel:
raise ValueError("Name %s is multilevel, but disallowed." % (name,))
# Get the validated/normalized name.
name = path[len(fake_path):].strip('/')
if for_path:
return os.path.join(self.path, name)
return name
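# Illustrative behaviour sketch for _LimitName (names are hypothetical),
# assuming self.namespace == 'cbuildbot':
#   self._LimitName('1823') -> '1823'
#   self._LimitName('1823/cros_sdk:552', multilevel=True) -> '1823/cros_sdk:552'
#   self._LimitName('../other') raises ValueError (escapes this namespace)
#   self._LimitName('a/b') raises ValueError unless multilevel=True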
@property
def path(self):
return os.path.abspath(os.path.join(self.CGROUP_ROOT, self.namespace))
@property
def tasks(self):
s = set(x.strip() for x in self.GetValue('tasks', '').splitlines())
s.update(x.pid_owner for x in self.nested_groups)
s.discard(None)
return s
@property
def all_tasks(self):
s = self.tasks
for group in self.all_nested_groups:
s.update(group.tasks)
return s
@property
def nested_groups(self):
targets = []
path = self.path
try:
targets = [x for x in os.listdir(path)
if os.path.isdir(os.path.join(path, x))]
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
targets = [self.AddGroup(x, lazy_init=True, _overwrite=False)
for x in targets]
# Suppress initialization checks- if it exists on disk, we know it
# is already initialized.
for x in targets:
# pylint: disable=protected-access
x._inited = True
return targets
@property
def all_nested_groups(self):
# Do a depth first traversal.
def walk(groups):
for group in groups:
for subgroup in walk(group.nested_groups):
yield subgroup
yield group
return list(walk(self.nested_groups))
@property
@cros_build_lib.MemoizedSingleCall
def pid_owner(self):
# Ensure it's in cros namespace- if it is outside of the cros namespace,
# we shouldn't make assumptions about the naming convention used.
if not self.GroupIsAParent(_cros_node):
return None
# See documentation at the top of the file for the naming scheme.
# It's basically "%(program_name)s:%(owning_pid)i" if the group
# is nested.
return os.path.basename(self.namespace).rsplit(':', 1)[-1]
def GroupIsAParent(self, node):
"""Is the given node a parent of us?"""
parent_path = node.path + '/'
return self.path.startswith(parent_path)
def GetValue(self, key, default=None):
"""Query a cgroup configuration key from disk.
If the file doesn't exist, return the given default.
"""
try:
return osutils.ReadFile(os.path.join(self.path, key))
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
return default
def _AddSingleGroup(self, name, **kwargs):
"""Method for creating a node nested within this one.
Derivative classes should override this method rather than AddGroup;
see __init__ for the supported keywords.
"""
return self.__class__(os.path.join(self.namespace, name), **kwargs)
def AddGroup(self, name, **kwargs):
"""Add and return a cgroup nested in this one.
See __init__ for the supported keywords. If this isn't a direct child
(for example this instance is cbuildbot, and the name is 1823/x), it'll
create the intermediate groups as lazy_init=True, setting autoclean
via the logic described for autoclean_parents below.
Args:
name: Name of group to add.
autoclean_parents: Optional keyword argument; if unspecified, it takes
the value of autoclean (or True if autoclean isn't specified). This
controls whether any intermediate nodes that must be created for
multilevel groups are autocleaned.
"""
name = self._LimitName(name, multilevel=True)
autoclean = kwargs.pop('autoclean', True)
autoclean_parents = kwargs.pop('autoclean_parents', autoclean)
chunks = name.split('/', 1)
node = self
# pylint: disable=W0212
for chunk in chunks[:-1]:
node = node._AddSingleGroup(chunk, parent=node,
autoclean=autoclean_parents, **kwargs)
return node._AddSingleGroup(chunks[-1], parent=node,
autoclean=autoclean, **kwargs)
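# Illustrative example (the pids below are hypothetical): if this instance is
# the 'cros/cbuildbot' node, then
#   node.AddGroup('1823/cros_sdk:552', autoclean=True)
# returns the Cgroup for 'cros/cbuildbot/1823/cros_sdk:552', creating the
# intermediate '1823' group along the way with autoclean taken from the
# autoclean_parents keyword (which defaults to the autoclean value given).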
@cros_build_lib.MemoizedSingleCall
def Instantiate(self):
"""Ensure this group exists on disk in the cgroup hierarchy"""
if self.namespace == '.':
# If it's the root of the hierarchy, leave it alone.
return True
if self.parent is not None:
self.parent.Instantiate()
osutils.SafeMakedirs(self.path, sudo=True)
force_inheritance = True
if self.parent.GetValue('cgroup.clone_children', '').strip() == '1':
force_inheritance = False
if force_inheritance:
if self._SUPPORTS_AUTOINHERIT:
# If the cgroup version supports it, flip the auto-inheritance setting
# on so that cgroups nested here don't have to manually transfer
# settings
self._SudoSet('cgroup.clone_children', '1')
try:
# TODO(ferringb): sort out an appropriate filter/list for using:
# for name in os.listdir(parent):
# rather than just transferring these two values.
for name in ('cpuset.cpus', 'cpuset.mems'):
if not self._overwrite:
# Top level nodes like cros/cbuildbot we don't want to overwrite-
# users/system may have applied their own configuration. If it's empty,
# overwrite it in those cases.
val = self.GetValue(name, '').strip()
if val:
continue
self._SudoSet(name, self.parent.GetValue(name, ''))
except (EnvironmentError, cros_build_lib.RunCommandError):
# Do not leave half created cgroups hanging around-
# it makes compatibility a pain since we have to rewrite
# the cgroup each time. If instantiation fails, we know
# the group is screwed up, or the instantiation code is-
# either way, no reason to leave it alive.
self.RemoveThisGroup()
raise
return True
# Since some of this code needs to check/reset whether this function has run,
# we use a more developer-friendly variable name.
Instantiate._cache_key = '_inited' # pylint: disable=protected-access
def _SudoSet(self, key, value):
"""Set a cgroup file in this namespace to a specific value"""
name = self._LimitName(key, True)
try:
return sudo.SetFileContents(name, value, cwd=os.path.dirname(name))
except cros_build_lib.RunCommandError as e:
if e.exception is not None:
# Command failed before the exec itself; convert ENOENT
# appropriately.
exc = e.exception
if isinstance(exc, EnvironmentError) and exc.errno == errno.ENOENT:
raise _GroupWasRemoved(self.namespace, e)
raise
def RemoveThisGroup(self, strict=False):
"""Remove this specific cgroup
If strict is True, then we must be removed.
"""
if self._RemoveGroupOnDisk(self.path, strict=strict):
self._inited = None
return True
return False
@classmethod
def _RemoveGroupOnDisk(cls, path, strict, sudo_strict=True):
"""Perform the actual group removal.
Args:
path: The cgroup's location on disk.
strict: Boolean; if true, then it's an error if the group can't be
removed. This can occur if there are still processes in it, or in
a nested group.
sudo_strict: See SudoRunCommand's strict option.
"""
# Depth first recursively remove our children cgroups, then ourselves.
# Allow this to fail since currently it's possible for the cleanup code
# to not fully kill the hierarchy. Note that we must do just rmdirs,
# rm -rf cannot be used- it tries to remove files which are unlinkable
# in cgroup (only namespaces can be removed via rmdir).
# See Documentation/cgroups/ for further details.
path = os.path.normpath(path) + '/'
# Do a sanity check to ensure that we're not touching anything we
# shouldn't.
if not path.startswith(cls.CGROUP_ROOT):
raise RuntimeError("cgroups.py: Was asked to wipe path %s, refusing. "
"strict was %r, sudo_strict was %r"
% (path, strict, sudo_strict))
result = cros_build_lib.SudoRunCommand(
['find', path, '-depth', '-type', 'd', '-exec', 'rmdir', '{}', '+'],
redirect_stderr=True, error_code_ok=not strict,
print_cmd=False, strict=sudo_strict)
if result.returncode == 0:
return True
elif not os.path.isdir(path):
# We were invoked against a nonexistent path.
return True
return False
def TransferCurrentProcess(self, threads=True):
"""Move the current process into this cgroup.
If threads is True, we move our threads into the group in addition.
Note this must be called in a threadsafe manner; it primarily exists
as a helpful default since python stdlib generates some background
threads (even when the code is operated synchronously). While we
try to handle that scenario, it's implicitly racy since python
gives no clean/sane way to control/stop thread creation; thus it's
on the invoker's head to ensure no new threads are being generated
while this is run.
"""
if not threads:
return self.TransferPid(os.getpid())
seen = set()
while True:
force_run = False
threads = set(self._GetCurrentProcessThreads())
for tid in threads:
# Track any failures; a failure means the thread died under
# our feet, implying we shouldn't trust the current state.
force_run |= not self.TransferPid(tid, True)
if not force_run and threads == seen:
# We got two runs of this code seeing the same threads; assume
# we got them all since the first run moved those threads into
# our cgroup, and the second didn't see any new threads. While
# there may have been new threads between run1/run2, we do run2
# purely to snag threads we missed in run1; anything split by
# a thread from run1 would auto inherit our cgroup.
return
seen = threads
def _GetCurrentProcessThreads(self):
"""Lookup the given tasks (pids fundamentally) for our process."""
# Note that while we could try doing tricks like threading.enumerate,
# that's not guaranteed to pick up background c/ffi threads; generally
# that's ultra rare, but the potential exists thus we ask the kernel
# instead. What sucks however is that python releases the GIL; thus
# consuming code has to know of this, and protect against it.
return map(int, os.listdir('/proc/self/task'))
@EnsureInitialized
def TransferPid(self, pid, allow_missing=False):
"""Assigns a given process to this cgroup."""
# Assign this root process to the new cgroup.
try:
self._SudoSet('tasks', '%d' % int(pid))
return True
except cros_build_lib.RunCommandError:
if not allow_missing:
raise
return False
# TODO(ferringb): convert to snakeoil.weakref.WeakRefFinalizer
def __del__(self):
if self.autoclean and self._inited and self.CGROUP_ROOT:
# Suppress any sudo_strict behaviour, since we may be invoked
# during interpreter shutdown.
self._RemoveGroupOnDisk(self.path, False, sudo_strict=False)
def KillProcesses(self, poll_interval=0.05, remove=False, sigterm_timeout=10):
"""Kill all processes in this namespace."""
my_pids = set(map(str, self._GetCurrentProcessThreads()))
def _SignalPids(pids, signum):
cros_build_lib.SudoRunCommand(
['kill', '-%i' % signum] + sorted(pids),
print_cmd=False, error_code_ok=True, redirect_stdout=True,
combine_stdout_stderr=True)
# First sigterm what we can, exiting after 2 runs w/out seeing pids.
# Let this phase run for a max of sigterm_timeout seconds; afterwards,
# switch to
# sigkilling.
time_end = time.time() + sigterm_timeout
saw_pids, pids = True, set()
while time.time() < time_end:
previous_pids = pids
pids = self.tasks
self_kill = my_pids.intersection(pids)
if self_kill:
raise Exception("Bad API usage: asked to kill cgroup %s, but "
"current pid %s is in that group. Effectively "
"asked to kill ourselves."
% (self.namespace, self_kill))
if not pids:
if not saw_pids:
break
saw_pids = False
else:
saw_pids = True
new_pids = pids.difference(previous_pids)
if new_pids:
_SignalPids(new_pids, signal.SIGTERM)
# As long as new pids keep popping up, skip sleeping and just keep
# stomping them as quickly as possible (whack-a-mole is a good visual
# analogy of this). We do this to ensure that fast moving spawns
# are dealt with as quickly as possible. When considering this code,
# it's best to think about forkbomb scenarios- shouldn't occur, but
# synthetic fork-bombs can occur, thus this code being aggressive.
continue
time.sleep(poll_interval)
# Next do a sigkill scan. Again, exit only after no pids have been seen
# for two scans, and all groups are removed.
groups_existed = True
while True:
pids = self.all_tasks
if pids:
self_kill = my_pids.intersection(pids)
if self_kill:
raise Exception("Bad API usage: asked to kill cgroup %s, but "
"current pid %i is in that group. Effectively "
"asked to kill ourselves."
% (self.namespace, self_kill))
_SignalPids(pids, signal.SIGKILL)
saw_pids = True
elif not (saw_pids or groups_existed):
break
else:
saw_pids = False
time.sleep(poll_interval)
# Note this is done after the sleep; try to give the kernel time to
# shut down the processes. They may still be transitioning to defunct
# kernel side by the time we hit this scan, but that's fine- the next will
# get it.
# This needs to be nonstrict; it's possible the kernel is currently
# killing the pids we've just sigkill'd, thus the group isn't removable
# yet. Additionally, it's possible a child got forked we didn't see.
# Ultimately via our killing/removal attempts, it will be removed,
# just not necessarily on the first run.
if remove:
if self.RemoveThisGroup(strict=False):
# If we successfully removed this group, then there can be no pids,
# sub groups, etc, within it. No need to scan further.
return True
groups_existed = True
else:
groups_existed = [group.RemoveThisGroup(strict=False)
for group in self.nested_groups]
groups_existed = not all(groups_existed)
@classmethod
def _FindCurrentCrosGroup(cls, pid=None):
"""Find and return the cros namespace a pid is currently in.
If no pid is given, os.getpid() is substituted.
"""
if pid is None:
pid = 'self'
elif not isinstance(pid, (long, int)):
raise ValueError("pid must be None, or an integer/long. Got %r" % (pid,))
cpuset = None
try:
# See the kernels Documentation/filesystems/proc.txt if you're unfamiliar
# w/ procfs, and keep in mind that we have to work across multiple kernel
# versions.
cpuset = osutils.ReadFile('/proc/%s/cpuset' % (pid,)).rstrip('\n')
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
with open('/proc/%s/cgroup' % pid) as f:
for line in f:
# First field is the hierarchy index, 2nd is the subsystem, 3rd is the path.
# 2:cpuset:/
# 2:cpuset:/cros/cbuildbot/1234
line = line.rstrip('\n')
if not line:
continue
line = line.split(':', 2)
if line[1] == 'cpuset':
cpuset = line[2]
break
if not cpuset or not cpuset.startswith("/cros/"):
return None
return cpuset[len("/cros/"):].strip("/")
@classmethod
def FindStartingGroup(cls, process_name, nesting=True):
"""Create and return the starting cgroup for ourselves nesting if allowed.
Note that the node returned is either a generic process pool (e.g.
cros/cbuildbot), or the parent pool we're nested within; processes
generated in this group are the responsibility of this process to
deal with- nor should this process ever try triggering a kill w/in this
portion of the tree since they don't truly own it.
Args:
process_name: See the hierarchy comments at the start of this module.
This should basically be the process name- cros_sdk for example,
cbuildbot, etc.
nesting: If we're invoked by another cros cgroup aware process,
should we nest ourselves in their hierarchy? Generally speaking,
client code should never have a reason to disable nesting.
"""
if not cls.IsUsable():
return None
target = None
if nesting:
target = cls._FindCurrentCrosGroup()
if target is None:
target = process_name
return _cros_node.AddGroup(target, autoclean=False)
class ContainChildren(cros_build_lib.MasterPidContextManager):
"""Context manager for containing children processes.
This manager creates a job pool derived from the specified Cgroup |node|
and transfers the current process into it upon __enter__.
Any children processes created at that point will inherit our cgroup;
they can only escape the group if they're running as root and move
themselves out of this hierarchy.
Upon __exit__, transfer the current process back to this group, then
SIGTERM (progressing to SIGKILL) any immediate children in the pool,
finally removing the pool if possible. After sending SIGTERM, we wait
|sigterm_timeout| seconds before sending SIGKILL.
If |pool_name| is given, that name is used rather than os.getpid() for
the job pool created.
Finally, note that during cleanup this will suppress all signals
to ensure that it cleanses any children before returning.
"""
def __init__(self, node, pool_name=None, sigterm_timeout=10):
super(ContainChildren, self).__init__()
self.node = node
self.child = None
self.pid = None
self.pool_name = pool_name
self.sigterm_timeout = sigterm_timeout
self.run_kill = False
def _enter(self):
self.pid = os.getpid()
# Note: We use lazy init here so that we cannot trigger a
# _GroupWasRemoved -- we want that to be contained.
pool_name = str(self.pid) if self.pool_name is None else self.pool_name
self.child = self.node.AddGroup(pool_name, autoclean=True, lazy_init=True)
try:
self.child.TransferCurrentProcess()
except _GroupWasRemoved:
raise SystemExit(
"Group %s was removed under our feet; pool shutdown is underway"
% self.child.namespace)
self.run_kill = True
def _exit(self, *_args, **_kwargs):
with signals.DeferSignals():
self.node.TransferCurrentProcess()
if self.run_kill:
self.child.KillProcesses(remove=True,
sigterm_timeout=self.sigterm_timeout)
else:
# Non-strict since the group may have failed to be created.
self.child.RemoveThisGroup(strict=False)
def SimpleContainChildren(process_name, nesting=True, pid=None, **kwargs):
"""Convenience context manager to create a cgroup for children containment
See Cgroup.FindStartingGroup and Cgroup.ContainChildren for specifics.
If Cgroups aren't supported on this system, this is a noop context manager.
"""
node = Cgroup.FindStartingGroup(process_name, nesting=nesting)
if node is None:
return cros_build_lib.NoOpContextManager()
if pid is None:
pid = os.getpid()
name = '%s:%i' % (process_name, pid)
return ContainChildren(node, name, **kwargs)
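# Illustrative usage sketch (not part of this module's API surface; assumes
# this module is importable as `cgroups` and that the host supports cgroups):
#
#   with cgroups.SimpleContainChildren('cros_sdk'):
#       cros_build_lib.RunCommand(['make', '-j', '8'])
#
# On hosts where cgroups aren't usable, FindStartingGroup() returns None and
# the call above degrades to a no-op context manager.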
# This is a generic group, not associated with any specific process id, so
# we shouldn't autoclean it on exit; doing so would delete the group from
# under the feet of any other processes interested in using the group.
_root_node = Cgroup(None, _is_root=True, autoclean=False, lazy_init=True)
_cros_node = _root_node.AddGroup('cros', autoclean=False, lazy_init=True,
_overwrite=False)
|
|
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module holds utility functions for the version 1 of the API."""
import logging
import json
import time
import os
import yaml
from flask import abort
from flask import jsonify
from flask import current_app
from flask_login import current_user
import altair as alt
from timesketch.lib import ontology
from timesketch.lib.aggregators import manager as aggregator_manager
from timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST
from timesketch.models import db_session
from timesketch.models.sketch import View
logger = logging.getLogger('timesketch.api_utils')
def bad_request(message):
"""Function to set custom error message for HTTP 400 requests.
Args:
message: Message as string to return to the client.
Returns: Response object (instance of flask.wrappers.Response)
"""
response = jsonify({'message': message})
response.status_code = HTTP_STATUS_CODE_BAD_REQUEST
return response
def get_sketch_attributes(sketch):
"""Returns a dict with all attributes of a sketch."""
attributes = {}
ontology_def = ontology.ONTOLOGY
for attribute in sketch.attributes:
if attribute.sketch_id != sketch.id:
continue
name = attribute.name
attribute_values = []
ontology_string = attribute.ontology
ontology_dict = ontology_def.get(ontology_string, {})
cast_as_str = ontology_dict.get('cast_as', 'str')
for attr_value in attribute.values:
try:
value = ontology.OntologyManager.decode_value(
attr_value.value, cast_as_str)
except ValueError:
value = 'Unable to cast'
except NotImplementedError:
value = f'Ontology {cast_as_str} not yet defined.'
attribute_values.append(value)
values = attribute_values
if len(attribute_values) == 1:
values = attribute_values[0]
attributes[name] = {
'value': values,
'ontology': ontology_string,
}
return attributes
def get_sketch_last_activity(sketch):
"""Returns a date string with the last activity from a sketch."""
try:
last_activity = View.query.filter_by(
sketch=sketch, name='').order_by(
View.updated_at.desc()).first().updated_at
except AttributeError:
return ''
return last_activity.isoformat()
def update_sketch_last_activity(sketch):
"""Update the last activity date of a sketch."""
view = View.get_or_create(
user=current_user, sketch=sketch, name='')
view.update_modification_time()
db_session.add(view)
db_session.commit()
def run_aggregator(sketch_id, aggregator_name, aggregator_parameters=None,
index=None):
"""Run an aggregator and return back results.
Args:
sketch_id (int): the sketch ID.
aggregator_name (str): the name of the aggregator class to run.
aggregator_parameters (dict): dict containing the parameters used
for running the aggregator.
index (list): the list of OpenSearch index names to use.
Returns:
Tuple[Object, Dict]: a tuple containing the aggregator result object
(instance of AggregationResult) and a dict containing metadata
from the aggregator run.
"""
agg_class = aggregator_manager.AggregatorManager.get_aggregator(
aggregator_name)
if not agg_class:
return None, {}
if not aggregator_parameters:
aggregator_parameters = {}
aggregator = agg_class(sketch_id=sketch_id, index=index)
chart_type = aggregator_parameters.pop('supported_charts', None)
chart_color = aggregator_parameters.pop('chart_color', '')
time_before = time.time()
result_obj = aggregator.run(**aggregator_parameters)
time_after = time.time()
aggregator_description = aggregator.describe
meta = {
'method': 'aggregator_run',
'chart_type': chart_type,
'chart_color': chart_color,
'name': aggregator_description.get('name'),
'description': aggregator_description.get('description'),
'es_time': time_after - time_before,
}
if chart_type:
meta['vega_spec'] = result_obj.to_chart(
chart_name=chart_type,
chart_title=aggregator.chart_title, color=chart_color)
meta['vega_chart_title'] = aggregator.chart_title
return result_obj, meta
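# Illustrative usage sketch (hypothetical parameters; assumes an aggregator
# registered under the name 'field_bucket' and a sketch with ID 1 exist):
#
#   result_obj, meta = run_aggregator(
#       sketch_id=1,
#       aggregator_name='field_bucket',
#       aggregator_parameters={
#           'field': 'domain', 'supported_charts': 'barchart'})
#   if result_obj:
#       buckets = result_obj.to_dict()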
def run_aggregator_group(group, sketch_id):
"""Run an aggregator group and return back results.
Args:
group (models.sketch.Group): a group object.
sketch_id (int): the sketch ID.
Returns:
Tuple[Object, List, Dict]: a tuple containing the altair chart object,
a list of result object dicts and a dict containing metadata from
the aggregator group run.
"""
result_chart = None
orientation = group.orientation
objects = []
time_before = time.time()
for aggregator in group.aggregations:
if aggregator.aggregationgroup_id != group.id:
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'All aggregations in a group must belong to the group.')
if aggregator.sketch_id != group.sketch_id:
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'All aggregations in a group must belong to the group '
'sketch')
if aggregator.parameters:
aggregator_parameters = json.loads(aggregator.parameters)
else:
aggregator_parameters = {}
agg_class = aggregator_manager.AggregatorManager.get_aggregator(
aggregator.agg_type)
if not agg_class:
continue
aggregator_obj = agg_class(sketch_id=sketch_id)
chart_type = aggregator_parameters.pop('supported_charts', None)
color = aggregator_parameters.pop('chart_color', '')
result_obj = aggregator_obj.run(**aggregator_parameters)
chart = result_obj.to_chart(
chart_name=chart_type,
chart_title=aggregator_obj.chart_title,
as_chart=True, interactive=True, color=color)
if result_chart is None:
result_chart = chart
elif orientation == 'horizontal':
result_chart = alt.hconcat(chart, result_chart)
elif orientation == 'vertical':
result_chart = alt.vconcat(chart, result_chart)
else:
result_chart = alt.layer(chart, result_chart)
buckets = result_obj.to_dict()
buckets['buckets'] = buckets.pop('values')
result = {
'aggregation_result': {
aggregator.name: buckets
}
}
objects.append(result)
parameters = {}
if group.parameters:
parameters = json.loads(group.parameters)
result_chart.title = parameters.get('chart_title', group.name)
time_after = time.time()
meta = {
'method': 'aggregator_group',
'chart_type': 'compound: {0:s}'.format(orientation),
'name': group.name,
'description': group.description,
'es_time': time_after - time_before,
'vega_spec': result_chart.to_dict(),
'vega_chart_title': group.name
}
return result_chart, objects, meta
def load_yaml_config(config_parameter_name):
"""Load a YAML file.
Args:
config_parameter_name (str): Name of the config parameter to get the
path to the YAML file from.
Returns:
A dictionary with the YAML data.
"""
yaml_path = current_app.config.get(config_parameter_name, '')
if not yaml_path:
logger.error(
'The path to the YAML file isn\'t defined in the '
'main configuration file')
return {}
if not os.path.isfile(yaml_path):
logger.error(
'Unable to read the config, file: '
'[{0:s}] does not exist'.format(yaml_path))
return {}
with open(yaml_path, 'r') as fh:
return yaml.safe_load(fh)
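# Illustrative usage sketch (the config parameter name below is hypothetical;
# it must name a key in the main Timesketch configuration that points at a
# YAML file path):
#
#   label_config = load_yaml_config('SOME_YAML_CONFIG_PATH')  # {} if unset/missing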
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._iot_hub_resource_operations import build_check_name_availability_request, build_create_event_hub_consumer_group_request, build_create_or_update_request_initial, build_delete_event_hub_consumer_group_request, build_delete_request_initial, build_export_devices_request, build_get_event_hub_consumer_group_request, build_get_job_request, build_get_keys_for_key_name_request, build_get_quota_metrics_request, build_get_request, build_get_stats_request, build_get_valid_skus_request, build_import_devices_request, build_list_by_resource_group_request, build_list_by_subscription_request, build_list_event_hub_consumer_groups_request, build_list_jobs_request, build_list_keys_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IotHubResourceOperations:
"""IotHubResourceOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.v2017_01_19.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.IotHubDescription":
"""Get the non-security related metadata of an IoT hub.
Get the non-security related metadata of an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotHubDescription, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_01_19.models.IotHubDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
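    # Illustrative usage sketch (not generated code). Assumes azure-identity is
    # installed and that this versioned package exposes the async IotHubClient
    # with this operation group attached as `iot_hub_resource`:
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.iothub.v2017_01_19.aio import IotHubClient
    #
    #   async def show_hub():
    #       async with IotHubClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #           hub = await client.iot_hub_resource.get("my-resource-group", "my-hub")
    #           print(hub.name)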
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
iot_hub_description: "_models.IotHubDescription",
**kwargs: Any
) -> "_models.IotHubDescription":
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(iot_hub_description, 'IotHubDescription')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
iot_hub_description: "_models.IotHubDescription",
**kwargs: Any
) -> AsyncLROPoller["_models.IotHubDescription"]:
"""Create or update the metadata of an IoT hub.
Create or update the metadata of an Iot hub. The usual pattern to modify a property is to
retrieve the IoT hub metadata and security metadata, and then combine them with the modified
values in a new body to update the IoT hub. If certain properties are missing in the JSON,
updating IoT Hub may cause these values to fallback to default, which may lead to unexpected
behavior.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub to create or update.
:type resource_name: str
:param iot_hub_description: The IoT hub metadata and security metadata.
:type iot_hub_description: ~azure.mgmt.iothub.v2017_01_19.models.IotHubDescription
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either IotHubDescription or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2017_01_19.models.IotHubDescription]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
iot_hub_description=iot_hub_description,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 404:
deserialized = self._deserialize('ErrorDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncLROPoller[Union["_models.IotHubDescription", "_models.ErrorDetails"]]:
"""Delete an IoT hub.
Delete an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub to delete.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either IotHubDescription or ErrorDetails or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2017_01_19.models.IotHubDescription or
~azure.mgmt.iothub.v2017_01_19.models.ErrorDetails]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[Union["_models.IotHubDescription", "_models.ErrorDetails"]]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
**kwargs: Any
) -> AsyncIterable["_models.IotHubDescriptionListResult"]:
"""Get all the IoT hubs in a subscription.
Get all the IoT hubs in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubDescriptionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_01_19.models.IotHubDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("IotHubDescriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/IotHubs'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.IotHubDescriptionListResult"]:
"""Get all the IoT hubs in a resource group.
Get all the IoT hubs in a resource group.
:param resource_group_name: The name of the resource group that contains the IoT hubs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubDescriptionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_01_19.models.IotHubDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("IotHubDescriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs'} # type: ignore
@distributed_trace_async
async def get_stats(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.RegistryStatistics":
"""Get the statistics from an IoT hub.
Get the statistics from an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegistryStatistics, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_01_19.models.RegistryStatistics
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegistryStatistics"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_stats_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_stats.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RegistryStatistics', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubStats'} # type: ignore
@distributed_trace
def get_valid_skus(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.IotHubSkuDescriptionListResult"]:
"""Get the list of valid SKUs for an IoT hub.
Get the list of valid SKUs for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubSkuDescriptionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_01_19.models.IotHubSkuDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubSkuDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_valid_skus_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_valid_skus.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_get_valid_skus_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("IotHubSkuDescriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_valid_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/skus'} # type: ignore
@distributed_trace
def list_event_hub_consumer_groups(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
**kwargs: Any
) -> AsyncIterable["_models.EventHubConsumerGroupsListResult"]:
"""Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an
IoT hub.
Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an
IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint.
:type event_hub_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EventHubConsumerGroupsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_01_19.models.EventHubConsumerGroupsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_event_hub_consumer_groups_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
event_hub_endpoint_name=event_hub_endpoint_name,
template_url=self.list_event_hub_consumer_groups.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_event_hub_consumer_groups_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
event_hub_endpoint_name=event_hub_endpoint_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("EventHubConsumerGroupsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_event_hub_consumer_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups'} # type: ignore
@distributed_trace_async
async def get_event_hub_consumer_group(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs: Any
) -> "_models.EventHubConsumerGroupInfo":
"""Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.
Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to retrieve.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventHubConsumerGroupInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_01_19.models.EventHubConsumerGroupInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_event_hub_consumer_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
event_hub_endpoint_name=event_hub_endpoint_name,
name=name,
template_url=self.get_event_hub_consumer_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
@distributed_trace_async
async def create_event_hub_consumer_group(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs: Any
) -> "_models.EventHubConsumerGroupInfo":
"""Add a consumer group to an Event Hub-compatible endpoint in an IoT hub.
Add a consumer group to an Event Hub-compatible endpoint in an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to add.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventHubConsumerGroupInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_01_19.models.EventHubConsumerGroupInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_create_event_hub_consumer_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
event_hub_endpoint_name=event_hub_endpoint_name,
name=name,
template_url=self.create_event_hub_consumer_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
@distributed_trace_async
async def delete_event_hub_consumer_group(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub.
Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to delete.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_event_hub_consumer_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
event_hub_endpoint_name=event_hub_endpoint_name,
name=name,
template_url=self.delete_event_hub_consumer_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
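    # Illustrative sketch (assumption: ``ops`` is an already-constructed instance
    # of this operations class): create, fetch and delete a consumer group on the
    # built-in "events" endpoint. All resource names are placeholders.
    #
    #   async def manage_group(ops):
    #       created = await ops.create_event_hub_consumer_group(
    #           "my-resource-group", "my-iot-hub", "events", "telemetry-readers")
    #       fetched = await ops.get_event_hub_consumer_group(
    #           "my-resource-group", "my-iot-hub", "events", "telemetry-readers")
    #       assert fetched.name == created.name
    #       await ops.delete_event_hub_consumer_group(
    #           "my-resource-group", "my-iot-hub", "events", "telemetry-readers")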
@distributed_trace
def list_jobs(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.JobResponseListResult"]:
"""Get a list of all the jobs in an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
Get a list of all the jobs in an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobResponseListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_01_19.models.JobResponseListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponseListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_jobs_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_jobs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_jobs_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("JobResponseListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_jobs.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs'} # type: ignore
@distributed_trace_async
async def get_job(
self,
resource_group_name: str,
resource_name: str,
job_id: str,
**kwargs: Any
) -> "_models.JobResponse":
"""Get the details of a job from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
Get the details of a job from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param job_id: The job identifier.
:type job_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_01_19.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_job_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
job_id=job_id,
template_url=self.get_job.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_job.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs/{jobId}'} # type: ignore
@distributed_trace
def get_quota_metrics(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.IotHubQuotaMetricInfoListResult"]:
"""Get the quota metrics for an IoT hub.
Get the quota metrics for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubQuotaMetricInfoListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_01_19.models.IotHubQuotaMetricInfoListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubQuotaMetricInfoListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_quota_metrics_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_quota_metrics.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_get_quota_metrics_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("IotHubQuotaMetricInfoListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_quota_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/quotaMetrics'} # type: ignore
@distributed_trace_async
async def check_name_availability(
self,
operation_inputs: "_models.OperationInputs",
**kwargs: Any
) -> "_models.IotHubNameAvailabilityInfo":
"""Check if an IoT hub name is available.
Check if an IoT hub name is available.
:param operation_inputs: Set the name parameter in the OperationInputs structure to the name of
the IoT hub to check.
:type operation_inputs: ~azure.mgmt.iothub.v2017_01_19.models.OperationInputs
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotHubNameAvailabilityInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_01_19.models.IotHubNameAvailabilityInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubNameAvailabilityInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(operation_inputs, 'OperationInputs')
request = build_check_name_availability_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.check_name_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotHubNameAvailabilityInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/checkNameAvailability'} # type: ignore
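    # Illustrative sketch (assumptions: ``ops`` is an instance of this operations
    # class and OperationInputs is importable from this package's models):
    #
    #   from azure.mgmt.iothub.v2017_01_19.models import OperationInputs
    #
    #   async def is_name_free(ops, hub_name):
    #       info = await ops.check_name_availability(OperationInputs(name=hub_name))
    #       return info.name_available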
@distributed_trace
def list_keys(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SharedAccessSignatureAuthorizationRuleListResult"]:
"""Get the security metadata for an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
Get the security metadata for an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SharedAccessSignatureAuthorizationRuleListResult
or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_01_19.models.SharedAccessSignatureAuthorizationRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_keys_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_keys.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_keys_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SharedAccessSignatureAuthorizationRuleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/listkeys'} # type: ignore
@distributed_trace_async
async def get_keys_for_key_name(
self,
resource_group_name: str,
resource_name: str,
key_name: str,
**kwargs: Any
) -> "_models.SharedAccessSignatureAuthorizationRule":
"""Get a shared access policy by name from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
Get a shared access policy by name from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param key_name: The name of the shared access policy.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedAccessSignatureAuthorizationRule, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_01_19.models.SharedAccessSignatureAuthorizationRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_keys_for_key_name_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
key_name=key_name,
template_url=self.get_keys_for_key_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedAccessSignatureAuthorizationRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_keys_for_key_name.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubKeys/{keyName}/listkeys'} # type: ignore
@distributed_trace_async
async def export_devices(
self,
resource_group_name: str,
resource_name: str,
export_devices_parameters: "_models.ExportDevicesRequest",
**kwargs: Any
) -> "_models.JobResponse":
"""Exports all the device identities in the IoT hub identity registry to an Azure Storage blob
container. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
Exports all the device identities in the IoT hub identity registry to an Azure Storage blob
container. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param export_devices_parameters: The parameters that specify the export devices operation.
:type export_devices_parameters: ~azure.mgmt.iothub.v2017_01_19.models.ExportDevicesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_01_19.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(export_devices_parameters, 'ExportDevicesRequest')
request = build_export_devices_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self.export_devices.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/exportDevices'} # type: ignore
@distributed_trace_async
async def import_devices(
self,
resource_group_name: str,
resource_name: str,
import_devices_parameters: "_models.ImportDevicesRequest",
**kwargs: Any
) -> "_models.JobResponse":
"""Import, update, or delete device identities in the IoT hub identity registry from a blob. For
more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
Import, update, or delete device identities in the IoT hub identity registry from a blob. For
more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param import_devices_parameters: The parameters that specify the import devices operation.
:type import_devices_parameters: ~azure.mgmt.iothub.v2017_01_19.models.ImportDevicesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_01_19.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(import_devices_parameters, 'ImportDevicesRequest')
request = build_import_devices_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self.import_devices.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
import_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/importDevices'} # type: ignore
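    # Illustrative sketch of a bulk device export followed by polling the job via
    # get_job (assumptions: ``ops`` is an instance of this operations class; the
    # ExportDevicesRequest fields and the JobResponse ``job_id``/``status``
    # attributes are taken from this API version's models and may differ):
    #
    #   import asyncio
    #   from azure.mgmt.iothub.v2017_01_19.models import ExportDevicesRequest
    #
    #   async def export_and_wait(ops, container_sas_uri):
    #       job = await ops.export_devices(
    #           "my-resource-group", "my-iot-hub",
    #           ExportDevicesRequest(
    #               export_blob_container_uri=container_sas_uri, exclude_keys=True))
    #       while str(job.status).lower() not in ("completed", "failed", "cancelled"):
    #           await asyncio.sleep(5)
    #           job = await ops.get_job("my-resource-group", "my-iot-hub", job.job_id)
    #       return job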
|
|
from __future__ import print_function, absolute_import, division, unicode_literals
import os
import numpy as np
import matplotlib.transforms as mtransforms
from barak.absorb import findtrans, readatom
from barak.io import readtabfits, readtxt, parse_config
from barak.utilities import adict, get_data_path, between
from barak.pyvpfit import readf26
from barak.constants import c_kms
from barak.convolve import convolve_constant_dv
from barak.sed import make_constant_dv_wa_scale
from barak.convolve import convolve_psf
from astropy.table import Table
try:
from COS import convolve_with_COS_FOS
except ImportError:
convolve_with_COS_FOS = None
# regions of atmospheric absorption
ATMOS = [(5570, 5590),
(5885, 5900),
(6275, 6325),
(6870, 6950),
(7170, 7350),
(7580, 7690),
(8130, 8350),
(8900, 9200),
(9300, 9850),
(11100, 11620),
(12590, 12790),
(13035, 15110),
(17435, 20850),
(24150, 24800)]
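# A minimal sketch (not part of the original module) showing how the ATMOS
# windows above can be used to blank out atmospheric absorption before plotting;
# ``wa`` and ``fl`` are assumed to be wavelength and flux arrays, and ``between``
# is imported above from barak.utilities.
#
#   def mask_atmosphere(wa, fl):
#       fl = np.array(fl, dtype=float)
#       for w0, w1 in ATMOS:
#           fl[between(wa, w0, w1)] = np.nan
#       return fl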
def lines_from_f26(f26):
""" Convert a f26-file list of lines into a list we can pass to
find_tau.
"""
if f26 is None:
return []
if f26.lines is None:
f26.lines = []
lines = []
for l in f26.lines:
print(l['name'])
if l['name'].strip() in ('<<', '>>'):
#print "skipping!"
continue
lines.append((l['name'].replace(' ', ''), l['z'], l['b'], l['logN']))
return lines
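# Illustrative example of the returned format (values invented): each entry is
# (transition name with spaces removed, redshift, b in km/s, log N), which is
# the shape expected by the find_tau-style routines mentioned in the docstring.
#
#   lines = lines_from_f26(readf26('lines.f26'))
#   # e.g. [('HI', 2.310, 25.0, 14.3), ('CIV', 2.310, 12.0, 13.1)]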
def plot_tick_vel(ax, vpos, offset, t, tickz=None):
""" plot a single velocity tick
"""
label = '%s %.0f %.3f' % (t['name'], t['wa0'], t['z'])
label = label.replace('NeVII', 'NeVIII')
T = ax.plot(
[vpos, vpos], [1.05 + offset, 1.4 + offset],
color='k', alpha=0.7, lw=1.5)
Tlabels = []
if (tickz is not None and
not (1e-5 < abs(t['z'] - tickz) < 1e-2)) or tickz is None:
Tlabels.append(ax.text(vpos, 1.05 + offset, label, rotation=60,
fontsize=8, va='bottom', alpha=0.7))
return T, Tlabels
def plot_tick_wa(ax, wa, fl, height, t, tickz=None):
""" plot a single tick on a wavelength scale
"""
label = '%s %.0f %.3f' % (t.name, t.wa0, t.z)
label = label.replace('NeVII', 'NeVIII')
fl = fl * 1.1
T = ax.plot([wa, wa], [fl, fl + height], color='c', alpha=0.7, lw=1.5)
Tlabels = []
if tickz is not None and not (1e-5 < abs(t.z - tickz) < 1e-2) or \
tickz is None:
Tlabels.append(ax.text(wa, fl + 1.35 * height, label, rotation=60,
fontsize=8, va='bottom', alpha=0.7))
return T, Tlabels
def plotregions(ax, wmin, wmax, wa, fl):
""" Plot a series of fitting regions on the matplotlib axes `ax`.
"""
trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
regions = []
for w0, w1 in zip(wmin, wmax):
r0, = ax.plot([w0, w1], [0.8, 0.8], color='r', lw=3, alpha=0.7,
transform=trans)
c0 = between(wa, w0, w1)
r1, = ax.plot(wa[c0], fl[c0], color='y', lw=3, alpha=0.5,
zorder=0, drawstyle='steps-mid')
#r = ax.fill_betweenx([0, 0.8], w0, x2=w1, facecolor='b', alpha=0.2,
# lw=0, zorder=1,
# transform=trans)
regions.extend([r0, r1])
return regions
def plot_velocity_regions(wmin, wmax, w0, w1, obswa, ax, offset,
vel, nfl):
""" wmin, wmax is minimum and maximum wavelengths of the plot.
w0 and w1 are the min and max wavelengths of the fitting
regions. obswa is the wavelength of the transition for this plot.
"""
cond = ((w1 >= wmax) & (w0 < wmax)) | \
((w1 <= wmax) & (w0 >= wmin)) | \
((w1 > wmin) & (w0 <= wmin))
regions = []
if not cond.any():
return regions
vel0 = (w0[cond] / obswa - 1) * c_kms
vel1 = (w1[cond] / obswa - 1) * c_kms
for v0, v1 in zip(vel0, vel1):
yoff = 1.1 + offset
R0, = ax.plot([v0, v1], [yoff, yoff], 'r', lw=3, alpha=0.7)
cond = between(vel, v0, v1)
R1, = ax.plot(vel[cond], nfl[cond] + offset, 'y', lw=4, alpha=0.5,
drawstyle='steps-mid',zorder=0)
regions.extend([R0,R1])
return regions
def print_example_options():
print("""\
Rfwhm = 6.67
features = features_filename
f26 = lines.f26
transitions = transitions/z0p56
taulines = taulines_filename
co_is_sky = False
tickz = 0.567311
ticklabels = True
showticks = True
z = 3
wadiv = 6
nsmooth = 1
show_oscillator_strength = False
dv = 1000
residuals = False
""")
def process_options(args):
opt = adict()
filename = os.path.abspath(__file__).rsplit('/', 1)[0] + '/default.cfg'
opt = parse_config(filename)
if os.path.lexists('./plot.cfg'):
opt = parse_config('./plot.cfg', opt)
opt.atom = readatom(molecules=True)
if opt.Rfwhm is not None:
        # basestring only exists on Python 2; fall back to str on Python 3.
        string_types = basestring if str is bytes else str
        if isinstance(opt.Rfwhm, string_types):
if opt.Rfwhm == 'convolve_with_COS_FOS':
if convolve_with_COS_FOS is None:
raise ValueError('convolve_with_COS_FOS() not available')
print('Using tailored FWHM for COS/FOS data')
opt.Rfwhm = 'convolve_with_COS_FOS'
elif opt.Rfwhm.endswith('fits'):
print('Reading Resolution FWHM from', opt.Rfwhm)
res = readtabfits(opt.Rfwhm)
opt.Rfwhm = res.res / 2.354
else:
print('Reading Resolution FWHM from', opt.Rfwhm)
fh = open(opt.Rfwhm)
opt.Rfwhm = 1 / 2.354 * np.array([float(r) for r in fh])
fh.close()
else:
opt.Rfwhm = float(opt.Rfwhm)
if opt.features is not None:
print('Reading feature list from', opt.features)
opt.features = readtabfits(opt.features)
if opt.f26 is not None:
name = opt.f26
print('Reading ions and fitting regions from', name)
opt.f26 = readf26(name)
opt.f26.filename = name
if opt.transitions is not None:
print('Reading transitions from', opt.transitions)
fh = open(opt.transitions)
trans = list(fh)
fh.close()
temp = []
for tr in trans:
tr = tr.strip()
if tr and not tr.startswith('#'):
junk = tr.split()
tr = junk[0] + ' ' + junk[1]
t = findtrans(tr, atomdat=opt.atom)
temp.append(dict(name=t[0], wa=t[1][0], tr=t[1]))
opt.linelist = temp
else:
opt.linelist = readtxt(get_data_path() + 'linelists/qsoabs_lines',
names='wa,name,select')
if opt.f26 is None and opt.taulines is not None:
print('Reading ions from', opt.taulines)
fh = open(opt.taulines)
lines = []
for row in fh:
if row.lstrip().startswith('#'):
continue
items = row.split()
lines.append([items[0]] + list(map(float, items[1:])))
fh.close()
opt.lines = lines
if opt.show_regions is None:
opt.show_regions = True
if hasattr(opt, 'aodname'):
opt.aod = Table.read(opt.aodname)
return opt
def process_Rfwhm(Rfwhm, wa, model, models):
""" Convolve the input models using the Rfwhm option
Return the new models.
wa: wavelength array, shape (N,)
model: model flux array, shape (N,)
models: list of model flux arrays each with shape (N,)
    Rfwhm is one of:
      'convolve_with_COS_FOS'
      a float giving the FWHM in km/s
      an array of floats with shape (N,)
Returns
-------
new_model, new_models
"""
model_out = None
models_out = []
#import pdb; pdb.set_trace()
if Rfwhm is None:
return model, models
elif Rfwhm == 'convolve_with_COS_FOS':
#print 'convolving with COS/FOS instrument profile'
#import pdb; pdb.set_trace()
model_out = convolve_with_COS_FOS(model, wa, use_COS_nuv=True)
for m in models:
#if m.min() < (1 - 1e-2):
m = convolve_with_COS_FOS(m, wa, use_COS_nuv=True)
models_out.append(m)
elif isinstance(Rfwhm, float):
#print 'Convolving with fwhm %.2f km/s' % Rfwhm
# use a pixel velocity width 4 times smaller than the FWHM
ndiv = 4.
try:
wa_dv = make_constant_dv_wa_scale(wa[0], wa[-1], Rfwhm / ndiv)
        except Exception:
import pdb
pdb.set_trace()
model_out = convolve_constant_dv(wa, model, wa_dv, ndiv)
# do the same to every model if there's more than one
for m in models:
#if m.min() < (1 - 1e-2):
m = convolve_constant_dv(wa, m, wa_dv, ndiv)
models_out.append(m)
else:
raise ValueError('Unknown value for Rfwhm option')
return model_out, models_out
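# A minimal usage sketch for process_Rfwhm (assumes ``wa`` is a wavelength array
# and ``model``/``models`` are model flux arrays on that wavelength scale):
#
#   smoothed, smoothed_models = process_Rfwhm(6.67, wa, model, models)   # FWHM in km/s
#   untouched, untouched_models = process_Rfwhm(None, wa, model, models)  # no convolution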
|
|
"""
These methods recursively evaluate the SPARQL algebra.
evalQuery is the entry point; it sets up the context and
returns the SPARQLResult object.
evalPart is called at each level and delegates to the right method.
A rdflib.plugins.sparql.sparql.QueryContext is passed along, keeping the
information needed for evaluation.
A list of dicts (solution mappings) is returned, apart from GroupBy, which may
also return a dict of lists of dicts.
"""
import collections
from rdflib import Variable, Graph, BNode, URIRef, Literal
from rdflib.plugins.sparql import CUSTOM_EVALS
from rdflib.plugins.sparql.parserutils import value
from rdflib.plugins.sparql.sparql import (
QueryContext, AlreadyBound, FrozenBindings, SPARQLError)
from rdflib.plugins.sparql.evalutils import (
_filter, _eval, _join, _diff, _minus, _fillTemplate, _ebv)
from rdflib.plugins.sparql.aggregates import evalAgg
def evalBGP(ctx, bgp):
"""
A basic graph pattern
"""
if not bgp:
yield ctx.solution()
return
s, p, o = bgp[0]
_s = ctx[s]
_p = ctx[p]
_o = ctx[o]
for ss, sp, so in ctx.graph.triples((_s, _p, _o)):
if None in (_s, _p, _o):
c = ctx.push()
else:
c = ctx
if _s is None:
c[s] = ss
try:
if _p is None:
c[p] = sp
except AlreadyBound:
continue
try:
if _o is None:
c[o] = so
except AlreadyBound:
continue
for x in evalBGP(c, bgp[1:]):
yield x
def evalExtend(ctx, extend):
# TODO: Deal with dict returned from evalPart from GROUP BY
for c in evalPart(ctx, extend.p):
try:
e = _eval(extend.expr, c.forget(ctx))
if isinstance(e, SPARQLError):
raise e
yield c.merge({extend.var: e})
except SPARQLError:
yield c
def evalLazyJoin(ctx, join):
"""
A lazy join will push the variables bound
in the first part to the second part,
essentially doing the join implicitly
hopefully evaluating much fewer triples
"""
for a in evalPart(ctx, join.p1):
c = ctx.thaw(a)
for b in evalPart(c, join.p2):
yield b
def evalJoin(ctx, join):
# TODO: Deal with dict returned from evalPart from GROUP BY
# only ever for join.p1
if join.lazy:
return evalLazyJoin(ctx, join)
else:
a = evalPart(ctx, join.p1)
b = set(evalPart(ctx, join.p2))
return _join(a, b)
def evalUnion(ctx, union):
res = set()
for x in evalPart(ctx, union.p1):
res.add(x)
yield x
for x in evalPart(ctx, union.p2):
if x not in res:
yield x
def evalMinus(ctx, minus):
a = evalPart(ctx, minus.p1)
b = set(evalPart(ctx, minus.p2))
return _minus(a, b)
def evalLeftJoin(ctx, join):
# import pdb; pdb.set_trace()
for a in evalPart(ctx, join.p1):
ok = False
c = ctx.thaw(a)
for b in evalPart(c, join.p2):
if _ebv(join.expr, b.forget(ctx)):
ok = True
yield b
if not ok:
# we've cheated, the ctx above may contain
# vars bound outside our scope
# before we yield a solution without the OPTIONAL part
# check that we would have had no OPTIONAL matches
# even without prior bindings...
if not any(_ebv(join.expr, b) for b in
evalPart(ctx.thaw(a.remember(join.p1._vars)), join.p2)):
yield a
def evalFilter(ctx, part):
# TODO: Deal with dict returned from evalPart!
for c in evalPart(ctx, part.p):
if _ebv(part.expr, c.forget(ctx)):
yield c
def evalGraph(ctx, part):
if ctx.dataset is None:
raise Exception(
"Non-conjunctive-graph doesn't know about " +
"graphs. Try a query without GRAPH.")
ctx = ctx.clone()
graph = ctx[part.term]
if graph is None:
for graph in ctx.dataset.contexts():
# in SPARQL the default graph is NOT a named graph
if graph == ctx.dataset.default_context:
continue
c = ctx.pushGraph(graph)
c = c.push()
graphSolution = [{part.term: graph.identifier}]
for x in _join(evalPart(c, part.p), graphSolution):
yield x
else:
c = ctx.pushGraph(ctx.dataset.get_context(graph))
for x in evalPart(c, part.p):
yield x
def evalValues(ctx, part):
for r in part.p.res:
c = ctx.push()
try:
for k, v in r.items():
if v != 'UNDEF':
c[k] = v
except AlreadyBound:
continue
yield c.solution()
def evalMultiset(ctx, part):
if part.p.name == 'values':
return evalValues(ctx, part)
return evalPart(ctx, part.p)
def evalPart(ctx, part):
# try custom evaluation functions
for name, c in list(CUSTOM_EVALS.items()):
try:
return c(ctx, part)
except NotImplementedError:
            pass  # the given custom function did not handle this part
if part.name == 'BGP':
return evalBGP(ctx, part.triples) # NOTE pass part.triples, not part!
elif part.name == 'Filter':
return evalFilter(ctx, part)
elif part.name == 'Join':
return evalJoin(ctx, part)
elif part.name == 'LeftJoin':
return evalLeftJoin(ctx, part)
elif part.name == 'Graph':
return evalGraph(ctx, part)
elif part.name == 'Union':
return evalUnion(ctx, part)
elif part.name == 'ToMultiSet':
return evalMultiset(ctx, part)
elif part.name == 'Extend':
return evalExtend(ctx, part)
elif part.name == 'Minus':
return evalMinus(ctx, part)
elif part.name == 'Project':
return evalProject(ctx, part)
elif part.name == 'Slice':
return evalSlice(ctx, part)
elif part.name == 'Distinct':
return evalDistinct(ctx, part)
elif part.name == 'Reduced':
return evalReduced(ctx, part)
elif part.name == 'OrderBy':
return evalOrderBy(ctx, part)
elif part.name == 'Group':
return evalGroup(ctx, part)
elif part.name == 'AggregateJoin':
return evalAggregateJoin(ctx, part)
elif part.name == 'SelectQuery':
return evalSelectQuery(ctx, part)
elif part.name == 'AskQuery':
return evalAskQuery(ctx, part)
elif part.name == 'ConstructQuery':
return evalConstructQuery(ctx, part)
elif part.name == 'ServiceGraphPattern':
raise Exception('ServiceGraphPattern not implemented')
elif part.name == 'DescribeQuery':
raise Exception('DESCRIBE not implemented')
else:
# import pdb ; pdb.set_trace()
        raise Exception("I don't know: %s" % part.name)
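# A minimal sketch of using the CUSTOM_EVALS hook that evalPart consults above
# (the function below is illustrative, not part of rdflib). A custom evaluator
# must raise NotImplementedError for any part it does not handle so that the
# default dispatch still runs:
#
#   def my_bgp_logger(ctx, part):
#       if part.name != 'BGP':
#           raise NotImplementedError()
#       print('evaluating a BGP with %d triple patterns' % len(part.triples))
#       return evalBGP(ctx, part.triples)
#
#   CUSTOM_EVALS['my_bgp_logger'] = my_bgp_logger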
def evalGroup(ctx, group):
"""
http://www.w3.org/TR/sparql11-query/#defn_algGroup
"""
p = evalPart(ctx, group.p)
if not group.expr:
return {1: list(p)}
else:
res = collections.defaultdict(list)
for c in p:
k = tuple(_eval(e, c) for e in group.expr)
res[k].append(c)
return res
def evalAggregateJoin(ctx, agg):
# import pdb ; pdb.set_trace()
p = evalPart(ctx, agg.p)
# p is always a Group, we always get a dict back
for row in p:
bindings = {}
for a in agg.A:
evalAgg(a, p[row], bindings)
yield FrozenBindings(ctx, bindings)
if len(p) == 0:
yield FrozenBindings(ctx)
def evalOrderBy(ctx, part):
res = evalPart(ctx, part.p)
for e in reversed(part.expr):
def val(x):
v = value(x, e.expr, variables=True)
if isinstance(v, Variable):
return (0, v)
elif isinstance(v, BNode):
return (1, v)
elif isinstance(v, URIRef):
return (2, v)
elif isinstance(v, Literal):
return (3, v)
reverse = bool(e.order and e.order == 'DESC')
res = sorted(res, key=val, reverse=reverse)
return res
def evalSlice(ctx, slice):
# import pdb; pdb.set_trace()
res = evalPart(ctx, slice.p)
i = 0
while i < slice.start:
next(res)
i += 1
i = 0
for x in res:
i += 1
if slice.length is None:
yield x
else:
if i <= slice.length:
yield x
else:
break
def evalReduced(ctx, part):
return evalPart(ctx, part.p) # TODO!
def evalDistinct(ctx, part):
res = evalPart(ctx, part.p)
done = set()
for x in res:
if x not in done:
yield x
done.add(x)
def evalProject(ctx, project):
res = evalPart(ctx, project.p)
return (row.project(project.PV) for row in res)
def evalSelectQuery(ctx, query):
res = {}
res["type_"] = "SELECT"
res["bindings"] = evalPart(ctx, query.p)
res["vars_"] = query.PV
return res
def evalAskQuery(ctx, query):
res = {}
res["type_"] = "ASK"
res["askAnswer"] = False
for x in evalPart(ctx, query.p):
res["askAnswer"] = True
break
return res
def evalConstructQuery(ctx, query):
template = query.template
if not template:
# a construct-where query
template = query.p.p.triples # query->project->bgp ...
graph = Graph()
for c in evalPart(ctx, query.p):
graph += _fillTemplate(template, c)
res = {}
res["type_"] = "CONSTRUCT"
res["graph"] = graph
return res
def evalQuery(graph, query, initBindings, base=None):
ctx = QueryContext(graph)
ctx.prologue = query.prologue
if initBindings:
for k, v in initBindings.items():
if not isinstance(k, Variable):
k = Variable(k)
ctx[k] = v
    # ctx.push()  # necessary?
main = query.algebra
# import pdb; pdb.set_trace()
if main.datasetClause:
if ctx.dataset is None:
raise Exception(
"Non-conjunctive-graph doesn't know about " +
"graphs! Try a query without FROM (NAMED).")
ctx = ctx.clone() # or push/pop?
firstDefault = False
for d in main.datasetClause:
if d.default:
if firstDefault:
# replace current default graph
dg = ctx.dataset.get_context(BNode())
ctx = ctx.pushGraph(dg)
firstDefault = True
ctx.load(d.default, default=True)
elif d.named:
g = d.named
ctx.load(g, default=False)
return evalPart(ctx, main)
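# A minimal sketch of driving evalQuery directly (normally rdflib's Graph.query
# ends up here via the SPARQL processor); the data file name is a placeholder:
#
#   from rdflib import Graph, Variable
#   from rdflib.plugins.sparql import prepareQuery
#
#   g = Graph()
#   g.parse('data.ttl', format='turtle')
#   q = prepareQuery('SELECT ?s WHERE { ?s ?p ?o }')
#   res = evalQuery(g, q, initBindings={})
#   for row in res['bindings']:
#       print(row[Variable('s')])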
|
|
"""Output or upload a TestRun proto for mfg-inspector.com
MULTIDIM_JSON schema:
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Multi-dimensional test parameter",
"type": "object",
"properties": {
"outcome": {"enum": ["PASS", "FAIL", "ERROR"]},
"name": {"type": "string"},
"dimensions": {
"type": array,
"minItems": 1,
"items": {
"type": "object",
"properties": {
"uom_code": {"type": "string"},
"uom_suffix": {"type": "string"}
}
}
},
"values": {
"type": "array",
"items": {}
}
}
}
"""
import json
import logging
import numbers
import os
import threading
import zlib
try:
from past.types import unicode
except ImportError:
pass
import httplib2
import oauth2client.client
from openhtf.core import measurements
from openhtf.core import test_record
from openhtf.output import callbacks
from openhtf.output.callbacks import json_factory
from openhtf.output.proto import guzzle_pb2
from openhtf.output.proto import test_runs_pb2
from openhtf.util import validators
import six
# pylint: disable=no-member
MIMETYPE_MAP = {
'image/jpeg': test_runs_pb2.JPG,
'image/png': test_runs_pb2.PNG,
'audio/x-wav': test_runs_pb2.WAV,
'text/plain': test_runs_pb2.TEXT_UTF8,
'image/tiff': test_runs_pb2.TIFF,
'video/mp4': test_runs_pb2.MP4,
}
OUTCOME_MAP = {
test_record.Outcome.ERROR: test_runs_pb2.ERROR,
test_record.Outcome.FAIL: test_runs_pb2.FAIL,
test_record.Outcome.PASS: test_runs_pb2.PASS,
test_record.Outcome.TIMEOUT: test_runs_pb2.ERROR,
test_record.Outcome.ABORTED: test_runs_pb2.ERROR,
}
UOM_CODE_MAP = {
u.GetOptions().Extensions[
test_runs_pb2.uom_code]: num
for num, u in six.iteritems(
test_runs_pb2.Units.UnitCode.DESCRIPTOR.values_by_number)
}
# pylint: enable=no-member
# Control how many flattened parameters we'll output per multidimensional
# measurement.
MAX_PARAMS_PER_MEASUREMENT = 100
class UploadFailedError(Exception):
"""Raised when an upload to mfg-inspector fails."""
class InvalidTestRunError(Exception):
"""Raised if test run is invalid."""
# pylint: disable=invalid-name
def _populate_header(record, testrun):
"""Populate header-like info in testrun from record.
Mostly obvious, some stuff comes from metadata, see docstring of
_test_run_from_test_record for details.
"""
testrun.dut_serial = record.dut_id
testrun.tester_name = record.station_id
if 'test_name' in record.metadata:
testrun.test_info.name = record.metadata['test_name']
else:
# Default to copying tester_name into test_info.name.
testrun.test_info.name = record.station_id
if 'test_description' in record.metadata:
testrun.test_info.description = record.metadata['test_description']
if 'test_version' in record.metadata:
testrun.test_info.version_string = record.metadata['test_version']
testrun.test_status = OUTCOME_MAP[record.outcome]
testrun.start_time_millis = record.start_time_millis
testrun.end_time_millis = record.end_time_millis
if 'run_name' in record.metadata:
testrun.run_name = record.metadata['run_name']
for details in record.outcome_details:
testrun_code = testrun.failure_codes.add()
testrun_code.code = details.code
testrun_code.details = details.description
for phase in record.phases:
testrun_phase = testrun.phases.add()
testrun_phase.name = phase.name
testrun_phase.description = phase.codeinfo.sourcecode
testrun_phase.timing.start_time_millis = phase.start_time_millis
testrun_phase.timing.end_time_millis = phase.end_time_millis
if 'config' in record.metadata:
attachment = testrun.info_parameters.add()
attachment.name = 'config'
attachment.value_binary = json.dumps(
record.metadata['config'], sort_keys=True, indent=4).encode('utf-8')
def _ensure_unique_parameter_name(name, used_parameter_names):
while name in used_parameter_names:
name += '_' # Hack to avoid collisions between phases.
used_parameter_names.add(name)
return name
def _attach_json(record, testrun):
"""Attach a copy of the JSON-ified record as an info parameter.
Save a copy of the JSON-ified record in an attachment so we can access
un-mangled fields later if we want. Remove attachments since those get
copied over and can potentially be quite large.
"""
record_json = json_factory.OutputToJSON(
inline_attachments=False,
sort_keys=True, indent=2).serialize_test_record(record)
testrun_param = testrun.info_parameters.add()
testrun_param.name = 'OpenHTF_record.json'
testrun_param.value_binary = record_json.encode('utf-8')
# pylint: disable=no-member
testrun_param.type = test_runs_pb2.TEXT_UTF8
# pylint: enable=no-member
def _extract_attachments(phase, testrun, used_parameter_names):
"""Extract attachments, just copy them over."""
for name, (attachment_data, mimetype) in sorted(six.iteritems(phase.attachments)):
name = _ensure_unique_parameter_name(name, used_parameter_names)
testrun_param = testrun.info_parameters.add()
testrun_param.name = name
if isinstance(attachment_data, unicode):
attachment_data = attachment_data.encode('utf-8')
testrun_param.value_binary = attachment_data
if mimetype in MIMETYPE_MAP:
testrun_param.type = MIMETYPE_MAP[mimetype]
else:
# pylint: disable=no-member
testrun_param.type = test_runs_pb2.BINARY
# pylint: enable=no-member
def _mangle_measurement(name, measured_value, measurement, mangled_parameters,
attachment_name):
"""Flatten parameters for backwards compatibility, watch for collisions.
We generate these by doing some name mangling, using some sane limits for
very large multidimensional measurements.
"""
  for coord, val in list(
      measured_value.value_dict.items())[:MAX_PARAMS_PER_MEASUREMENT]:
# Mangle names so they look like 'myparameter_Xsec_Ynm_ZHz'
mangled_name = '_'.join([name] + [
'%s%s' % (
dim_val,
dim_units.suffix if dim_units.suffix else '') for
dim_val, dim_units in zip(
coord, measurement.dimensions)])
while mangled_name in mangled_parameters:
logging.warning('Mangled name %s already in use', mangled_name)
mangled_name += '_'
mangled_param = test_runs_pb2.TestParameter()
mangled_param.name = mangled_name
mangled_param.associated_attachment = attachment_name
mangled_param.description = (
'Mangled parameter from measurement %s with dimensions %s' % (
name, tuple(d.suffix for d in measurement.dimensions)))
if isinstance(val, numbers.Number):
mangled_param.numeric_value = float(val)
else:
mangled_param.text_value = str(val)
# Check for validators we know how to translate.
for validator in measurement.validators:
mangled_param.description += '\nValidator: ' + str(validator)
if measurement.units and measurement.units.code in UOM_CODE_MAP:
mangled_param.unit_code = UOM_CODE_MAP[measurement.units.code]
mangled_parameters[mangled_name] = mangled_param
def _extract_parameters(record, testrun, used_parameter_names):
"""Extract parameters from phases.
Generate mangled parameters afterwards so we give real measurements priority
getting names.
"""
mangled_parameters = {}
for phase in record.phases:
_extract_attachments(phase, testrun, used_parameter_names)
for name, measurement in sorted(six.iteritems(phase.measurements)):
tr_name = _ensure_unique_parameter_name(name, used_parameter_names)
testrun_param = testrun.test_parameters.add()
testrun_param.name = tr_name
if measurement.docstring:
testrun_param.description = measurement.docstring
if measurement.units and measurement.units.code in UOM_CODE_MAP:
testrun_param.unit_code = UOM_CODE_MAP[measurement.units.code]
if measurement.outcome == measurements.Outcome.PASS:
testrun_param.status = test_runs_pb2.PASS
elif (not measurement.measured_value
or not measurement.measured_value.is_value_set):
testrun_param.status = test_runs_pb2.ERROR
continue
else:
testrun_param.status = test_runs_pb2.FAIL
value = None
if measurement.measured_value.is_value_set:
value = measurement.measured_value.value
else:
testrun_param.status = test_runs_pb2.ERROR
if measurement.dimensions is None:
# Just a plain ol' value.
if isinstance(value, numbers.Number):
testrun_param.numeric_value = float(value)
else:
testrun_param.text_value = str(value)
# Check for validators we know how to translate.
for validator in measurement.validators:
if isinstance(validator, validators.RangeValidatorBase):
if validator.minimum is not None:
testrun_param.numeric_minimum = float(validator.minimum)
if validator.maximum is not None:
testrun_param.numeric_maximum = float(validator.maximum)
elif isinstance(validator, validators.RegexMatcher):
testrun_param.expected_text = validator.regex
else:
testrun_param.description += '\nValidator: ' + str(validator)
else:
attachment = testrun.info_parameters.add()
attachment.name = 'multidim_%s' % name
dims = [{
'uom_suffix': d.suffix,
'uom_code': d.code}
for d in measurement.dimensions]
# Refer to the module docstring for the expected schema.
attachment.value_binary = json.dumps({
'outcome': str(testrun_param.status), 'name': name,
'dimensions': dims,
'value': value
}, sort_keys=True).encode('utf-8')
attachment.type = test_runs_pb2.MULTIDIM_JSON
_mangle_measurement(
name, measurement.measured_value, measurement, mangled_parameters,
attachment.name)
if testrun_param.status == test_runs_pb2.FAIL:
testrun_code = testrun.failure_codes.add()
testrun_code.code = testrun_param.name
if measurement.dimensions is None:
if isinstance(testrun_param.numeric_value, float):
testrun_code.details = str(testrun_param.numeric_value)
else:
testrun_code.details = testrun_param.text_value
return mangled_parameters
def _add_mangled_parameters(testrun, mangled_parameters, used_parameter_names):
"""Add any mangled parameters we generated from multidim measurements."""
for mangled_name, mangled_param in sorted(six.iteritems(mangled_parameters)):
if mangled_name != _ensure_unique_parameter_name(mangled_name,
used_parameter_names):
logging.warning('Mangled name %s in use by non-mangled parameter',
mangled_name)
testrun_param = testrun.test_parameters.add()
testrun_param.CopyFrom(mangled_param)
def _add_log_lines(record, testrun):
"""Copy log records over, this is a fairly straightforward mapping."""
for log in record.log_records:
testrun_log = testrun.test_logs.add()
testrun_log.timestamp_millis = log.timestamp_millis
testrun_log.log_message = log.message
testrun_log.logger_name = log.logger_name
testrun_log.levelno = log.level
# pylint: disable=no-member
if log.level <= logging.DEBUG:
testrun_log.level = test_runs_pb2.TestRunLogMessage.DEBUG
elif log.level <= logging.INFO:
testrun_log.level = test_runs_pb2.TestRunLogMessage.INFO
elif log.level <= logging.WARNING:
testrun_log.level = test_runs_pb2.TestRunLogMessage.WARNING
elif log.level <= logging.ERROR:
testrun_log.level = test_runs_pb2.TestRunLogMessage.ERROR
elif log.level <= logging.CRITICAL:
testrun_log.level = test_runs_pb2.TestRunLogMessage.CRITICAL
# pylint: enable=no-member
testrun_log.log_source = log.source
testrun_log.lineno = log.lineno
def _test_run_from_test_record(record):
"""Create a TestRun proto from an OpenHTF TestRecord.
Most fields are just copied over, some are pulled out of metadata (listed
below), and measurements are munged a bit for backwards compatibility.
Metadata fields:
'test_description': TestInfo's description field.
'test_version': TestInfo's version_string field.
'test_name': TestInfo's name field.
'run_name': TestRun's run_name field.
'operator_name': TestRun's operator_name field.
Returns: An instance of the TestRun proto for the given record.
"""
testrun = test_runs_pb2.TestRun()
_populate_header(record, testrun)
_attach_json(record, testrun)
used_parameter_names = set(['OpenHTF_record.json'])
mangled_parameters = _extract_parameters(record, testrun,
used_parameter_names)
_add_mangled_parameters(testrun, mangled_parameters, used_parameter_names)
_add_log_lines(record, testrun)
return testrun
class OutputToTestRunProto(callbacks.OutputToFile):
"""Return an output callback that writes mfg-inspector TestRun Protos.
Example filename_patterns might be:
'/data/test_records/{dut_id}.{metadata[test_name]}.pb' or
'/data/test_records/%(dut_id)s.%(start_time_millis)s'
To use this output mechanism:
test = openhtf.Test(PhaseOne, PhaseTwo)
test.add_output_callback(openhtf.OutputToTestRunProto(
'/data/test_records/{dut_id}.{metadata[test_name]}.pb'))
Args:
filename_pattern: A format string specifying the filename to write to,
will be formatted with the Test Record as a dictionary. May also be a
file-like object to write directly to.
Returns:
filename of local file.
"""
def __init__(self, filename_pattern):
super(OutputToTestRunProto, self).__init__(filename_pattern)
@staticmethod
def serialize_test_record(test_record_obj):
return _test_run_from_test_record(test_record_obj).SerializeToString()
class UploadToMfgInspector(object):
"""Generate a mfg-inspector TestRun proto and upload it.
Create an output callback to upload to mfg-inspector.com using the given
username and authentication key (which should be the key data itself, not a
filename or file).
"""
TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
SCOPE_CODE_URI = 'https://www.googleapis.com/auth/glass.infra.quantum_upload'
DESTINATION_URL = ('https://clients2.google.com/factoryfactory/'
'uploads/quantum_upload/?json')
# pylint: disable=invalid-name,missing-docstring
class _MemStorage(oauth2client.client.Storage):
"""Helper Storage class that keeps credentials in memory."""
def __init__(self):
self._lock = threading.Lock()
self._credentials = None
def acquire_lock(self):
self._lock.acquire(True)
def release_lock(self):
self._lock.release()
def locked_get(self):
return self._credentials
def locked_put(self, credentials):
self._credentials = credentials
# pylint: enable=invalid-name,missing-docstring
def __init__(self, user, keydata,
token_uri=TOKEN_URI, destination_url=DESTINATION_URL):
self.user = user
self.keydata = keydata
self.token_uri = token_uri
self.destination_url = destination_url
self.credentials = oauth2client.client.SignedJwtAssertionCredentials(
service_account_name=self.user,
private_key=self.keydata,
scope=self.SCOPE_CODE_URI,
user_agent='OpenHTF Guzzle Upload Client',
token_uri=self.token_uri)
self.credentials.set_store(self._MemStorage())
@classmethod
def from_json(cls, json_data):
"""Create an uploader given (parsed) JSON data.
Note that this is a JSON-formatted key file downloaded from Google when
the service account key is created, *NOT* a json-encoded
oauth2client.client.SignedJwtAssertionCredentials object.
Args:
json_data: Dict containing the loaded JSON key data.
"""
return cls(user=json_data['client_email'],
keydata=json_data['private_key'],
token_uri=json_data['token_uri'])
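  # A minimal sketch of building an uploader from a downloaded service-account
  # key file (the path is a placeholder):
  #
  #   with open('/path/to/service_account_key.json') as keyfile:
  #     uploader = UploadToMfgInspector.from_json(json.load(keyfile))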
def upload_test_run(self, testrun):
"""Uploads the TestRun at a particular file.
Args:
testrun: TestRun proto or filepath.
"""
http = httplib2.Http()
if self.credentials.access_token_expired:
self.credentials.refresh(http)
self.credentials.authorize(http)
if isinstance(testrun, test_runs_pb2.TestRun):
serialized_run = testrun.SerializeToString()
elif os.path.isfile(testrun):
with open(testrun, 'rb') as testrun_file:
serialized_run = testrun_file.read()
else:
raise InvalidTestRunError('Invalid test run data')
test_run_envelope = guzzle_pb2.TestRunEnvelope()
test_run_envelope.payload = zlib.compress(serialized_run)
test_run_envelope.payload_type = guzzle_pb2.COMPRESSED_TEST_RUN
serialized_envelope = test_run_envelope.SerializeToString()
resp, content = http.request(self.destination_url, 'POST',
serialized_envelope)
if resp.status != 200:
try:
results = json.loads(content)
except Exception:
raise UploadFailedError(resp, content)
else:
raise UploadFailedError(results['error'], results)
result = json.loads(content)
return result['key']
def __call__(self, test_record_obj): # pylint: disable=invalid-name
testrun = _test_run_from_test_record(test_record_obj)
self.upload_test_run(testrun)
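# Example (sketch; the credentials shown are placeholders): registering the
# uploader as an output callback, mirroring the OutputToTestRunProto usage above.
#
#   test.add_output_callback(UploadToMfgInspector(
#       user='service-account@example-project.iam.gserviceaccount.com',
#       keydata=PRIVATE_KEY_DATA))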
class UploadOrOutput(object):
"""Attempt to upload to inspector, output to local if fail.
Args:
user: Google cloud service account for Oauth2client.
keydata: Google cloud key data for Oauth2client.
filename_pattern: A format string specifying the filename to write to,
will be formatted with the Test Record as a dictionary. May also be a
file-like object to write directly to.
upload_fail_message: Message to log on upload failure.
"""
def __init__(self, user, keydata, filename_pattern,
upload_fail_message='Upload to mfg-inspector failed!'):
self._upload_fail_message = upload_fail_message
self._UploadToMfgInspector = UploadToMfgInspector(user, keydata)
self._OutputToTestRunProto = OutputToTestRunProto(filename_pattern)
def __call__(self, test_record_obj): # pylint: disable=invalid-name
try:
logging.info('Attempting to upload to mfg-inspector')
return self._UploadToMfgInspector(test_record_obj)
except Exception:
logging.warning('%s', self._upload_fail_message)
filename = self._OutputToTestRunProto(test_record_obj)
logging.info('Saved local file: %s', filename)
raise
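# Example (sketch; key data and filename pattern are placeholders): upload with
# a local protobuf file as a fallback when mfg-inspector is unreachable.
#
#   test.add_output_callback(UploadOrOutput(
#       user=json_key['client_email'],
#       keydata=json_key['private_key'],
#       filename_pattern='/data/test_records/{dut_id}.pb'))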
|
|
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import topics
from neutron import context
from neutron.extensions import external_net
from neutron.extensions import portbindings
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import base as ml2_test_base
DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
class L3DvrTestCase(ml2_test_base.ML2TestFramework):
def setUp(self):
super(L3DvrTestCase, self).setUp()
self.l3_agent = helpers.register_l3_agent(
agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
def _create_router(self, distributed=True, ha=False):
return (super(L3DvrTestCase, self).
_create_router(distributed=distributed, ha=ha))
def test_update_router_db_centralized_to_distributed(self):
router = self._create_router(distributed=False)
# router needs to be in admin state down in order to be upgraded to DVR
self.l3_plugin.update_router(
self.context, router['id'], {'router': {'admin_state_up': False}})
self.assertFalse(router['distributed'])
self.l3_plugin.update_router(
self.context, router['id'], {'router': {'distributed': True}})
router = self.l3_plugin.get_router(self.context, router['id'])
self.assertTrue(router['distributed'])
def test_get_device_owner_distributed_router_object(self):
router = self._create_router()
self.assertEqual(
constants.DEVICE_OWNER_DVR_INTERFACE,
self.l3_plugin._get_device_owner(self.context, router))
def test_get_device_owner_distributed_router_id(self):
router = self._create_router()
self.assertEqual(
constants.DEVICE_OWNER_DVR_INTERFACE,
self.l3_plugin._get_device_owner(self.context, router['id']))
def test_get_device_owner_centralized(self):
router = self._create_router(distributed=False)
self.assertEqual(
constants.DEVICE_OWNER_ROUTER_INTF,
self.l3_plugin._get_device_owner(self.context, router['id']))
def test_get_agent_gw_ports_exist_for_network_no_port(self):
self.assertIsNone(
self.l3_plugin._get_agent_gw_ports_exist_for_network(
self.context, 'network_id', 'host', 'agent_id'))
def _test_remove_router_interface_leaves_snat_intact(self, by_subnet):
with self.subnet() as subnet1, \
self.subnet(cidr='20.0.0.0/24') as subnet2:
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
with self.network(**kwargs) as ext_net, \
self.subnet(network=ext_net,
cidr='30.0.0.0/24'):
router = self._create_router()
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet1['subnet']['id']})
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet2['subnet']['id']})
self.l3_plugin._update_router_gw_info(
self.context, router['id'],
{'network_id': ext_net['network']['id']})
snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
self.context, [router['id']])
self.assertEqual(
2, len(snat_router_intfs[router['id']]))
if by_subnet:
self.l3_plugin.remove_router_interface(
self.context, router['id'],
{'subnet_id': subnet1['subnet']['id']})
else:
port = self.core_plugin.get_ports(
self.context, filters={
'network_id': [subnet1['subnet']['network_id']],
'device_owner':
[constants.DEVICE_OWNER_DVR_INTERFACE]})[0]
self.l3_plugin.remove_router_interface(
self.context, router['id'],
{'port_id': port['id']})
self.assertEqual(
1, len(self.l3_plugin._get_snat_sync_interfaces(
self.context, [router['id']])))
def test_remove_router_interface_by_subnet_leaves_snat_intact(self):
self._test_remove_router_interface_leaves_snat_intact(by_subnet=True)
def test_remove_router_interface_by_port_leaves_snat_intact(self):
self._test_remove_router_interface_leaves_snat_intact(
by_subnet=False)
def setup_create_agent_gw_port_for_network(self, network=None):
if not network:
network = self._make_network(self.fmt, '', True)
network_id = network['network']['id']
port = self.core_plugin.create_port(
self.context,
{'port': {'tenant_id': '',
'network_id': network_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'device_id': self.l3_agent['id'],
'device_owner': constants.DEVICE_OWNER_AGENT_GW,
portbindings.HOST_ID: '',
'admin_state_up': True,
'name': ''}})
return network_id, port
def test_get_agent_gw_port_for_network(self):
network_id, port = (
self.setup_create_agent_gw_port_for_network())
self.assertEqual(
port['id'],
self.l3_plugin._get_agent_gw_ports_exist_for_network(
self.context, network_id, None, self.l3_agent['id'])['id'])
def test_delete_agent_gw_port_for_network(self):
network_id, port = (
self.setup_create_agent_gw_port_for_network())
self.l3_plugin.delete_floatingip_agent_gateway_port(
self.context, "", network_id)
self.assertIsNone(
self.l3_plugin._get_agent_gw_ports_exist_for_network(
self.context, network_id, "", self.l3_agent['id']))
def test_get_fip_sync_interfaces(self):
self.setup_create_agent_gw_port_for_network()
self.assertEqual(
1, len(self.l3_plugin._get_fip_sync_interfaces(
self.context, self.l3_agent['id'])))
def test_process_routers(self):
router = self._create_router()
result = self.l3_plugin._process_routers(self.context, [router])
self.assertEqual(
router['id'], result[router['id']]['id'])
def test_agent_gw_port_delete_when_last_gateway_for_ext_net_removed(self):
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
net1 = self._make_network(self.fmt, 'net1', True)
net2 = self._make_network(self.fmt, 'net2', True)
subnet1 = self._make_subnet(
self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
subnet2 = self._make_subnet(
self.fmt, net2, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
self._make_subnet(
self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True)
# Create first router and add an interface
router1 = self._create_router()
ext_net_id = ext_net['network']['id']
self.l3_plugin.add_router_interface(
self.context, router1['id'],
{'subnet_id': subnet1['subnet']['id']})
# Set gateway to first router
self.l3_plugin._update_router_gw_info(
self.context, router1['id'],
{'network_id': ext_net_id})
# Create second router and add an interface
router2 = self._create_router()
self.l3_plugin.add_router_interface(
self.context, router2['id'],
{'subnet_id': subnet2['subnet']['id']})
# Set gateway to second router
self.l3_plugin._update_router_gw_info(
self.context, router2['id'],
{'network_id': ext_net_id})
# Create an agent gateway port for the external network
net_id, agent_gw_port = (
self.setup_create_agent_gw_port_for_network(network=ext_net))
# Check for agent gateway ports
self.assertIsNotNone(
self.l3_plugin._get_agent_gw_ports_exist_for_network(
self.context, ext_net_id, "", self.l3_agent['id']))
self.l3_plugin._update_router_gw_info(
self.context, router1['id'], {})
# Check for agent gateway port after deleting one of the gw
self.assertIsNotNone(
self.l3_plugin._get_agent_gw_ports_exist_for_network(
self.context, ext_net_id, "", self.l3_agent['id']))
self.l3_plugin._update_router_gw_info(
self.context, router2['id'], {})
# Check for agent gateway port after deleting last gw
self.assertIsNone(
self.l3_plugin._get_agent_gw_ports_exist_for_network(
self.context, ext_net_id, "", self.l3_agent['id']))
def _test_create_floating_ip_agent_notification(self, dvr=True):
with self.subnet() as ext_subnet,\
self.subnet(cidr='20.0.0.0/24') as int_subnet,\
self.port(subnet=int_subnet,
device_owner=DEVICE_OWNER_COMPUTE) as int_port:
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {external_net.EXTERNAL: True}})
router = self._create_router(distributed=dvr)
self.l3_plugin.update_router(
self.context, router['id'],
{'router': {
'external_gateway_info': {'network_id': ext_net_id}}})
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': int_subnet['subnet']['id']})
floating_ip = {'floating_network_id': ext_net_id,
'router_id': router['id'],
'port_id': int_port['port']['id'],
'tenant_id': int_port['port']['tenant_id'],
'dns_name': '', 'dns_domain': ''}
with mock.patch.object(
self.l3_plugin, '_l3_rpc_notifier') as l3_notif:
self.l3_plugin.create_floatingip(
self.context, {'floatingip': floating_ip})
if dvr:
l3_notif.routers_updated_on_host.assert_called_once_with(
self.context, [router['id']],
int_port['port'][portbindings.HOST_ID])
self.assertFalse(l3_notif.routers_updated.called)
else:
l3_notif.routers_updated.assert_called_once_with(
self.context, [router['id']], None)
self.assertFalse(
l3_notif.routers_updated_on_host.called)
def test_create_floating_ip_agent_notification(self):
self._test_create_floating_ip_agent_notification()
def test_create_floating_ip_agent_notification_non_dvr(self):
self._test_create_floating_ip_agent_notification(dvr=False)
def _test_update_floating_ip_agent_notification(self, dvr=True):
with self.subnet() as ext_subnet,\
self.subnet(cidr='20.0.0.0/24') as int_subnet1,\
self.subnet(cidr='30.0.0.0/24') as int_subnet2,\
self.port(subnet=int_subnet1,
device_owner=DEVICE_OWNER_COMPUTE) as int_port1,\
self.port(subnet=int_subnet2,
device_owner=DEVICE_OWNER_COMPUTE) as int_port2:
# locate internal ports on different hosts
self.core_plugin.update_port(
self.context, int_port1['port']['id'],
{'port': {portbindings.HOST_ID: 'host1'}})
self.core_plugin.update_port(
self.context, int_port2['port']['id'],
{'port': {portbindings.HOST_ID: 'host2'}})
# and create l3 agents on corresponding hosts
helpers.register_l3_agent(host='host1',
agent_mode=constants.L3_AGENT_MODE_DVR)
helpers.register_l3_agent(host='host2',
agent_mode=constants.L3_AGENT_MODE_DVR)
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {external_net.EXTERNAL: True}})
router1 = self._create_router(distributed=dvr)
router2 = self._create_router(distributed=dvr)
for router in (router1, router2):
self.l3_plugin.update_router(
self.context, router['id'],
{'router': {
'external_gateway_info': {'network_id': ext_net_id}}})
self.l3_plugin.add_router_interface(
self.context, router1['id'],
{'subnet_id': int_subnet1['subnet']['id']})
self.l3_plugin.add_router_interface(
self.context, router2['id'],
{'subnet_id': int_subnet2['subnet']['id']})
floating_ip = {'floating_network_id': ext_net_id,
'router_id': router1['id'],
'port_id': int_port1['port']['id'],
'tenant_id': int_port1['port']['tenant_id'],
'dns_name': '', 'dns_domain': ''}
floating_ip = self.l3_plugin.create_floatingip(
self.context, {'floatingip': floating_ip})
with mock.patch.object(
self.l3_plugin, '_l3_rpc_notifier') as l3_notif:
updated_floating_ip = {'router_id': router2['id'],
'port_id': int_port2['port']['id']}
self.l3_plugin.update_floatingip(
self.context, floating_ip['id'],
{'floatingip': updated_floating_ip})
if dvr:
self.assertEqual(
2, l3_notif.routers_updated_on_host.call_count)
expected_calls = [
mock.call(self.context, [router1['id']], 'host1'),
mock.call(self.context, [router2['id']], 'host2')]
l3_notif.routers_updated_on_host.assert_has_calls(
expected_calls)
self.assertFalse(l3_notif.routers_updated.called)
else:
self.assertEqual(
2, l3_notif.routers_updated.call_count)
expected_calls = [
mock.call(self.context, [router1['id']], None),
mock.call(self.context, [router2['id']], None)]
l3_notif.routers_updated.assert_has_calls(
expected_calls)
self.assertFalse(l3_notif.routers_updated_on_host.called)
def test_update_floating_ip_agent_notification(self):
self._test_update_floating_ip_agent_notification()
def test_update_floating_ip_agent_notification_non_dvr(self):
self._test_update_floating_ip_agent_notification(dvr=False)
def _test_delete_floating_ip_agent_notification(self, dvr=True):
with self.subnet() as ext_subnet,\
self.subnet(cidr='20.0.0.0/24') as int_subnet,\
self.port(subnet=int_subnet,
device_owner=DEVICE_OWNER_COMPUTE) as int_port:
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {external_net.EXTERNAL: True}})
router = self._create_router(distributed=dvr)
self.l3_plugin.update_router(
self.context, router['id'],
{'router': {
'external_gateway_info': {'network_id': ext_net_id}}})
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': int_subnet['subnet']['id']})
floating_ip = {'floating_network_id': ext_net_id,
'router_id': router['id'],
'port_id': int_port['port']['id'],
'tenant_id': int_port['port']['tenant_id'],
'dns_name': '', 'dns_domain': ''}
floating_ip = self.l3_plugin.create_floatingip(
self.context, {'floatingip': floating_ip})
with mock.patch.object(
self.l3_plugin, '_l3_rpc_notifier') as l3_notif:
self.l3_plugin.delete_floatingip(
self.context, floating_ip['id'])
if dvr:
l3_notif.routers_updated_on_host.assert_called_once_with(
self.context, [router['id']],
int_port['port'][portbindings.HOST_ID])
self.assertFalse(l3_notif.routers_updated.called)
else:
l3_notif.routers_updated.assert_called_once_with(
self.context, [router['id']], None)
self.assertFalse(
l3_notif.routers_updated_on_host.called)
def test_delete_floating_ip_agent_notification(self):
self._test_delete_floating_ip_agent_notification()
def test_delete_floating_ip_agent_notification_non_dvr(self):
self._test_delete_floating_ip_agent_notification(dvr=False)
def test_router_with_ipv4_and_multiple_ipv6_on_same_network(self):
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24',
ip_version=4, enable_dhcp=True)
self._make_subnet(
self.fmt, ext_net, '2001:db8::1', '2001:db8::/64',
ip_version=6, enable_dhcp=True)
router1 = self._create_router()
self.l3_plugin._update_router_gw_info(
self.context, router1['id'],
{'network_id': ext_net['network']['id']})
snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
self.context, [router1['id']])
self.assertEqual(0, len(snat_router_intfs[router1['id']]))
private_net1 = self._make_network(self.fmt, 'net1', True)
private_ipv6_subnet1 = self._make_subnet(self.fmt,
private_net1, 'fd00::1',
cidr='fd00::1/64', ip_version=6,
ipv6_ra_mode='slaac',
ipv6_address_mode='slaac')
private_ipv6_subnet2 = self._make_subnet(self.fmt,
private_net1, 'fd01::1',
cidr='fd01::1/64', ip_version=6,
ipv6_ra_mode='slaac',
ipv6_address_mode='slaac')
# Add the first IPv6 subnet to the router
self.l3_plugin.add_router_interface(
self.context, router1['id'],
{'subnet_id': private_ipv6_subnet1['subnet']['id']})
# Check for the internal snat port interfaces
snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
self.context, [router1['id']])
self.assertEqual(1, len(snat_router_intfs[router1['id']]))
# Add the second IPv6 subnet to the router
self.l3_plugin.add_router_interface(
self.context, router1['id'],
{'subnet_id': private_ipv6_subnet2['subnet']['id']})
# Check for the internal snat port interfaces
snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
self.context, [router1['id']])
snat_intf_list = snat_router_intfs[router1['id']]
fixed_ips = snat_intf_list[0]['fixed_ips']
self.assertEqual(1, len(snat_router_intfs[router1['id']]))
self.assertEqual(2, len(fixed_ips))
# Now delete the router interface and it should update the
# SNAT port with the right fixed_ips instead of deleting it.
self.l3_plugin.remove_router_interface(
self.context, router1['id'],
{'subnet_id': private_ipv6_subnet2['subnet']['id']})
# Check for the internal snat port interfaces
snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
self.context, [router1['id']])
snat_intf_list = snat_router_intfs[router1['id']]
fixed_ips = snat_intf_list[0]['fixed_ips']
self.assertEqual(1, len(snat_router_intfs[router1['id']]))
self.assertEqual(1, len(fixed_ips))
def test_update_service_port_with_allowed_address_pairs(self):
HOST1 = 'host1'
helpers.register_l3_agent(
host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
private_net1 = self._make_network(self.fmt, 'net1', True)
test_allocation_pools = [{'start': '10.1.0.2',
'end': '10.1.0.20'}]
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=4, enable_dhcp=True)
# Set gateway to router
self.l3_plugin._update_router_gw_info(
self.context, router['id'],
{'network_id': ext_net['network']['id']})
private_subnet1 = self._make_subnet(
self.fmt,
private_net1,
'10.1.0.1',
cidr='10.1.0.0/24',
ip_version=4,
allocation_pools=test_allocation_pools,
enable_dhcp=True)
vrrp_port = self._make_port(
self.fmt,
private_net1['network']['id'],
device_owner=constants.DEVICE_OWNER_LOADBALANCER,
fixed_ips=fixed_vrrp_ip)
allowed_address_pairs = [
{'ip_address': '10.1.0.201',
'mac_address': vrrp_port['port']['mac_address']}]
with self.port(
subnet=private_subnet1,
device_owner=DEVICE_OWNER_COMPUTE) as int_port:
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': private_subnet1['subnet']['id']})
with mock.patch.object(self.l3_plugin,
'_l3_rpc_notifier') as l3_notifier:
self.core_plugin.update_port(
self.context, int_port['port']['id'],
{'port': {portbindings.HOST_ID: HOST1}})
l3_notifier.routers_updated_on_host.assert_called_once_with(
self.context, {router['id']}, HOST1)
floating_ip = {'floating_network_id': ext_net['network']['id'],
'router_id': router['id'],
'port_id': vrrp_port['port']['id'],
'tenant_id': vrrp_port['port']['tenant_id']}
floating_ip = self.l3_plugin.create_floatingip(
self.context, {'floatingip': floating_ip})
vrrp_port_db = self.core_plugin.get_port(
self.context, vrrp_port['port']['id'])
self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST1)
# Now update the VM port with the allowed_address_pair
cur_int_port = self.core_plugin.update_port(
self.context, int_port['port']['id'],
{'port': {
'allowed_address_pairs': allowed_address_pairs}})
cur_vrrp_port_db = self.core_plugin.get_port(
self.context, vrrp_port['port']['id'])
# Check to make sure that we are not changing the existing
# device_owner for the allowed_address_pair port.
self.assertEqual(
cur_vrrp_port_db['device_owner'],
constants.DEVICE_OWNER_LOADBALANCER)
self.assertEqual(cur_vrrp_port_db[portbindings.HOST_ID], HOST1)
self.assertTrue(cur_vrrp_port_db.get(portbindings.PROFILE))
port_profile = cur_vrrp_port_db.get(portbindings.PROFILE)
self.assertTrue(port_profile)
self.assertEqual(port_profile['original_owner'],
constants.DEVICE_OWNER_LOADBALANCER)
# Now change the compute port admin_state_up from True to
# False, and see if the vrrp port's device_owner and binding
# inheritance revert back to normal.
mod_int_port = self.core_plugin.update_port(
self.context, cur_int_port['id'],
{'port': {
'admin_state_up': False}})
self.assertFalse(mod_int_port['admin_state_up'])
new_vrrp_port_db = self.core_plugin.get_port(
self.context, cur_vrrp_port_db['id'])
new_port_profile = new_vrrp_port_db.get(portbindings.PROFILE)
self.assertEqual({}, new_port_profile)
self.assertNotEqual(
new_vrrp_port_db[portbindings.HOST_ID], HOST1)
# Now change the compute port admin_state_up from False to
# True, and see if the vrrp port's device_owner and binding
# are inherited from the associated parent compute port again.
new_mod_int_port = self.core_plugin.update_port(
self.context, mod_int_port['id'],
{'port': {
'admin_state_up': True}})
self.assertTrue(new_mod_int_port['admin_state_up'])
cur_new_vrrp_port_db = self.core_plugin.get_port(
self.context, new_vrrp_port_db['id'])
self.assertNotEqual(
cur_new_vrrp_port_db['device_owner'], DEVICE_OWNER_COMPUTE)
self.assertEqual(
cur_new_vrrp_port_db[portbindings.HOST_ID], HOST1)
# Now let us try to remove vrrp_port device_owner and see
# how it inherits from the compute port.
updated_vrrp_port = self.core_plugin.update_port(
self.context, cur_new_vrrp_port_db['id'],
{'port': {'device_owner': "",
portbindings.PROFILE: {'original_owner': ""}}})
updated_vm_port = self.core_plugin.update_port(
self.context, new_mod_int_port['id'],
{'port': {
'admin_state_up': False}})
self.assertFalse(updated_vm_port['admin_state_up'])
# This port admin_state down should not cause any issue
# with the existing vrrp port device_owner, but should
# only change the port_binding HOST_ID.
cur_new_vrrp_port_db = self.core_plugin.get_port(
self.context, updated_vrrp_port['id'])
self.assertEqual(
"", cur_new_vrrp_port_db['device_owner'])
self.assertEqual(
"", cur_new_vrrp_port_db[portbindings.HOST_ID])
updated_vm_port = self.core_plugin.update_port(
self.context, new_mod_int_port['id'],
{'port': {
'admin_state_up': True}})
self.assertTrue(updated_vm_port['admin_state_up'])
updated_vrrp_port_db = self.core_plugin.get_port(
self.context, new_vrrp_port_db['id'])
self.assertEqual(
updated_vrrp_port_db['device_owner'], DEVICE_OWNER_COMPUTE)
self.assertEqual(
updated_vrrp_port_db[portbindings.HOST_ID], HOST1)
def test_update_vm_port_host_router_update(self):
# register l3 agents in dvr mode in addition to existing dvr_snat agent
HOST1 = 'host1'
helpers.register_l3_agent(
host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR)
HOST2 = 'host2'
helpers.register_l3_agent(
host=HOST2, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
with self.subnet() as subnet:
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet['subnet']['id']})
with mock.patch.object(self.l3_plugin,
'_l3_rpc_notifier') as l3_notifier,\
self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE) as port:
self.l3_plugin.agent_notifiers[
constants.AGENT_TYPE_L3] = l3_notifier
self.core_plugin.update_port(
self.context, port['port']['id'],
{'port': {portbindings.HOST_ID: HOST1}})
l3_notifier.routers_updated_on_host.assert_called_once_with(
self.context, {router['id']}, HOST1)
self.assertFalse(l3_notifier.routers_updated.called)
# updating port's host (instance migration)
l3_notifier.reset_mock()
self.core_plugin.update_port(
self.context, port['port']['id'],
{'port': {portbindings.HOST_ID: HOST2}})
l3_notifier.routers_updated_on_host.assert_called_once_with(
self.context, {router['id']}, HOST2)
l3_notifier.router_removed_from_agent.assert_called_once_with(
mock.ANY, router['id'], HOST1)
def _test_router_remove_from_agent_on_vm_port_deletion(
self, non_admin_port=False):
# register l3 agent in dvr mode in addition to existing dvr_snat agent
HOST = 'host1'
non_admin_tenant = 'tenant1'
helpers.register_l3_agent(
host=HOST, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
with self.network(shared=True) as net,\
self.subnet(network=net) as subnet,\
self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
tenant_id=non_admin_tenant,
set_context=non_admin_port) as port:
self.core_plugin.update_port(
self.context, port['port']['id'],
{'port': {portbindings.HOST_ID: HOST}})
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet['subnet']['id']})
with mock.patch.object(self.l3_plugin.l3_rpc_notifier,
'router_removed_from_agent') as remove_mock:
ctx = context.Context(
'', non_admin_tenant) if non_admin_port else self.context
self._delete('ports', port['port']['id'], neutron_context=ctx)
remove_mock.assert_called_once_with(
mock.ANY, router['id'], HOST)
def test_router_remove_from_agent_on_vm_port_deletion(self):
self._test_router_remove_from_agent_on_vm_port_deletion()
def test_admin_router_remove_from_agent_on_vm_port_deletion(self):
self._test_router_remove_from_agent_on_vm_port_deletion(
non_admin_port=True)
def test_dvr_router_notifications_for_live_migration_with_fip(self):
self._dvr_router_notifications_for_live_migration(
with_floatingip=True)
def test_dvr_router_notifications_for_live_migration_without_fip(self):
self._dvr_router_notifications_for_live_migration()
def _dvr_router_notifications_for_live_migration(
self, with_floatingip=False):
"""Check the router notifications go to the right hosts
with live migration without hostbinding on the port.
"""
# register l3 agents in dvr mode in addition to existing dvr_snat agent
HOST1, HOST2 = 'host1', 'host2'
for host in [HOST1, HOST2]:
helpers.register_l3_agent(
host=host, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
arg_list = (portbindings.HOST_ID,)
with self.subnet() as ext_subnet,\
self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.port(subnet=subnet1,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}) as vm_port:
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {external_net.EXTERNAL: True}})
# add external gateway to router
self.l3_plugin.update_router(
self.context, router['id'],
{'router': {
'external_gateway_info': {'network_id': ext_net_id}}})
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet1['subnet']['id']})
if with_floatingip:
floating_ip = {'floating_network_id': ext_net_id,
'router_id': router['id'],
'port_id': vm_port['port']['id'],
'tenant_id': vm_port['port']['tenant_id'],
'dns_name': '', 'dns_domain': ''}
floating_ip = self.l3_plugin.create_floatingip(
self.context, {'floatingip': floating_ip})
with mock.patch.object(self.l3_plugin,
'_l3_rpc_notifier') as l3_notifier,\
mock.patch.object(
self.l3_plugin,
'create_fip_agent_gw_port_if_not_exists'
) as fip_agent:
live_migration_port_profile = {
'migrating_to': HOST2
}
# Update the VM port with the live-migration port binding profile.
# This change should trigger a notification to the destination
# host so that the router is created there ahead of time,
# before the VM port binding has actually changed to HOST2.
updated_port = self.core_plugin.update_port(
self.context, vm_port['port']['id'],
{'port': {
portbindings.PROFILE: live_migration_port_profile}})
l3_notifier.routers_updated_on_host.assert_called_once_with(
self.context, {router['id']}, HOST2)
# Check the port-binding is still with the old HOST1, but
# the router update notification has been sent to the new
# host 'HOST2' based on the live migration profile change.
self.assertEqual(updated_port[portbindings.HOST_ID], HOST1)
self.assertNotEqual(updated_port[portbindings.HOST_ID], HOST2)
if with_floatingip:
fip_agent.return_value = True
# Since a floating IP has already been created for the
# port, the plugin should create the floating IP agent
# gateway port on the new host if it does not already exist.
fip_agent.assert_called_once_with(
mock.ANY, floating_ip['floating_network_id'], HOST2)
def test_router_notifications(self):
"""Check that notifications go to the right hosts in different
conditions
"""
# register l3 agents in dvr mode in addition to existing dvr_snat agent
HOST1, HOST2, HOST3 = 'host1', 'host2', 'host3'
for host in [HOST1, HOST2, HOST3]:
helpers.register_l3_agent(
host=host, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
arg_list = (portbindings.HOST_ID,)
with self.subnet() as ext_subnet,\
self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.subnet(cidr='40.0.0.0/24') as subnet3,\
self.port(subnet=subnet1,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}),\
self.port(subnet=subnet2,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST2}),\
self.port(subnet=subnet3,
device_owner=constants.DEVICE_OWNER_NEUTRON_PREFIX,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST3}):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {external_net.EXTERNAL: True}})
with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client,
'prepare') as mock_prepare:
# add external gateway to router
self.l3_plugin.update_router(
self.context, router['id'],
{'router': {
'external_gateway_info': {'network_id': ext_net_id}}})
# router has no interfaces so notification goes
# to only dvr_snat agent
mock_prepare.assert_called_once_with(
server=self.l3_agent['host'],
topic=topics.L3_AGENT,
version='1.1')
mock_prepare.reset_mock()
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet1['subnet']['id']})
self.assertEqual(2, mock_prepare.call_count)
expected = [mock.call(server=self.l3_agent['host'],
topic=topics.L3_AGENT,
version='1.1'),
mock.call(server=HOST1,
topic=topics.L3_AGENT,
version='1.1')]
mock_prepare.assert_has_calls(expected, any_order=True)
mock_prepare.reset_mock()
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet2['subnet']['id']})
self.assertEqual(3, mock_prepare.call_count)
expected = [mock.call(server=self.l3_agent['host'],
topic=topics.L3_AGENT,
version='1.1'),
mock.call(server=HOST1,
topic=topics.L3_AGENT,
version='1.1'),
mock.call(server=HOST2,
topic=topics.L3_AGENT,
version='1.1')]
mock_prepare.assert_has_calls(expected, any_order=True)
mock_prepare.reset_mock()
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet3['subnet']['id']})
# there are no dvr serviceable ports on HOST3, so notification
# goes to the same hosts
self.assertEqual(3, mock_prepare.call_count)
expected = [mock.call(server=self.l3_agent['host'],
topic=topics.L3_AGENT,
version='1.1'),
mock.call(server=HOST1,
topic=topics.L3_AGENT,
version='1.1'),
mock.call(server=HOST2,
topic=topics.L3_AGENT,
version='1.1')]
mock_prepare.assert_has_calls(expected, any_order=True)
def test_router_is_not_removed_from_snat_agent_on_interface_removal(self):
"""Check that dvr router is not removed from l3 agent hosting
SNAT for it on router interface removal
"""
router = self._create_router()
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
with self.subnet() as subnet,\
self.network(**kwargs) as ext_net,\
self.subnet(network=ext_net, cidr='20.0.0.0/24'):
self.l3_plugin._update_router_gw_info(
self.context, router['id'],
{'network_id': ext_net['network']['id']})
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet['subnet']['id']})
agents = self.l3_plugin.list_l3_agents_hosting_router(
self.context, router['id'])
self.assertEqual(1, len(agents['agents']))
with mock.patch.object(self.l3_plugin,
'_l3_rpc_notifier') as l3_notifier:
self.l3_plugin.remove_router_interface(
self.context, router['id'],
{'subnet_id': subnet['subnet']['id']})
agents = self.l3_plugin.list_l3_agents_hosting_router(
self.context, router['id'])
self.assertEqual(1, len(agents['agents']))
self.assertFalse(l3_notifier.router_removed_from_agent.called)
def test_router_is_not_removed_from_snat_agent_on_dhcp_port_deletion(self):
"""Check that dvr router is not removed from l3 agent hosting
SNAT for it on DHCP port removal
"""
router = self._create_router()
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
with self.network(**kwargs) as ext_net,\
self.subnet(network=ext_net),\
self.subnet(cidr='20.0.0.0/24') as subnet,\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_DHCP) as port:
self.core_plugin.update_port(
self.context, port['port']['id'],
{'port': {'binding:host_id': self.l3_agent['host']}})
self.l3_plugin._update_router_gw_info(
self.context, router['id'],
{'network_id': ext_net['network']['id']})
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet['subnet']['id']})
# router should be scheduled to the dvr_snat l3 agent
agents = self.l3_plugin.list_l3_agents_hosting_router(
self.context, router['id'])
self.assertEqual(1, len(agents['agents']))
self.assertEqual(self.l3_agent['id'], agents['agents'][0]['id'])
notifier = self.l3_plugin.agent_notifiers[
constants.AGENT_TYPE_L3]
with mock.patch.object(
notifier, 'router_removed_from_agent') as remove_mock:
self._delete('ports', port['port']['id'])
# now when port is deleted the router still has external
# gateway and should still be scheduled to the snat agent
agents = self.l3_plugin.list_l3_agents_hosting_router(
self.context, router['id'])
self.assertEqual(1, len(agents['agents']))
self.assertEqual(self.l3_agent['id'],
agents['agents'][0]['id'])
self.assertFalse(remove_mock.called)
def test__get_dvr_subnet_ids_on_host_query(self):
with self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.subnet(cidr='40.0.0.0/24') as subnet3,\
self.port(subnet=subnet1,
device_owner=DEVICE_OWNER_COMPUTE) as p1,\
self.port(subnet=subnet2,
device_owner=constants.DEVICE_OWNER_DHCP) as p2,\
self.port(subnet=subnet3,
device_owner=constants.DEVICE_OWNER_NEUTRON_PREFIX)\
as p3,\
self.port(subnet=subnet3,
device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX)\
as p4:
host = 'host1'
subnet_ids = [item[0] for item in
self.l3_plugin._get_dvr_subnet_ids_on_host_query(
self.context, host)]
self.assertEqual([], subnet_ids)
self.core_plugin.update_port(
self.context, p1['port']['id'],
{'port': {portbindings.HOST_ID: host}})
expected = {subnet1['subnet']['id']}
subnet_ids = [item[0] for item in
self.l3_plugin._get_dvr_subnet_ids_on_host_query(
self.context, host)]
self.assertEqual(expected, set(subnet_ids))
self.core_plugin.update_port(
self.context, p2['port']['id'],
{'port': {portbindings.HOST_ID: host}})
expected.add(subnet2['subnet']['id'])
subnet_ids = [item[0] for item in
self.l3_plugin._get_dvr_subnet_ids_on_host_query(
self.context, host)]
self.assertEqual(expected, set(subnet_ids))
self.core_plugin.update_port(
self.context, p3['port']['id'],
{'port': {portbindings.HOST_ID: host}})
# p3 is non dvr serviceable so no subnet3 expected
subnet_ids = [item[0] for item in
self.l3_plugin._get_dvr_subnet_ids_on_host_query(
self.context, host)]
self.assertEqual(expected, set(subnet_ids))
other_host = 'other' + host
self.core_plugin.update_port(
self.context, p4['port']['id'],
{'port': {portbindings.HOST_ID: other_host}})
# p4 is on other host so no subnet3 expected
subnet_ids = [item[0] for item in
self.l3_plugin._get_dvr_subnet_ids_on_host_query(
self.context, host)]
self.assertEqual(expected, set(subnet_ids))
self.core_plugin.update_port(
self.context, p4['port']['id'],
{'port': {portbindings.HOST_ID: host}})
# finally p4 is on the right host so subnet3 is expected
expected.add(subnet3['subnet']['id'])
subnet_ids = [item[0] for item in
self.l3_plugin._get_dvr_subnet_ids_on_host_query(
self.context, host)]
self.assertEqual(expected, set(subnet_ids))
def test__get_dvr_router_ids_for_host(self):
router1 = self._create_router()
router2 = self._create_router()
host = 'host1'
arg_list = (portbindings.HOST_ID,)
with self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.port(subnet=subnet1,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: host}),\
self.port(subnet=subnet2,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
router_ids = self.l3_plugin._get_dvr_router_ids_for_host(
self.context, host)
self.assertEqual([], router_ids)
self.l3_plugin.add_router_interface(
self.context, router1['id'],
{'subnet_id': subnet1['subnet']['id']})
router_ids = self.l3_plugin._get_dvr_router_ids_for_host(
self.context, host)
expected = {router1['id']}
self.assertEqual(expected, set(router_ids))
self.l3_plugin.add_router_interface(
self.context, router2['id'],
{'subnet_id': subnet2['subnet']['id']})
router_ids = self.l3_plugin._get_dvr_router_ids_for_host(
self.context, host)
expected.add(router2['id'])
self.assertEqual(expected, set(router_ids))
def test__get_router_ids_for_agent(self):
router1 = self._create_router()
router2 = self._create_router()
router3 = self._create_router()
arg_list = (portbindings.HOST_ID,)
host = self.l3_agent['host']
with self.subnet() as ext_subnet,\
self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.port(subnet=subnet1,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: host}),\
self.port(subnet=subnet2,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [])
self.assertEqual([], ids)
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [router1['id'], router2['id']])
self.assertEqual([], ids)
self.l3_plugin.add_router_interface(
self.context, router1['id'],
{'subnet_id': subnet1['subnet']['id']})
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [])
self.assertEqual([router1['id']], ids)
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [router1['id']])
self.assertEqual([router1['id']], ids)
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [router1['id'], router2['id']])
self.assertEqual([router1['id']], ids)
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [router2['id']])
self.assertEqual([], ids)
self.l3_plugin.add_router_interface(
self.context, router2['id'],
{'subnet_id': subnet2['subnet']['id']})
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [])
self.assertEqual({router1['id'], router2['id']}, set(ids))
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [router1['id']])
self.assertEqual([router1['id']], ids)
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [router1['id'], router2['id']])
self.assertEqual({router1['id'], router2['id']}, set(ids))
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [router2['id']])
self.assertEqual([router2['id']], ids)
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {external_net.EXTERNAL: True}})
# add external gateway to router
self.l3_plugin.update_router(
self.context, router3['id'],
{'router': {
'external_gateway_info': {'network_id': ext_net_id}}})
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [])
self.assertEqual({router1['id'], router2['id'], router3['id']},
set(ids))
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [router3['id']])
self.assertEqual([router3['id']], ids)
ids = self.l3_plugin._get_router_ids_for_agent(
self.context, self.l3_agent, [router1['id'], router3['id']])
self.assertEqual({router1['id'], router3['id']}, set(ids))
def test_remove_router_interface(self):
HOST1 = 'host1'
helpers.register_l3_agent(
host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
arg_list = (portbindings.HOST_ID,)
with self.subnet() as subnet,\
self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}):
l3_notifier = mock.Mock()
self.l3_plugin.l3_rpc_notifier = l3_notifier
self.l3_plugin.agent_notifiers[
constants.AGENT_TYPE_L3] = l3_notifier
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': subnet['subnet']['id']})
self.l3_plugin.schedule_router(self.context, router['id'])
self.l3_plugin.remove_router_interface(
self.context, router['id'],
{'subnet_id': subnet['subnet']['id']})
l3_notifier.router_removed_from_agent.assert_called_once_with(
self.context, router['id'], HOST1)
def test_router_auto_scheduling(self):
router = self._create_router()
agents = self.l3_plugin.list_l3_agents_hosting_router(
self.context, router['id'])
# router is not scheduled yet
self.assertEqual([], agents['agents'])
l3_rpc_handler = l3_rpc.L3RpcCallback()
# router should be auto scheduled once l3 agent requests router ids
l3_rpc_handler.get_router_ids(self.context, self.l3_agent['host'])
agents = self.l3_plugin.list_l3_agents_hosting_router(
self.context, router['id'])
self.assertEqual(1, len(agents['agents']))
self.assertEqual(self.l3_agent['id'], agents['agents'][0]['id'])
|
|
#!/usr/bin/env python
#
#
# This file is part of the Protein Engineering Analysis Tool (PEAT)
# (C) Copyright Jens Erik Nielsen, University College Dublin 2003-
# All rights reserved
#
#
# Rewritten by D Farrell, Jan 2009
#
import cgi
import sys,os, string, types
os.environ['MPLCONFIGDIR']='/tmp'
from PEATDB.Base import PDatabase
from PEATDB.Record import PEATRecord
from PEATDB.PEATTables import PEATTableModel, PEATTable
from PEATDB.web import PEATWeb
from pKD import pKDInterface
class titdbWeb(PEATWeb):
"""Class providing a web interface for PEAT projects subclassed for titration_db"""
def __init__(self, server='localhost', project='test', port=8080,
user=None, passwd=None,
bindir='', fullpath=''):
"""bindir : path to cgi scripts in server address, usually 'titration_db'
fullpath : file system path to script folder """
import socket
self.host = socket.getfqdn(socket.gethostname())
self.sessionkey=None
self.form=cgi.FieldStorage()
self.server = server
self.project = project
self.user = user
self.password = passwd
self.port = port
self.bindir = bindir
dirname = os.path.basename(fullpath)
self.imgdir = '/' + dirname + '/images'
self.plotsdir = '/'+ dirname + '/plots'
self.imagepath = fullpath + '/plots'
action = self.form.getfirst('action')
self.action = action
if not self.form.getfirst('action'):
self.show_intro()
return
elif action == 'show_intro':
self.show_intro()
elif action == 'show_help':
self.show_help()
elif action == 'summary':
self.showSummary()
elif action == 'analysis':
self.showAnalysis()
elif action == 'show_all':
self.show_all()
elif action=='show_specific':
self.show_specific()
elif action == 'show_datasets':
self.show_datasets()
elif action == 'search':
self.show_search_results()
elif action == 'downloads':
self.showDownloads()
elif action == 'selectpKD':
self.selectpKD()
elif action == 'showpKD':
self.showpKD()
return
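# Example (sketch; paths and connection details are hypothetical): a thin
# main.cgi entry point would simply instantiate the interface and let
# __init__ dispatch on the 'action' form field.
#
#   web = titdbWeb(server='localhost', project='titration_db', port=8080,
#                  bindir='/titration_db', fullpath='/var/www/titration_db')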
def show_intro(self):
'''Show intro page'''
self.showHeader(menu=1)
print '<div class="main">'
print '<img src="%s/intro_image.png" width=250 class="align-right">' %self.imgdir
print '<h1>Welcome to the protein NMR pH titration database.</h1>'
print 'NMR-monitored pH titration experiments are routinely used to measure\
site-specific protein pKa values. Accurate experimental pKa values are\
essential in dissecting enzyme catalysis, in studying the pH-dependence of protein\
stability and ligand binding and ultimately in understanding electrostatic effects in proteins.\
It is therefore important to retain the raw NMR spectroscopic data to allow for\
possible reinterpretation.'
print '<br>'
print 'This web interface provides access to a database of experimental NMR pH titration\
curves obtained largely from published sources.'
print '<br><br>'
print 'It is designed to provide the following services:'
print '<UL>\
<LI>Browse contents of the DB in one table\
<LI>Search for curves based on residue name, pka values, residue type etc.\
<LI>Visualise and overlay any combination of curves (and our fits)\
<LI>View some summary statistics on the current dataset\
<LI>Download the raw data as csv/text files and refit/analyse\
</UL>'
print 'We are continually trying to expand the dataset and rely on researchers to voluntarily provide \
us with their data. If you wish to submit data to be included here, please send the \
tabulated data in any convenient text format (see help page) by e-mail to us at \
<a href="mailto:[email protected]">[email protected]</a><p>'
print '<center><img src="%s/banner_icon.png"></center>' %self.imgdir
print '</div>'
self.footer()
return
def show_help(self):
'''Show help page'''
self.showHeader(menu=1)
print '<div class="main">'
print '<h2>The primary help information for using these pages is available <a href="http://enzyme.engr.ccny.cuny.edu/wiki/index.php/TitrationDB"> here</a></h2>'
print '<h2>Performing searches:</h2>'
print '<UL>\
<LI>Searching by protein name - these can be separated by spaces - you may match any or all of the words in the search phrase by choosing from the match drop-down list\
<LI>Searching by pka value - enter values as ranges, eg. 3-6 or sets of ranges separated by spaces, eg. 1-2 3-4\
<LI>Searching by residue - simply enter the name of each residue, e.g. GLU, or residue numbers separated by spaces, e.g. 45 34 67 will return any residues with those numbers in them\
<LI>Searches are not case sensitive\
<LI>Searching/filtering by nucleus - choose one of the three nuclei from the drop down menu, if required\
</UL>'
print '<h3>Please report any bugs or requests for improvements to <a href="mailto:[email protected]">[email protected]</a><br><br>'
print 'You may cite this database using the following reference:</h3>'
print '<b>Farrell, D., et al., Titration_DB: Storage and analysis of NMR-monitored protein pH titration curves.<br>'
print 'Proteins: Structure, Function, and Bioinformatics, 2010 Mar;78(4):843-57</b>'
print '<a href="http://www3.interscience.wiley.com/journal/122593997/abstract"> link </a>'
print '</div>'
self.footer()
return
def showHeader(self,title=None,menu=None):
"""Show page header"""
imgdir = self.imgdir
#html_header = 'Content-Type: text/html; charset=utf-8\n'
#html_header += '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/loose.dtd">\n\n'
print "Content-type: text/html; charset=utf-8\n"
print '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">'
print '<html>'
print '<head>'
if title==None:
print '<title>Protein Titration Database</title>'
else:
print '<title>%s</title>' % title
print '<link href="%s/styles.css" rel="stylesheet" type="text/css" />' %self.bindir
print '<link rel="shortcut icon" href="%s/favicon.ico" type="image/x-icon" />' %self.bindir
print '<script type="text/javascript" src="%s/scripts/checkbox.js"></script>' %self.bindir
print '</head>'
print '<body>'
print '<div class="header">'
print '<a href="http://enzyme.engr.ccny.cuny.edu/wiki/index.php/TitrationDB">Help</a>'
print '<a href="mailto:[email protected]">Contact</a>'
print '</div>'
print '<script type="text/javascript" src="%s/scripts/boxover.js"></script>' %self.bindir
if menu==1:
self.menu()
return
def footer(self):
"""Print page footer"""
print '<br>'
print '<p><center>Provided by <a href="http://enzyme.ucd.ie">Nielsen Research Group at UCD</a></center></p>'
print '<p><center>Supported by <a href="http://www.sfi.ie">Science Foundation Ireland</a></center></p>'
print '<center><a href="http://www.sfi.ie"><img src="%s/sfi_logo.png" width=200 border="0"></a></center></p>' %self.imgdir
print '</div>'
print '</div>'
print '</body>'
return
def showFormButton(self, action, label, helptext=None):
"""Add a button"""
bindir = self.bindir
print '<form action="%s/main.cgi" METHOD="POST" ENCTYPE="multipart/form-data">' %bindir
self.write_sessionkey(action)
print '<tr><td class="menu">'
print '<input type=submit value="%s" name=submit class="btn"' %label
if helptext != None:
print 'title="header=[%s] body=[%s]"></td></tr>' %(label,helptext)
else:
print '></td></tr>'
print '</form>'
print
return
def menu(self):
"""Print the menu"""
bindir = self.bindir
print '<div class="menu">'
print '<table id="menu" valign=top align=left>'
print '<td class="menu"><b><img src="%s/titDB_logo.png" width="230"></b></td>' %self.imgdir
print
self.showFormButton('show_intro', 'Home')
self.showFormButton('show_all', 'Browse Records', 'Browse all records in the DB')
self.showFormButton('summary', 'Summary','Summary statistics')
self.showFormButton('analysis', 'Analysis')
self.showFormButton('downloads', 'Downloads', 'Export data in text format')
self.showFormButton('selectpKD', 'pKD','Cross reference curves with calculated data from pKD server')
self.showFormButton('show_help', 'Help')
searchmessage = 'Enter your search here'
print '<tr><td class="menu"><a>SEARCH OPTIONS</td><a></tr>'
print '<form action="%s/main.cgi" METHOD="POST" ENCTYPE="multipart/form-data">' %bindir
self.write_sessionkey('search')
#protein search box
print '<tr><td class="menu">'
print 'Protein: '
print '<select name="proteinmatchmethod" class="btn">'
print '<option value="or">Any words'
print '<option value="and">All words'
print '</select>'
print '</td></tr>'
print '<td class="menu">'
print """<input type="text" name="words" size=22 maxlength=50 value="" class="btn1"\
title="header=[Protein search] body=[Enter multiple names seperated by a space.\
This search is not case sensitive]"></td></tr>"""
#residue search box
print '<tr><td class="menu">'
print 'Residue: '
print '<tr><td class="menu">'
print """<input type="text" name="residue" size=22 maxlength=40 value="" class="btn1"\
title="header=[Residue type] body=[Use three letter codes,\
separated by a space e.g. GLU ASP LYS]"></td></tr>"""
#pka search box
print """<tr><td class="menu"> pKa range: <input type="text" name="pka" size=22 \
maxlength=30 value="" class="btn1" title="header=[pKa range] body=[Enter a range of values using \
this format: e.g. 5-6]"></td></tr>"""
#nucleus search box
print '<tr><td class="menu">'
print 'Nucleus: <select name="nucleus" class="btn">'
print '<option value="any">Any'
print '<option value="1H">1H'
print '<option value="15N">15N'
print '<option value="13C">13C'
print '</select>'
print '</td></tr>'
#drop list for whether to match any or all of the searches from each box
print '<tr><td class="menu">'
print """Match: <select name="matchmethod" class="btn" title="header=[Global match method] \
body=[Match records with ALL of the search criteria or only those with ANY of those\
attributes]">"""
print '<option value="and">All'
print '<option value="or">Any'
print '</select> <p>'
print '</td></tr>'
print '<tr><td class="menu"><input type="submit" value="Search Now" class="btn"></td></tr>'
print '</form>'
print '</table>'
print '</div>'
print '<div id="content">'
return
def showSummary(self):
from PEATDB.Ekin.Titration import TitrationAnalyser
self.showHeader(menu=1)
DB = self.DB = self.connect()
sys.stdout.flush()
t = TitrationAnalyser()
ekindata = t.getEkinDicts(DB)
print '<div class="main">'
t.dotitDBStats(ekindata)
print '</div>'
return
def showAnalysis(self):
"""Analysis of current pKas"""
from PEATDB.Ekin.Titration import TitrationAnalyser
self.showHeader(menu=1)
DB = self.DB = self.connect()
t = TitrationAnalyser()
print '<div class="main">'
print '<p>Selected plots below reflect some of the analysis shown in the \
<a href="%s/paper_2010.pdf"> original paper</a> updated for the current dataset. </p>' %self.bindir
print '<p>The distributions shown are of the change in chemical shift over all \
detected titrations. `Reliable` pKas are those associated with \
the largest chemical shift changes in a titration curve and that meet the criteria defined in \
the paper. We define primary pKa values simply as the subset of the reliable pKa values \
that originate from titration curves with only one titration.</p>'
sys.stdout.flush()
colnames = ['1H NMR','15N NMR','13C NMR']
for col in colnames:
p = t.extractpKas(DB,col,silent=True,minspan=0.06)
print '<div>'
print "<h2>%s: Distribution of Δδ for fitted pKa values</h2>" %col
img1 = t.analysepKas(p, silent=True, prefix=col, path=self.imagepath)
#t.makepKasTable(p)
print '<img src="%s/%s" align=center width=800 class="plot">' %(self.plotsdir, img1)
print '</div>'
sys.stdout.flush()
#compare nuclei
img2, img3 = t.compareNuclei(DB, '15N NMR', '1H NMR', titratable=False, silent=True, path=self.imagepath)
print '<p>Below is an analysis of the correspondence between fitted pKas for 1H and 15N \
where they are available for the same residue in the same protein. This is the same\
plot as figure 4 in the original paper updated for the current dataset.\
The plots are divided into reliable and other pKas for comparison.</p>'
print '<div>'
print '<center><img src="%s/%s" align=center width=600 class="plot"></center>' %(self.plotsdir, img2)
print '</div>'
print '<p>The same plot as above broken down by residue type and shown only for titratable\
residues.</p>'
print '<div>'
print '<center><img src="%s/%s" align=center width=600 class="plot"></center>' %(self.plotsdir, img3)
print '</div>'
self.footer()
return
def showDownloads(self):
"""Downloads links"""
self.showHeader(menu=1)
print '<div class="main">'
print '<h2>Downloads</h2>'
print '<h3>The entire dataset may be downloaded in text format as a single zip file.</h3>'
print
#print '<p>Exporting current data, please wait a moment...</p>'
sys.stdout.flush()
DB = self.DB = self.connect()
from PEATDB.Ekin.Titration import TitrationAnalyser
t = TitrationAnalyser()
filename = t.exportAll(DB)
print '<h2><a href="%s/%s"> download zip</a></h2>' %(self.bindir,os.path.basename(filename))
print '</div>'
self.footer()
return
def do_search(self, globalop, proteinop, proteins, residues, nucleus, pka):
"""Do searches for the various parameters, name, pka etc"""
import re, urllib
from PEATDB.PEATTables import PEATTableModel
import PEATDB.Ekin.Fitting as Fitting
from PEATDB.Ekin.Web import EkinWeb
DB = self.DB
EW = EkinWeb()
ekincols = DB.ekintypes
found=[]
protlist = proteins.split(' ')
residuelist = residues.split(' ')
def createPhrase(items, op):
if op == 'or':
logicsymbol = '|'
else:
logicsymbol = '+'
itemlist = items.split(' ')
phrase =''
c=1
for i in itemlist:
if c == len(itemlist):
phrase = phrase + i
else:
phrase = phrase + i + logicsymbol
c=c+1
return re.compile(phrase, re.IGNORECASE)
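# Example (comment only): createPhrase('HEWL lysozyme', 'or') compiles the
# case-insensitive pattern 'HEWL|lysozyme'; with op='and' the words are joined
# with '+' instead, giving 'HEWL+lysozyme'.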
#if there is a protein name entered, get the list of proteins/records to use
#otherwise use all records to search for the other keys
names = []
keywords = {}
for p in DB.getRecs():
name = DB[p].name
names.append((name,p))
if hasattr(DB[p], 'keywords'):
keywords[name] = DB[p].keywords
else:
keywords[name] = ''
names.sort()
#create search expressions
s_protein = createPhrase(proteins, proteinop)
s_residue = createPhrase(residues, 'or')
pkarange = pka.split('-')
#do search
for name in names:
proteinname = name[0]
protein = name[1]
if s_protein.search(proteinname) or s_protein.search(keywords[proteinname]):
found.append(protein)
if len(found)==0:
print '<h2>Sorry, no proteins found that match this name.</h2>'
return
elif residues == '' and pka == '':
self.show_DB(selected=found)
return found
#now search for the other keys inside selected proteins if needed
ekinfound={}; foundfits={}
ignore =['__fit_matches__', '__Exp_Meta_Dat__', '__datatabs_fits__']
kys = list(DB.getRecs())
kys.sort()
if len(found) == 0 or globalop == 'or':
found = DB.getRecs()
print '<table id="mytable" cellspacing=0 align=center>'
'''search recs for residue and pkarange
and add dataset names to new ekinfound dict'''
for protein in found:
for col in DB[protein].keys():
if nucleus != 'any':
if nucleus not in col:
continue
E = DB[protein][col]
if DB['userfields'].has_key(col):
fieldtype=DB['userfields'][col]['field_type']
else:
fieldtype=None
if fieldtype in ekincols:
dk = E.datasets
fits = E.__datatabs_fits__
for k in dk:
meta = E.getMetaData(k)
try:
thisres = meta['residue']
except:
thisres = k
if thisres == None:
thisres = k
if residues != '':
if s_residue.search(thisres) and not k in ignore:
if not ekinfound.has_key(protein+'$'+col):
ekinfound[protein+'$'+col]=[]
ekinfound[protein+'$'+col].append(k)
if pka != '':
foundpka = 0
if k in fits.keys():
if fits[k] == None:
continue
if fits[k].has_key('model'):
model = fits[k]['model']
else:
continue
if not foundfits.has_key(protein+'$'+col):
foundfits[protein+'$'+col]={}
X = Fitting.getFitter(model)
pnames = X.varnames
i=0
#check if first num is larger, then swap
try:
pk1=float(pkarange[0])
pk2=float(pkarange[1])
except:
print '<h2> Error: pka values are not valid </h2>'
return
if pk1 > pk2:
tmp = pk1
pk1 = pk2
pk2 = tmp
#iterate thru parameter names and match any pK fields
#this code is not that efficient!
for p in pnames:
if 'pK' in p:
pka = fits[k][i]
if pka >= pk1 and pka <= pk2:
foundpka=1
i=i+1
#if match is 'ANY', just append dataset if not there already
#also for case if no residue value entered
if globalop == 'or' or residues == '':
if foundpka == 1:
if not ekinfound.has_key(protein+'$'+col):
ekinfound[protein+'$'+col]=[]
if not k in ekinfound[protein+'$'+col]:
ekinfound[protein+'$'+col].append(k)
#if match is 'ALL', need to check dataset already found
#and if no pka found, remove it. if both are true keep it
elif globalop == 'and':
if foundpka == 0:
if ekinfound.has_key(protein+'$'+col) and k in ekinfound[protein+'$'+col]:
#print 'removing', protein, col
ekinfound[protein+'$'+col].remove(k)
foundfits[protein+'$'+col][k]=fits[k]
#check for empty fields in ekinfound dict and delete them..
for d in ekinfound.keys():
if len(ekinfound[d])==0:
del(ekinfound[d])
#if no results, just say that and return
if len(ekinfound)==0:
print '<h2> Sorry, no records found that match these parameters. </h2>'
return
#top display button and options
print '<form name="resultsform" action="%s/main.cgi" METHOD="POST" ENCTYPE="multipart/form-data">' %self.bindir
self.write_sessionkey('show_datasets')
print '<td valign=top align=right colspan=3> <input type=submit value="display selected" name=submit>'
print '<label><input type=checkbox name="plotoption" value=3>single graph</label>'
print '<label><input type=checkbox name="normalise" value=1>normalise</label>'
print '<label><input type=checkbox name="logx" value=1>log-x</label>'
print '<label><input type=checkbox name="logy" value=1>log-y</label>'
print '<label><input type=checkbox name="legend" value=1>legend</label>'
print '<input type=button value="Check All" onClick="checkAll(document.resultsform.residue)">'
print '<input type=button value="Uncheck All" onClick="uncheckAll(document.resultsform.residue)"></td>'
print '</tr>'
#header
cols=['protein','column','residues']
for c in cols:
print '<th>'+c+'</th>'
print '</tr>'
ekys = ekinfound.keys()
ekys.sort()
r=1
for k in ekys:
if r % 2 == 0:
cls = "spec"
else:
cls = ""
fields = k.split('$')
protein = fields[0]
column = fields[1]
proteinname = DB[protein]['name']
print '<tr>'
print '<th class="spec">%s</th> <td> %s </td>' %(proteinname, column)
print '<td>'
for residue in ekinfound[k]:
residuefield = k+"$"+residue
fithtml = ''
try:
print '<input type=checkbox id="residue" name="residue" value=%s>' %("'"+residuefield+"'")
except:
print 'UnicodeEncodeError'
continue
urlfields = urllib.urlencode({'login':self.user,'project':self.project,
'residue': residuefield,'action': 'show_datasets'})
print '<a href="/cgi-bin/titration_db/main.cgi?%s" target="_blank">%s</a>'\
% (urlfields, residue)
print '</tr>'
r=r+1
print '</form>'
print '</table>'
self.footer()
return
def show_search_results(self):
"""Display search results"""
self.showHeader(menu=1)
sys.stdout.flush()
self.DB = self.connect()
key=self.form.getfirst('key')
matchmethod=self.form.getfirst('matchmethod')
proteinmatchmethod=self.form.getfirst('proteinmatchmethod')
words=self.form.getfirst('words')
residue=self.form.getfirst('residue')
nucleus=self.form.getfirst('nucleus')
pka=self.form.getfirst('pka')
print '<table bgcolor=#CD9B9B border="1" bordercolor=black cellspacing=0 cellpadding=4 valign=top \
align=center width=80%><tr><td>'
print '<div align=left>'
print '<big><a>Search results for protein=%s %s residue=%s %s nucleus=%s pka=%s</a></big>'\
%(words,matchmethod,residue,matchmethod,nucleus,pka)
print '<br>'
print '</div>'
print '</table>'
self.do_search(matchmethod, proteinmatchmethod, words, residue, nucleus, pka)
return
def selectpKD(self):
"""Allow user to select a protein and field for pKD"""
self.showHeader(menu=1)
sys.stdout.flush()
DB = self.DB = self.connect()
names = []
for p in DB.getRecs():
names.append((DB[p].name,p))
names.sort()
nucl = ['1H NMR', '15N NMR', '13C NMR']
#check pKD available calcs
P = pKDInterface()
pkdprots = [string.upper(e[0]) for e in P.getProteins()]
#add selection boxes
print '<div align=left>'
print '<form name="pkdform" action="%s/main.cgi" METHOD="POST" ENCTYPE="multipart/form-data">' %self.bindir
self.write_sessionkey('showpKD')
print '<big>Select experimental curves to compare with calculated curves in pKD</big><br>'
print '<a>This will show a set of plots for each residue where there is an experimental and calculated curve</a><p>'
print 'Choose the protein and nucleus:<p>'
print '<select name="protein" class="btn">'
for n in names:
print '<option value="%s">%s</option>' %(n[1],n[0])
print '</select>'
print '<select name="column" class="btn">'
for c in nucl:
print '<option value="%s">%s</option>' %(c,c)
print '</select>'
print '<p>'
#button
print '<input type=submit value="Display Plots" name=submit class="btn">'
print '<label><input type=radio name="option" value=1 checked="yes">compare both</label>'
print '<label><input type=radio name="option" value=2>exp only</label>'
print '<label><input type=radio name="option" value=3>calcs only</label>'
print '</form>'
print '</div>'
print 'Below is a reference table showing which proteins in our DB currently have calculations in the pKD, with the PDB ID and PDB link.<br>'
print 'If the protein is not yet available, you can go to <a href=http://enzyme.ucd.ie/cgi-bin/pKD/server_start.cgi target="blank"> \
the pKD page</a> and submit the calculation.<p>'
#table of proteins and their PDBs
print '<div align=left>'
print '<table id="mytable" cellspacing="0">'
r=0
print '<th>Protein</th><th>PDB</th><th>Has pKD data</th>'
for name in names:
protein = name[1]
protname = name[0]
if not DB[protein].has_key('PDB_link'):
continue
pdbinfo = DB[protein].PDB_link
if pdbinfo == '':
continue
pdbid = pdbinfo['text']
pdblink = pdbinfo['link']
if r % 2 == 0:
cls = 'alt'
else:
cls = ''
print '<tr>'
print '<td class=%s> %s </td>' % (cls, protname)
print '<td class=%s> <a href=%s target="blank"> %s</a> </td>' % (cls, pdblink, pdbid)
if pdbid in pkdprots:
status = 'yes'
else:
status = 'no'
print '<td class=%s> <b>%s</b> </td>' %(cls, status)
print '</tr>'
r=r+1
print '</table>'
return
def showpKD(self):
"""Show pKD calcs with exp data for selected ekindata"""
protein = self.form.getfirst('protein')
col = self.form.getfirst('column')
showopt = self.form.getfirst('option')
self.showHeader(menu=1)
sys.stdout.flush()
DB = self.DB = self.connect()
protname = DB[protein]['name']
pdbid = DB[protein]['PDB_link']['text']
expdata = DB[protein][col]
if type(expdata) is types.StringType:
print '<a>No data for column %s, go back and choose another nucleus..</a>' %col
return
P = pKDInterface()
calcs, pkas, pdblines = P.loadfrompKD(pdbid)
print '<div><a>Protein: %s PDB: %s </a><br>' %(protname, pdbid)
print 'Column data: %s <p></div>' %col
if calcs == None or not type(calcs) is types.DictType:
print '<a>There are currently no pKD calculations for this protein.</a>'
return
else:
print '<a>Found data on pKD server. We can plot.</a>'
sys.stdout.flush()
#plot them
self.plotpKDCalcs(calcs, expdata, showopt)
self.footer()
return
def plotpKDCalcs(self, calcs, Ed=None, option=1):
"""Do pKD calcs with exp data plots"""
from PEATDB.Ekin.Web import EkinWeb
from PEATDB.Ekin.Base import EkinProject
from PEATDB.Ekin.Convert import EkinConvert
from PEATDB.Ekin.Titration import TitrationAnalyser
import PEATDB.Ekin.Utils as Utils
t = TitrationAnalyser()
c=calcs
EW = EkinWeb()
if option == '2':
print '<a>Just showing experimental data</a>'
EW.showEkinPlots(project=Ed, datasets='ALL',
path=self.plotsdir,
imgpath=self.imagepath)
return
#create ekin proj from pKD titcurves
Ec = EkinProject()
for r in c.keys():
xd=[];yd=[]
for i in c[r]:
if type(i) is types.StringType:
continue
xd.append(i)
yd.append(c[r][i])
edata=EkinConvert.xy2ekin([xd,yd])
Ec.insertDataset(edata, r)
print '<a>Please wait, fitting calculated curves...</a>'
sys.stdout.flush()
Ec.fitDatasets(models=['1 pKa 2 Chemical shifts'], silent=True)
if option == '3':
print '<a>Just showing pKD data</a>'
EW.showEkinPlots(project=Ec, datasets='ALL',
path=self.plotsdir,
imgpath=self.imagepath)
return
#transform exp data names to match pKD ones
s=':'
usechainid = True
#if pKD names have no chain id, we don't need one for exp names
if Ec.datasets[0].startswith(':'):
usechainid=False
for d in Ed.datasets[:]:
r = Ed.getMetaData(d)
if r != None:
if r['chain_id'] == None or usechainid == False:
chain = ''
else:
chain = r['chain_id']
new = chain+s+Utils.leadingZeros(r['res_num'],4)+s+r['residue']
if new in Ed.datasets:
atom = r['atom']
new = new + '_' + atom
Ed.renameDataset(d, new)
#now we overlay the same datasets in Ed and Ec
#also handles cases where same residue multiple times for diff atoms in exp data
for d in Ed.datasets:
if d in Ec.datasets:
Ep = EkinProject()
cdata = Ec.getDataset(d)
Ep.insertDataset(cdata, d+'_pKD')
Ep.setFitData(d+'_pKD', Ec.getFitData(d))
ddata = Ed.getDataset(d)
Ep.insertDataset(ddata, d+'_exp')
Ep.setFitData(d+'_exp', Ed.getFitData(d))
EW.showEkinPlots(project=Ep, datasets='ALL', plotoption=3,
normalise=True, legend=True,
path=self.plotsdir,
imgpath=self.imagepath)
return
if __name__ == '__main__':
PEATWeb()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import os
import serial
from time import sleep
from time import time
import sys
import json
import argparse
from ksp_console import *
import signal
#port = "COM4"
port = "/dev/ttyS6"
ser = ""
args = ""
class Telemetry:
def __init__(self, conn, args):
self.conn = conn
self.args = args
self.init_vessel()
def init_vessel(self):
self.altitude = 0
self.apoapsis = 999
self.speed = 0
self.sas = False
self.rcs = False
self.lights = False
self.gear = False
self.brakes = False
def add_orbit_to_status(self, status_updates):
status_updates[str(INFO_PERIAPSIS)] = "999"
status_updates[str(INFO_PERIAPSIS_TIME)] = "8s"
return status_updates
def add_landing_info(self, status_updates):
status_updates[str(INFO_SURFACE_HEIGHT)] = "124"
status_updates[str(INFO_SURFACE_TIME)] = "2s"
return status_updates
def add_data(self, status_updates):
self.altitude = self.altitude+1
self.speed = 22
if (int(time())%10) < 5:
self.button = 1
else:
self.button = 0
status_updates[str(INFO_HEIGHT)] = self.altitude
status_updates[str(INFO_SPEED)] = self.speed
status_updates[str(BUTTON_SAS)] = self.button
status_updates[str(BUTTON_RCS)] = self.button
status_updates[str(BUTTON_LIGHTS)] = self.button
status_updates[str(BUTTON_GEAR)] = self.button
status_updates[str(BUTTON_BREAKS)] = self.button
status_updates = self.add_orbit_to_status(status_updates)
status_updates = self.add_landing_info(status_updates)
return status_updates
def serial_read_line():
global ser
serial_data = ""
while True:
data = ser.read(1)
if len(data) > 0:
data = data.decode('iso8859-1')
if data=='\n':
return serial_data
serial_data += data
# normalize the value to the range -1 ... 1
def normiere_joystick(value):
if value>512:
value=512
if value<-512:
value=-512
value = float(value)
value = value/512.0
return value
def normiere_throttle(value):
if value<20:
return 0;
if value>890:
return 1;
return float(value)/1000.0;
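# Illustrative sanity check for the two normalisation helpers above; it is
# never called by the script and the values are made up. Joystick readings
# are clamped to +/-512 and scaled to [-1, 1]; throttle readings below 20
# count as idle, above 890 as full throttle, everything else is divided by 1000.
def check_normalisation():
    # centre, full deflection and an out-of-range joystick value
    assert normiere_joystick(0) == 0.0
    assert normiere_joystick(512) == 1.0
    assert normiere_joystick(-9999) == -1.0
    # throttle dead zone, full throttle and a mid-range value
    assert normiere_throttle(10) == 0
    assert normiere_throttle(950) == 1
    assert abs(normiere_throttle(500) - 0.5) < 1e-9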
def send_handshake():
send_data = {}
send_data["start"] = 2016
send_serial( CMD_INIT, send_data)
def send_serial( command, send_data):
global args
global ser
send_data["cmd"]=command;
data=json.dumps(send_data,separators=(',',':'))+"\n"
if args.debugsend:
print("sending %d bytes " % len(data))
print("send: "+data)
sys.stdout.flush()
data = data.encode('iso8859-1')
#have to send in 32 byte chunks to avoid losing data
len_data = str(len(data))+":"
ser.write(len_data.encode('iso8859-1'))
while( len(data)>0 ):
send_pkt = data[:32]
data = data[32:]
ser.write(send_pkt)
ser.flush()
response = ""
while( len(response)!=2 ):
response += ser.read(1).decode('iso8859-1')
# print( "response: " + response)
# sys.stdout.flush()
if response != "OK":
print( "got the wrong ACK for the serial protocol: " +response )
def decode_json_array(arr):
res={}
print(len(arr))
for index in range( 0, len(arr), 2):
# if int(arr[index])>7:
res[arr[index]]=arr[index+1]
return res
def encode_json_array(arr):
res=[]
for element in arr:
res.append(int(element))
res.append(arr[element])
return res
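# Illustrative round trip through the two helpers above, using made-up values
# (the real keys are the ksp_console constants): dictionaries travel as flat
# [key, value, key, value, ...] arrays, and pair order follows dict iteration
# order.
def example_round_trip():
    flat = encode_json_array({"3": 1250, "7": 1})   # e.g. [3, 1250, 7, 1]
    return decode_json_array(flat)                  # e.g. {3: 1250, 7: 1}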
def send_updates(status_updates):
send_data = encode_json_array(status_updates)
send_serial( CMD_UPDATE_CONSOLE, {"data":send_data})
status_updates={}
def work_on_json(input_data):
global args
global state
json_data = json.loads(input_data)
data = decode_json_array(json_data["data"])
if args.debugrecv:
if len(data)>0:
print( data )
sys.stdout.flush()
# main
def main_function():
global ser
global args
global state
global telemetry
last_chip_data = 0
parser = argparse.ArgumentParser()
parser.add_argument("--debugsend", help="print data sent to con", action="store_true")
parser.add_argument("--debugrecv", help="print some received from con", action="store_true")
parser.add_argument("--debugchip", help="print chip debug output", action="store_true")
args = parser.parse_args()
telemetry = Telemetry( "nix", args)
ref_time = datetime.datetime.now()
serial_connected = False
# run forever (until ctrl-c)
while True:
try:
if serial_connected == False:
print("trying serial")
ser = serial.Serial( port, 115200, timeout=2)
ser.reset_input_buffer()
ser.reset_output_buffer()
sleep(5)
print("sending handshake")
send_handshake()
print("serial OK")
serial_connected = True
except (serial.SerialException, ConnectionRefusedError):
print("serial failed")
serial_connected = False
pass
try:
if serial_connected:
while True:
# we have two types of commands
# 1) commands that request data and get a response
# 2) commands that just send data to the arduino and get no response
# so, after we have sent one of type 1 we know we have
# to wait for the reply before we can send again
# after we send one of type 2 we don't know how long
# the processing on the arduino is going to take, so
# we always follow a type 2 command with a type 1
# command
now = datetime.datetime.now()
time_diff = now - ref_time
# this works command driven, so we send commands,
# wait for the reply and done
# every 1-2 seconds: send update to the arduino
#if (time_diff.seconds>1 or time_diff.microseconds>300000) and ser.out_waiting == 0:
if (time_diff.seconds>1 or time_diff.microseconds>100000):
status_updates = {}
status_updates = telemetry.add_data(status_updates)
send_updates(status_updates)
ref_time = now
# read the current status and button updates and so on
send_serial( CMD_GET_UPDATES, {})
serial_data=serial_read_line()
if args.debugrecv:
print("Got %d bytes of data\n" % (len(serial_data)))
print(serial_data)
sys.stdout.flush()
if args.debugchip:
try:
data = json.loads(serial_data)
if "chip" in data and data["chip"]!=last_chip_data:
print("Chip: "+str(data["chip"]))
last_chip_data = data["chip"]
sys.stdout.flush()
except ValueError:
print('Decoding JSON failed for: '+serial_data)
work_on_json(serial_data)
else:
# not everything connected, sleep and try again
print( "Connection missing: Serial:%s\n" %
("connected" if serial_connected else "disconnected"))
sleep(1)
except serial.SerialException:
serial_connected = False
pass
if __name__ == '__main__':
main_function()
|
|
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2006, Frank Scholz <[email protected]>
import os, stat
import tempfile
import shutil
import time
import re
from datetime import datetime
import urllib
from sets import Set
import mimetypes
mimetypes.init()
mimetypes.add_type('audio/x-m4a', '.m4a')
mimetypes.add_type('video/mp4', '.mp4')
mimetypes.add_type('video/mpegts', '.ts')
mimetypes.add_type('video/divx', '.divx')
from urlparse import urlsplit
from twisted.python.filepath import FilePath
from twisted.python import failure
from coherence.upnp.core.DIDLLite import classChooser, Container, Resource
from coherence.upnp.core.DIDLLite import DIDLElement
from coherence.upnp.core.DIDLLite import simple_dlna_tags
from coherence.upnp.core.soap_service import errorCode
from coherence.upnp.core import utils
try:
from coherence.extern.inotify import INotify
from coherence.extern.inotify import IN_CREATE, IN_DELETE, IN_MOVED_FROM, IN_MOVED_TO, IN_ISDIR
from coherence.extern.inotify import IN_CHANGED
haz_inotify = True
except Exception,msg:
haz_inotify = False
no_inotify_reason = msg
from coherence.extern.xdg import xdg_content
import coherence.extern.louie as louie
from coherence.backend import BackendItem, BackendStore
## Sorting helpers
NUMS = re.compile('([0-9]+)')
def _natural_key(s):
# strip the spaces
s = s.get_name().strip()
return [ part.isdigit() and int(part) or part.lower() for part in NUMS.split(s) ]
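# Illustrative use of the natural-sort key above (the names are made up, and
# the small stand-in class exists only because _natural_key expects an object
# with a get_name() method): 'Track 2' sorts before 'Track 10', since the
# keys become ['track ', 2, ''] and ['track ', 10, ''].
class _NaturalKeyExample(object):
    def __init__(self, name):
        self.name = name
    def get_name(self):
        return self.name
def _natural_key_example():
    items = [_NaturalKeyExample('Track 10'), _NaturalKeyExample('Track 2')]
    return [i.get_name() for i in sorted(items, key=_natural_key)]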
class FSItem(BackendItem):
logCategory = 'fs_item'
def __init__(self, object_id, parent, path, mimetype, urlbase, UPnPClass,update=False,store=None):
self.id = object_id
self.parent = parent
if parent:
parent.add_child(self,update=update)
if mimetype == 'root':
self.location = unicode(path)
else:
if mimetype == 'item' and path is None:
path = os.path.join(parent.get_realpath(),unicode(self.id))
#self.location = FilePath(unicode(path))
self.location = FilePath(path)
self.mimetype = mimetype
if urlbase[-1] != '/':
urlbase += '/'
self.url = urlbase + str(self.id)
self.store = store
if parent == None:
parent_id = -1
else:
parent_id = parent.get_id()
self.item = UPnPClass(object_id, parent_id, self.get_name())
if isinstance(self.item, Container):
self.item.childCount = 0
self.child_count = 0
self.children = []
self.sorted = False
if mimetype in ['directory','root']:
self.update_id = 0
self.get_url = lambda : self.url
self.get_path = lambda : None
#self.item.searchable = True
#self.item.searchClass = 'object'
if(isinstance(self.location,FilePath) and
self.location.isdir() == True):
self.check_for_cover_art()
if hasattr(self, 'cover'):
_,ext = os.path.splitext(self.cover)
""" add the cover image extension to help clients not reacting on
the mimetype """
self.item.albumArtURI = ''.join((urlbase,str(self.id),'?cover',ext))
else:
self.get_url = lambda : self.url
if self.mimetype.startswith('audio/'):
if hasattr(parent, 'cover'):
_,ext = os.path.splitext(parent.cover)
""" add the cover image extension to help clients not reacting on
the mimetype """
self.item.albumArtURI = ''.join((urlbase,str(self.id),'?cover',ext))
_,host_port,_,_,_ = urlsplit(urlbase)
if host_port.find(':') != -1:
host,port = tuple(host_port.split(':'))
else:
host = host_port
try:
size = self.location.getsize()
except:
size = 0
if self.store.server.coherence.config.get('transcoding', 'no') == 'yes':
if self.mimetype in ('application/ogg','audio/ogg',
'audio/x-wav',
'audio/x-m4a',
'application/x-flac'):
new_res = Resource(self.url+'/transcoded.mp3',
'http-get:*:%s:*' % 'audio/mpeg')
new_res.size = None
#self.item.res.append(new_res)
if mimetype != 'item':
res = Resource('file://'+ urllib.quote(self.get_path()), 'internal:%s:%s:*' % (host,self.mimetype))
res.size = size
self.item.res.append(res)
if mimetype != 'item':
res = Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
else:
res = Resource(self.url, 'http-get:*:*:*')
res.size = size
self.item.res.append(res)
""" if this item is of type audio and we want to add a transcoding rule for it,
this is the way to do it:
create a new Resource object, at least a 'http-get'
and maybe an 'internal' one too
for transcoding to wav this looks like that
res = Resource(url_for_transcoded_audio,
'http-get:*:audio/x-wav:%s'% ';'.join(['DLNA.ORG_PN=LPCM']+simple_dlna_tags))
res.size = None
self.item.res.append(res)
"""
if self.store.server.coherence.config.get('transcoding', 'no') == 'yes':
if self.mimetype in ('audio/mpeg',
'application/ogg','audio/ogg',
'audio/x-wav',
'audio/x-m4a',
'application/x-flac'):
dlna_pn = 'DLNA.ORG_PN=LPCM'
dlna_tags = simple_dlna_tags[:]
#dlna_tags[1] = 'DLNA.ORG_OP=00'
dlna_tags[2] = 'DLNA.ORG_CI=1'
new_res = Resource(self.url+'?transcoded=lpcm',
'http-get:*:%s:%s' % ('audio/L16;rate=44100;channels=2', ';'.join([dlna_pn]+dlna_tags)))
new_res.size = None
#self.item.res.append(new_res)
if self.mimetype != 'audio/mpeg':
new_res = Resource(self.url+'?transcoded=mp3',
'http-get:*:%s:*' % 'audio/mpeg')
new_res.size = None
#self.item.res.append(new_res)
""" if this item is an image and we want to add a thumbnail for it
we have to follow these rules:
create a new Resource object, at least a 'http-get'
and maybe an 'internal' one too
for an JPG this looks like that
res = Resource(url_for_thumbnail,
'http-get:*:image/jpg:%s'% ';'.join(['DLNA.ORG_PN=JPEG_TN']+simple_dlna_tags))
res.size = size_of_thumbnail
self.item.res.append(res)
and for a PNG the Resource creation is like that
res = Resource(url_for_thumbnail,
'http-get:*:image/png:%s'% ';'.join(simple_dlna_tags+['DLNA.ORG_PN=PNG_TN']))
if not hasattr(self.item, 'attachments'):
self.item.attachments = {}
self.item.attachments[key] = utils.StaticFile(filename_of_thumbnail)
"""
if self.mimetype in ('image/jpeg', 'image/png'):
path = self.get_path()
thumbnail = os.path.join(os.path.dirname(path),'.thumbs',os.path.basename(path))
if os.path.exists(thumbnail):
mimetype,_ = mimetypes.guess_type(thumbnail, strict=False)
if mimetype in ('image/jpeg','image/png'):
if mimetype == 'image/jpeg':
dlna_pn = 'DLNA.ORG_PN=JPEG_TN'
else:
dlna_pn = 'DLNA.ORG_PN=PNG_TN'
dlna_tags = simple_dlna_tags[:]
dlna_tags[3] = 'DLNA.ORG_FLAGS=00f00000000000000000000000000000'
hash_from_path = str(id(thumbnail))
new_res = Resource(self.url+'?attachment='+hash_from_path,
'http-get:*:%s:%s' % (mimetype, ';'.join([dlna_pn]+dlna_tags)))
new_res.size = os.path.getsize(thumbnail)
self.item.res.append(new_res)
if not hasattr(self.item, 'attachments'):
self.item.attachments = {}
self.item.attachments[hash_from_path] = utils.StaticFile(urllib.quote(thumbnail))
try:
# FIXME: getmtime is deprecated in Twisted 2.6
self.item.date = datetime.fromtimestamp(self.location.getmtime())
except:
self.item.date = None
def rebuild(self, urlbase):
#print "rebuild", self.mimetype
if self.mimetype != 'item':
return
#print "rebuild for", self.get_path()
mimetype,_ = mimetypes.guess_type(self.get_path(),strict=False)
if mimetype == None:
return
self.mimetype = mimetype
#print "rebuild", self.mimetype
UPnPClass = classChooser(self.mimetype)
self.item = UPnPClass(self.id, self.parent.id, self.get_name())
if hasattr(self.parent, 'cover'):
_,ext = os.path.splitext(self.parent.cover)
""" add the cover image extension to help clients not reacting on
the mimetype """
self.item.albumArtURI = ''.join((urlbase,str(self.id),'?cover',ext))
_,host_port,_,_,_ = urlsplit(urlbase)
if host_port.find(':') != -1:
host,port = tuple(host_port.split(':'))
else:
host = host_port
res = Resource('file://'+urllib.quote(self.get_path()), 'internal:%s:%s:*' % (host,self.mimetype))
try:
res.size = self.location.getsize()
except:
res.size = 0
self.item.res.append(res)
res = Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
try:
res.size = self.location.getsize()
except:
res.size = 0
self.item.res.append(res)
try:
# FIXME: getmtime is deprecated in Twisted 2.6
self.item.date = datetime.fromtimestamp(self.location.getmtime())
except:
self.item.date = None
self.parent.update_id += 1
def check_for_cover_art(self):
""" let's try to find in the current directory some jpg file,
or png if the jpg search fails, and take the first one
that comes around
"""
try:
jpgs = [i.path for i in self.location.children() if i.splitext()[1] in ('.jpg', '.JPG')]
try:
self.cover = jpgs[0]
except IndexError:
pngs = [i.path for i in self.location.children() if i.splitext()[1] in ('.png', '.PNG')]
try:
self.cover = pngs[0]
except IndexError:
return
except UnicodeDecodeError:
self.warning("UnicodeDecodeError - there is something wrong with a file located in %r", self.location.path)
def remove(self):
#print "FSItem remove", self.id, self.get_name(), self.parent
if self.parent:
self.parent.remove_child(self)
del self.item
def add_child(self, child, update=False):
self.children.append(child)
self.child_count += 1
if isinstance(self.item, Container):
self.item.childCount += 1
if update == True:
self.update_id += 1
self.sorted = False
def remove_child(self, child):
#print "remove_from %d (%s) child %d (%s)" % (self.id, self.get_name(), child.id, child.get_name())
if child in self.children:
self.child_count -= 1
if isinstance(self.item, Container):
self.item.childCount -= 1
self.children.remove(child)
self.update_id += 1
self.sorted = False
def get_children(self,start=0,request_count=0):
if self.sorted == False:
self.children.sort(key=_natural_key)
self.sorted = True
if request_count == 0:
return self.children[start:]
else:
return self.children[start:request_count]
def get_child_count(self):
return self.child_count
def get_id(self):
return self.id
def get_update_id(self):
if hasattr(self, 'update_id'):
return self.update_id
else:
return None
def get_path(self):
if isinstance( self.location,FilePath):
return self.location.path
else:
return self.location
def get_realpath(self):
if isinstance( self.location,FilePath):
return self.location.path
else:
return self.location
def set_path(self,path=None,extension=None):
if path is None:
path = self.get_path()
if extension is not None:
path,old_ext = os.path.splitext(path)
path = ''.join((path,extension))
if isinstance( self.location,FilePath):
self.location = FilePath(path)
else:
self.location = path
def get_name(self):
if isinstance(self.location,FilePath):
name = self.location.basename().decode("utf-8", "replace")
else:
name = self.location.decode("utf-8", "replace")
return name
def get_cover(self):
try:
return self.cover
except:
try:
return self.parent.cover
except:
return ''
def get_parent(self):
return self.parent
def get_item(self):
return self.item
def get_xml(self):
return self.item.toString()
def __repr__(self):
return 'id: ' + str(self.id) + ' @ ' + self.get_name().encode('ascii','xmlcharrefreplace')
class FSStore(BackendStore):
logCategory = 'fs_store'
implements = ['MediaServer']
def __init__(self, server, **kwargs):
BackendStore.__init__(self,server,**kwargs)
self.next_id = 1000
self.name = kwargs.get('name','my media')
self.content = kwargs.get('content',None)
if self.content != None:
if isinstance(self.content,basestring):
self.content = [self.content]
l = []
for a in self.content:
l += a.split(',')
self.content = l
else:
self.content = xdg_content()
self.content = [x[0] for x in self.content]
if self.content == None:
self.content = 'tests/content'
if not isinstance( self.content, list):
self.content = [self.content]
self.content = Set([os.path.abspath(x) for x in self.content])
ignore_patterns = kwargs.get('ignore_patterns',[])
self.store = {}
self.inotify = None
if haz_inotify == True:
try:
self.inotify = INotify()
except Exception,msg:
self.info("%s" %msg)
else:
self.info("%s" %no_inotify_reason)
if kwargs.get('enable_destroy','no') == 'yes':
self.upnp_DestroyObject = self.hidden_upnp_DestroyObject
self.import_folder = kwargs.get('import_folder',None)
if self.import_folder != None:
self.import_folder = os.path.abspath(self.import_folder)
if not os.path.isdir(self.import_folder):
self.import_folder = None
self.ignore_file_pattern = re.compile('|'.join(['^\..*'] + list(ignore_patterns)))
parent = None
self.update_id = 0
if(len(self.content)>1 or
utils.means_true(kwargs.get('create_root',False)) or
self.import_folder != None):
UPnPClass = classChooser('root')
id = str(self.getnextID())
parent = self.store[id] = FSItem( id, parent, 'media', 'root', self.urlbase, UPnPClass, update=True,store=self)
if self.import_folder != None:
id = str(self.getnextID())
self.store[id] = FSItem( id, parent, self.import_folder, 'directory', self.urlbase, UPnPClass, update=True,store=self)
self.import_folder_id = id
for path in self.content:
if isinstance(path,(list,tuple)):
path = path[0]
if self.ignore_file_pattern.match(path):
continue
try:
self.walk(path, parent, self.ignore_file_pattern)
except Exception,msg:
self.warning('on walk of %r: %r' % (path,msg))
import traceback
self.debug(traceback.format_exc())
self.wmc_mapping.update({'14': '0',
'15': '0',
'16': '0',
'17': '0'
})
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
def __repr__(self):
return str(self.__class__).split('.')[-1]
def release(self):
if self.inotify != None:
self.inotify.release()
def len(self):
return len(self.store)
def get_by_id(self,id):
#print "get_by_id", id, type(id)
# we have referenced ids here when we are in WMC mapping mode
if isinstance(id, basestring):
id = id.split('@',1)
id = id[0]
#try:
# id = int(id)
#except ValueError:
# id = 1000
if id == '0':
id = '1000'
#print "get_by_id 2", id
try:
r = self.store[id]
except:
r = None
#print "get_by_id 3", r
return r
def get_id_by_name(self, parent='0', name=''):
self.info('get_id_by_name %r (%r) %r' % (parent, type(parent), name))
try:
parent = self.store[parent]
self.debug("%r %d" % (parent,len(parent.children)))
for child in parent.children:
#if not isinstance(name, unicode):
# name = name.decode("utf8")
self.debug("%r %r %r" % (child.get_name(),child.get_realpath(), name == child.get_realpath()))
if name == child.get_realpath():
return child.id
except:
import traceback
self.info(traceback.format_exc())
self.debug('get_id_by_name not found')
return None
def get_url_by_name(self,parent='0',name=''):
self.info('get_url_by_name %r %r' % (parent, name))
id = self.get_id_by_name(parent,name)
#print 'get_url_by_name', id
if id == None:
return ''
return self.store[id].url
def update_config(self,**kwargs):
print "update_config", kwargs
if 'content' in kwargs:
new_content = kwargs['content']
new_content = Set([os.path.abspath(x) for x in new_content.split(',')])
new_folders = new_content.difference(self.content)
obsolete_folders = self.content.difference(new_content)
print new_folders, obsolete_folders
for folder in obsolete_folders:
self.remove_content_folder(folder)
for folder in new_folders:
self.add_content_folder(folder)
self.content = new_content
def add_content_folder(self,path):
path = os.path.abspath(path)
if path not in self.content:
self.content.add(path)
self.walk(path, self.store['1000'], self.ignore_file_pattern)
def remove_content_folder(self,path):
path = os.path.abspath(path)
if path in self.content:
id = self.get_id_by_name('1000', path)
self.remove(id)
self.content.remove(path)
def walk(self, path, parent=None, ignore_file_pattern=''):
self.debug("walk %r" % path)
containers = []
parent = self.append(path,parent)
if parent != None:
containers.append(parent)
while len(containers)>0:
container = containers.pop()
try:
self.debug('adding %r' % container.location)
for child in container.location.children():
if ignore_file_pattern.match(child.basename()) != None:
continue
new_container = self.append(child.path,container)
if new_container != None:
containers.append(new_container)
except UnicodeDecodeError:
self.warning("UnicodeDecodeError - there is something wrong with a file located in %r", container.get_path())
def create(self, mimetype, path, parent):
self.debug("create ", mimetype, path, type(path), parent)
UPnPClass = classChooser(mimetype)
if UPnPClass == None:
return None
id = self.getnextID()
if mimetype in ('root','directory'):
id = str(id)
else:
_,ext = os.path.splitext(path)
id = str(id) + ext.lower()
update = False
if hasattr(self, 'update_id'):
update = True
self.store[id] = FSItem( id, parent, path, mimetype, self.urlbase, UPnPClass, update=True,store=self)
if hasattr(self, 'update_id'):
self.update_id += 1
#print self.update_id
if self.server:
if hasattr(self.server,'content_directory_server'):
self.server.content_directory_server.set_variable(0, 'SystemUpdateID', self.update_id)
if parent is not None:
value = (parent.get_id(),parent.get_update_id())
if self.server:
if hasattr(self.server,'content_directory_server'):
self.server.content_directory_server.set_variable(0, 'ContainerUpdateIDs', value)
return id
def append(self,path,parent):
self.debug("append ", path, type(path), parent)
if os.path.exists(path) == False:
self.warning("path %r not available - ignored", path)
return None
if stat.S_ISFIFO(os.stat(path).st_mode):
self.warning("path %r is a FIFO - ignored", path)
return None
try:
mimetype,_ = mimetypes.guess_type(path, strict=False)
if mimetype == None:
if os.path.isdir(path):
mimetype = 'directory'
if mimetype == None:
return None
id = self.create(mimetype,path,parent)
if mimetype == 'directory':
if self.inotify is not None:
mask = IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_CHANGED
self.inotify.watch(path, mask=mask, auto_add=False, callbacks=(self.notify,id))
return self.store[id]
except OSError, msg:
""" seems we have some permissions issues along the content path """
self.warning("path %r isn't accessible, error %r", path, msg)
return None
def remove(self, id):
print 'FSStore remove id', id
try:
item = self.store[id]
parent = item.get_parent()
item.remove()
del self.store[id]
if hasattr(self, 'update_id'):
self.update_id += 1
if self.server:
self.server.content_directory_server.set_variable(0, 'SystemUpdateID', self.update_id)
#value = '%d,%d' % (parent.get_id(),parent_get_update_id())
value = (parent.get_id(),parent.get_update_id())
if self.server:
self.server.content_directory_server.set_variable(0, 'ContainerUpdateIDs', value)
except:
pass
def notify(self, iwp, filename, mask, parameter=None):
self.info("Event %s on %s %s - parameter %r" % (
', '.join(self.inotify.flag_to_human(mask)), iwp.path, filename, parameter))
path = iwp.path
if filename:
path = os.path.join(path, filename)
if mask & IN_CHANGED:
# FIXME react maybe on access right changes, loss of read rights?
#print '%s was changed, parent %d (%s)' % (path, parameter, iwp.path)
pass
if(mask & IN_DELETE or mask & IN_MOVED_FROM):
self.info('%s was deleted, parent %r (%s)' % (path, parameter, iwp.path))
id = self.get_id_by_name(parameter,os.path.join(iwp.path,filename))
if id != None:
self.remove(id)
if(mask & IN_CREATE or mask & IN_MOVED_TO):
if mask & IN_ISDIR:
self.info('directory %s was created, parent %r (%s)' % (path, parameter, iwp.path))
else:
self.info('file %s was created, parent %r (%s)' % (path, parameter, iwp.path))
if self.get_id_by_name(parameter,os.path.join(iwp.path,filename)) is None:
if os.path.isdir(path):
self.walk(path, self.get_by_id(parameter), self.ignore_file_pattern)
else:
if self.ignore_file_pattern.match(filename) == None:
self.append(path, self.get_by_id(parameter))
def getnextID(self):
ret = self.next_id
self.next_id += 1
return ret
def backend_import(self,item,data):
try:
f = open(item.get_path(), 'w+b')
if hasattr(data,'read'):
data = data.read()
f.write(data)
f.close()
item.rebuild(self.urlbase)
return 200
except IOError:
self.warning("import of file %s failed" % item.get_path())
except Exception,msg:
import traceback
self.warning(traceback.format_exc())
return 500
def upnp_init(self):
self.current_connection_id = None
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
[#'http-get:*:audio/mpeg:DLNA.ORG_PN=MP3;DLNA.ORG_OP=11;DLNA.ORG_FLAGS=01700000000000000000000000000000',
#'http-get:*:audio/x-ms-wma:DLNA.ORG_PN=WMABASE;DLNA.ORG_OP=11;DLNA.ORG_FLAGS=01700000000000000000000000000000',
#'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_TN;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
#'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_SM;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
#'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_MED;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
#'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_LRG;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
#'http-get:*:video/mpeg:DLNA.ORG_PN=MPEG_PS_PAL;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=01700000000000000000000000000000',
#'http-get:*:video/x-ms-wmv:DLNA.ORG_PN=WMVMED_BASE;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=01700000000000000000000000000000',
'internal:%s:audio/mpeg:*' % self.server.coherence.hostname,
'http-get:*:audio/mpeg:*',
'internal:%s:video/mp4:*' % self.server.coherence.hostname,
'http-get:*:video/mp4:*',
'internal:%s:application/ogg:*' % self.server.coherence.hostname,
'http-get:*:application/ogg:*',
'internal:%s:video/x-msvideo:*' % self.server.coherence.hostname,
'http-get:*:video/x-msvideo:*',
'internal:%s:video/mpeg:*' % self.server.coherence.hostname,
'http-get:*:video/mpeg:*',
'internal:%s:video/avi:*' % self.server.coherence.hostname,
'http-get:*:video/avi:*',
'internal:%s:video/quicktime:*' % self.server.coherence.hostname,
'http-get:*:video/quicktime:*',
'internal:%s:image/gif:*' % self.server.coherence.hostname,
'http-get:*:image/gif:*',
'internal:%s:image/jpeg:*' % self.server.coherence.hostname,
'http-get:*:image/jpeg:*'],
default=True)
self.server.content_directory_server.set_variable(0, 'SystemUpdateID', self.update_id)
#self.server.content_directory_server.set_variable(0, 'SortCapabilities', '*')
def upnp_ImportResource(self, *args, **kwargs):
SourceURI = kwargs['SourceURI']
DestinationURI = kwargs['DestinationURI']
if DestinationURI.endswith('?import'):
id = DestinationURI.split('/')[-1]
id = id[:-7] # remove the ?import
else:
return failure.Failure(errorCode(718))
item = self.get_by_id(id)
if item == None:
return failure.Failure(errorCode(718))
def gotPage(headers):
#print "gotPage", headers
content_type = headers.get('content-type',[])
if not isinstance(content_type, list):
content_type = list(content_type)
if len(content_type) > 0:
extension = mimetypes.guess_extension(content_type[0], strict=False)
item.set_path(None,extension)
shutil.move(tmp_path, item.get_path())
item.rebuild(self.urlbase)
if hasattr(self, 'update_id'):
self.update_id += 1
if self.server:
if hasattr(self.server,'content_directory_server'):
self.server.content_directory_server.set_variable(0, 'SystemUpdateID', self.update_id)
if item.parent is not None:
value = (item.parent.get_id(),item.parent.get_update_id())
if self.server:
if hasattr(self.server,'content_directory_server'):
self.server.content_directory_server.set_variable(0, 'ContainerUpdateIDs', value)
def gotError(error, url):
self.warning("error requesting", url)
self.info(error)
os.unlink(tmp_path)
return failure.Failure(errorCode(718))
tmp_fp, tmp_path = tempfile.mkstemp()
os.close(tmp_fp)
utils.downloadPage(SourceURI,
tmp_path).addCallbacks(gotPage, gotError, None, None, [SourceURI], None)
transfer_id = 0 #FIXME
return {'TransferID': transfer_id}
def upnp_CreateObject(self, *args, **kwargs):
#print "CreateObject", kwargs
if kwargs['ContainerID'] == 'DLNA.ORG_AnyContainer':
if self.import_folder != None:
ContainerID = self.import_folder_id
else:
return failure.Failure(errorCode(712))
else:
ContainerID = kwargs['ContainerID']
Elements = kwargs['Elements']
parent_item = self.get_by_id(ContainerID)
if parent_item == None:
return failure.Failure(errorCode(710))
if parent_item.item.restricted:
return failure.Failure(errorCode(713))
if len(Elements) == 0:
return failure.Failure(errorCode(712))
elt = DIDLElement.fromString(Elements)
if elt.numItems() != 1:
return failure.Failure(errorCode(712))
item = elt.getItems()[0]
if item.parentID == 'DLNA.ORG_AnyContainer':
item.parentID = ContainerID
if(item.id != '' or
item.parentID != ContainerID or
item.restricted == True or
item.title == ''):
return failure.Failure(errorCode(712))
if('..' in item.title or
'~' in item.title or
os.sep in item.title):
return failure.Failure(errorCode(712))
if item.upnp_class == 'object.container.storageFolder':
if len(item.res) != 0:
return failure.Failure(errorCode(712))
path = os.path.join(parent_item.get_path(),item.title)
id = self.create('directory',path,parent_item)
try:
os.mkdir(path)
except:
self.remove(id)
return failure.Failure(errorCode(712))
if self.inotify is not None:
mask = IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_CHANGED
self.inotify.watch(path, mask=mask, auto_add=False, callbacks=(self.notify,id))
new_item = self.get_by_id(id)
didl = DIDLElement()
didl.addItem(new_item.item)
return {'ObjectID': id, 'Result': didl.toString()}
if item.upnp_class.startswith('object.item'):
_,_,content_format,_ = item.res[0].protocolInfo.split(':')
extension = mimetypes.guess_extension(content_format, strict=False)
path = os.path.join(parent_item.get_realpath(),item.title+extension)
id = self.create('item',path,parent_item)
new_item = self.get_by_id(id)
for res in new_item.item.res:
res.importUri = new_item.url+'?import'
res.data = None
didl = DIDLElement()
didl.addItem(new_item.item)
return {'ObjectID': id, 'Result': didl.toString()}
return failure.Failure(errorCode(712))
def hidden_upnp_DestroyObject(self, *args, **kwargs):
ObjectID = kwargs['ObjectID']
item = self.get_by_id(ObjectID)
if item == None:
return failure.Failure(errorCode(701))
print "upnp_DestroyObject", item.location
try:
item.location.remove()
except Exception, msg:
print Exception, msg
return failure.Failure(errorCode(715))
return {}
if __name__ == '__main__':
from twisted.internet import reactor
p = 'tests/content'
f = FSStore(None,name='my media',content=p, urlbase='http://localhost/xyz')
print f.len()
print f.get_by_id(1000).child_count, f.get_by_id(1000).get_xml()
print f.get_by_id(1001).child_count, f.get_by_id(1001).get_xml()
print f.get_by_id(1002).child_count, f.get_by_id(1002).get_xml()
print f.get_by_id(1003).child_count, f.get_by_id(1003).get_xml()
print f.get_by_id(1004).child_count, f.get_by_id(1004).get_xml()
print f.get_by_id(1005).child_count, f.get_by_id(1005).get_xml()
print f.store[1000].get_children(0,0)
#print f.upnp_Search(ContainerID ='4',
# Filter ='dc:title,upnp:artist',
# RequestedCount = '1000',
# StartingIndex = '0',
# SearchCriteria = '(upnp:class = "object.container.album.musicAlbum")',
# SortCriteria = '+dc:title')
f.upnp_ImportResource(SourceURI='http://spiegel.de',DestinationURI='ttt')
reactor.run()
|
|
#!/bin/py
#
# interpolate over data field with 2d polynomial fit
#
# fit a 2D, 3rd order polynomial to data
# estimate the 16 coefficients using all of your data points.
#
# http://stackoverflow.com/questions/18832763/drawing-directions-fields
#
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import itertools
import matplotlib.pyplot as plt
from scipy import integrate
from scipy.integrate import ode
#
# adding functions
#
import top
hprime = -12
Rprime = 3.0
def load_ell():
#
# Generate Data from ellipses
#
h = hprime
thetaf = 20*np.pi/180.
a = -h*1.0
miny = -1
#
# create data
#
space = 0.02
R = Rprime
y0 = np.arange(Rprime,miny,-space)
x0 = -np.sqrt(R*R-y0*y0)
theta0 = np.arctan2(y0,x0)
thetafy = thetaf*(R-y0)/R
#thetafy = thetaf*np.arccos(y0/R)/2.
thetam = theta0-np.pi/2-thetafy
m = np.tan(thetam)
k = (y0 + a*a*m/(x0-h) - m*(x0-h))
bs = -a*a*m*(y0-k)/(x0-h)
b = np.sqrt(bs)
xl = []
yl = []
zl = []
print 'y0 ', y0
print 'b/a: ',b/a
fudge = 0.05
dx_space=0.1
for i in xrange(len(k)):
dx = np.arange(h,x0[i]+fudge,dx_space)
xl = xl + dx.tolist()
dy = -(b[i]*np.sqrt(1-((dx-h)/(a))**2))+k[i]
#yl.append(-(b[i]*np.sqrt(1-((dx-h)/(a))**2))+k[i])
yl = yl + dy.tolist()
#zl.append(np.arctan(dy/dx))
if(i == 0):
m = np.zeros(len(dy))
else:
m = -b[i]*b[i]*(dx-h)/((dy-k[i])*(a*a))
zl = zl + m.tolist()
#
# convert to numpy array
#
x = np.asarray(xl)
y = np.asarray(yl)
z = np.asarray(zl)
#
# steady as she goes
#
return x,y,z
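#
# Derivation sketch for the k and bs expressions above, using the same
# symbols as the code: each curve is an arc of the ellipse
# ((x-h)/a)**2 + ((y-k)/b)**2 = 1 that passes through the circle point
# (x0, y0) with slope m = tan(thetam) there. Implicit differentiation gives
# dy/dx = -b**2*(x-h)/(a**2*(y-k)), so matching the slope at (x0, y0) yields
# b**2 = -a**2*m*(y0-k)/(x0-h) (the 'bs' term), and substituting that back
# into the ellipse equation at (x0, y0) gives
# k = y0 + a**2*m/(x0-h) - m*(x0-h), which is the expression used in load_ell.
#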
def vf(t,x,m):
#
# Vector field function
#
dx=np.zeros(2)
zz = polyval2d(x[0], x[1], m)
theta = np.arctan(zz)
dx[0]=np.cos(theta)
dx[1]=np.sin(theta)
#dx[1]=x[0]**2-x[0]-2
#polyval2d(xx, yy, m)
#dx[1]=polyval2d(xx, yy, m)
return dx
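#
# Note on the vector field above: the fitted surface polyval2d(x, y, m)
# stores the local slope dy/dx, so integrating
#     dx/dt = cos(theta), dy/dt = sin(theta), theta = arctan(slope)
# traces a solution curve at unit speed while preserving
# dy/dx = tan(theta) = slope.
#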
def arr(m):
#
# Solution curves
#
h = hprime
#ic=[[h,-4],[h,-1],[h,1],[h,-8]]
ic=[[h,-3.4],[h,-0.1],[h,-8]]
end = [2,2,2,2]
t0=0; dt=0.1;
r = ode(vf).set_integrator('vode', method='bdf',max_step=dt)
for k in range(len(ic)):
tEnd=np.sqrt(ic[k][0]**2 + ic[k][1]**2)-end[k]
Y=[];T=[];S=[];
r.set_initial_value(ic[k], t0).set_f_params(m)
while r.successful() and r.t +dt < tEnd:
r.integrate(r.t+dt)
Y.append(r.y)
S=np.array(np.real(Y))
plt.plot(S[:,0],S[:,1], color = 'red', lw = 4.25)
plt.hlines(Rprime, hprime, 0, color='red',lw = 4.25)
def polyfit2d(x, y, z, order=5):
ncols = (order + 1)**2
G = np.zeros((x.size, ncols))
ij = itertools.product(range(order+1), range(order+1))
for k, (i,j) in enumerate(ij):
G[:,k] = x**i * y**j
#
cnd=1e-5
#m, _, _, _ = np.linalg.lstsq(G, z,rcond=cnd)
m, _, _, _ = np.linalg.lstsq(G, z)
return m
def polyval2d(x, y, m):
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
z = np.zeros_like(x)
for a, (i,j) in zip(m, ij):
tmp = a * x**i * y**j
z += tmp
#print a,i,j,tmp,z
return z
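#
# Illustrative self-check for polyfit2d/polyval2d, not called by the script:
# fit data that lies exactly in the monomial basis and confirm the evaluation
# reproduces it.
#
def polyfit_roundtrip_check():
    xc = np.random.random(200)
    yc = np.random.random(200)
    zc = 1.0 + 2.0*xc + 3.0*yc + 4.0*xc*yc
    mc = polyfit2d(xc, yc, zc, order=3)
    return np.allclose(polyval2d(xc, yc, mc), zc)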
def polyval2d_disp(x, y, m):
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
z = np.zeros_like(x)
for a, (i,j) in zip(m, ij):
tmp = a * x**i * y**j
z += tmp
print a,i,j,tmp,z
return z
#
#
#
def poly_disp_fparse(m):
print "#"
print "# Polynomial Interpolation Function"
print "#"
print "slope_func = '"
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
for a, (i,j) in zip(m, ij):
if( (i+1)*(j+1) != len(m)):
print ' %.15f * x^%i * y^%i +' % (a,i,j )
else:
print " %.15f * x^%i * y^%i'" % (a,i,j )
print
return 0
#
#
#
def poly_disp_py(m):
print "#"
print "# Polynomial Interpolation Function"
print "# For python"
print "return ",
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
for a, (i,j) in zip(m, ij):
if( (i+1)*(j+1) != len(m)):
print '%.15f * x**%i * y**%i +' % (a,i,j ),
else:
print "%.15f * x**%i * y**%i" % (a,i,j ),
print
return 0
#
#
#
def poly_disp_py_line(m):
print "#"
print "# Polynomial Interpolation Function"
print "# For python"
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
for a, (i,j) in zip(m, ij):
if( (i+1)*(j+1) != len(m)):
print ' tmp += %.15f * x**%i * y**%i' % (a,i,j )
print ' print tmp'
else:
print " tmp += %.15f * x**%i * y**%i" % (a,i,j )
print ' print tmp'
print
return 0
def load_ex():
#
# Generate Example Data
#
numdata = 100
x = np.random.random(numdata)
y = np.random.random(numdata)
#
# silly fake function for z
#
z = x**2 + y**2 + 3*x**3 + y + np.random.random(numdata)
return x,y,z
#
# main function: execute
#
def main():
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# ----------------------------------------
#
# load data in
#
x,y,z=load_ell()
#x,y,z=load_ex()
#
# Fit polynomial
#
m = polyfit2d(x,y,z)
#
# Evaluate it on a grid...
#
nx, ny = 200, 200
xx, yy = np.meshgrid(np.linspace(x.min(), x.max(), nx),
np.linspace(y.min(), y.max(), ny))
zz = polyval2d(xx, yy, m)
#
# m is a matrix of polynomial values...
# e.g.
#
# Plot!
#
arr(m)
#
# ----------------------------------------
xt,yt,zt = top.load_ell()
mt = polyfit2d(xt,yt,zt)
nxt, nyt = 200, 200
xxt, yyt = np.meshgrid(np.linspace(x.min(), x.max(), nx),
np.linspace(y.min(), y.max(), ny))
zzt = top.polyval2d(xxt, yyt, mt)
top.arr(mt)
top.poly_disp_fparse(m)
#
# ----------------------------------------
plt.suptitle("SoV Configuration: Top Tier")
plt.title("Seven Vane")
xmin = -15
xmax = 6
ymin = -10
ymax = 14
major_ticksx = np.arange(xmin, xmax, 5)
minor_ticksx = np.arange(xmin, xmax, 1)
major_ticksy = np.arange(ymin, ymax, 5)
minor_ticksy = np.arange(ymin, ymax, 1)
ax.set_xticks(major_ticksx)
ax.set_xticks(minor_ticksx, minor=True)
ax.set_yticks(major_ticksy)
ax.set_yticks(minor_ticksy, minor=True)
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
plt.xlabel('Streamwise (X) [Meters]')
plt.ylabel('Spanwise (Y) [Meters]')
plt.grid()
# add circle
R = Rprime
circle=plt.Circle((0,0),R,color='black',linestyle='dotted',fill=False,linewidth=4)
from matplotlib.patches import Ellipse, Arc
ellipse = Arc([0.0,0.0],2*Rprime,2*Rprime,0,180,0,color='black', linewidth='5.0')
ax.add_patch(ellipse)
# adding text
#
ax.text(-20, 15, r'Upstream', fontsize=15)
ax.text(5, 15, r'Downstream', fontsize=15)
# angles
ax.text(-2, 1, r'$\theta^{t,r}$', fontsize=15,color='blue')
ax.text(3, 2, r'$\theta^{t,l}$', fontsize=15,color='blue')
ax.text(-12, -3, r'$\phi^{t,r}$', fontsize=15,color='blue')
ax.text(-12, 10, r'$\phi^{t,l}$', fontsize=15,color='blue')
# outer and inner radius
ax.annotate(r'$L_{x}$', xy=(-12,0), xytext=(0, 0),
arrowprops=dict(facecolor='black', shrink=0.05),color='blue',fontsize=15)
ax.annotate(r'cylinder', xy=(2,-3), xytext=(6,-7),
arrowprops=dict(facecolor='black', shrink=0.05),color='blue',fontsize=15)
ax.annotate(r'$L^{t,r}$', xy=(-12.5,-8.5), xytext=(-12.5, 4.0),
arrowprops=dict(facecolor='black', shrink=0.05),color='blue',fontsize=15)
ax.annotate(r'$L^{t,l}$', xy=(-13, 3), xytext=(-13,13),
arrowprops=dict(facecolor='black', shrink=0.05),color='blue',fontsize=15)
fig = plt.gcf()
fig.gca().add_artist(circle)
plt.axes().set_aspect('equal', 'datalim')
plt.savefig('interp_entire_top.png')
plt.savefig('interp_entire_top.pdf', format='pdf', dpi=1000)
#
# output polynomial for input
#
poly_disp_fparse(m)
#
# EXECUTE
#
main()
#
# nick
# 4/28/16
#
|
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Instruction,
OutputAction,
PushVlanHeaderAction,
SetFieldAction,
Match)
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
from pybvc.common.constants import (ETH_TYPE_IPv4,
ETH_TYPE_DOT1Q)
def of_demo_14():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit(0)
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 14 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
# --- Flow Match: Ethernet Type
# Ethernet Source Address
# Ethernet Destination Address
# Input Port
eth_type = ETH_TYPE_IPv4
eth_src = "00:00:00:AA:BB:CC"
eth_dst = "FF:FF:AA:BC:ED:FE"
input_port = 5
# --- Flow Actions: Push VLAN: Ethernet Type
# Set Field: VLAN ID
# Output: Port Number
push_eth_type = ETH_TYPE_DOT1Q # 802.1q VLAN tagged frame
push_vlan_id = 100
output_port = 5
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'"
% (ctrlIpAddr, nodeName))
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Ethernet Type (%s)\n"
" Ethernet Source Address (%s)\n"
" Ethernet Destination Address (%s)\n"
" Input Port (%s)"
% (hex(eth_type), eth_src, eth_dst, input_port))
print (" Action: Push VLAN (Ethernet Type=%s)"
% (hex(push_eth_type)))
print (" Set Field (VLAN ID=%s)"
% (push_vlan_id))
print (" Output (to Physical Port Number %s)"
% (output_port))
time.sleep(rundelay)
flow_entry = FlowEntry()
flow_entry.set_flow_name(flow_name="Push VLAN 100")
table_id = 0
flow_entry.set_flow_table_id(table_id)
flow_id = 21
flow_entry.set_flow_id(flow_id)
flow_entry.set_flow_priority(flow_priority=1012)
flow_entry.set_flow_cookie(cookie=401)
flow_entry.set_flow_cookie_mask(cookie_mask=255)
flow_entry.set_flow_hard_timeout(hard_timeout=1200)
flow_entry.set_flow_idle_timeout(idle_timeout=3400)
# --- Instruction: 'Apply-actions'
# Actions: 'PushVlan'
# 'SetField'
# 'Output'
instruction = Instruction(instruction_order=0)
action = PushVlanHeaderAction(order=0)
action.set_eth_type(eth_type=push_eth_type)
instruction.add_apply_action(action)
action = SetFieldAction(order=1)
action.set_vlan_id(vid=push_vlan_id)
instruction.add_apply_action(action)
action = OutputAction(order=2, port=output_port)
instruction.add_apply_action(action)
flow_entry.add_instruction(instruction)
# --- Match Fields: Ethernet Type
# Ethernet Source Address
# Ethernet Destination Address
# Input Port
match = Match()
match.set_eth_type(eth_type)
match.set_eth_src(eth_src)
match.set_eth_dst(eth_dst)
match.set_in_port(in_port=input_port)
flow_entry.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Get configured flow from the Controller")
time.sleep(rundelay)
result = ofswitch.get_configured_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully read from the Controller")
print ("Flow info:")
flow = result.get_data()
print json.dumps(flow, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Delete flow with id of '%s' from the Controller's cache "
"and from the table '%s' on the '%s' node"
% (flow_id, table_id, nodeName))
time.sleep(rundelay)
result = ofswitch.delete_flow(flow_entry.get_flow_table_id(),
flow_entry.get_flow_id())
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully removed from the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_14()
|
|
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures
import mock
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils
from nova import context
from nova import rpc
from nova import test
# Make a class that resets all of the global variables in nova.rpc
class RPCResetFixture(fixtures.Fixture):
def _setUp(self):
self.trans = copy.copy(rpc.TRANSPORT)
self.noti_trans = copy.copy(rpc.NOTIFICATION_TRANSPORT)
self.noti = copy.copy(rpc.NOTIFIER)
self.all_mods = copy.copy(rpc.ALLOWED_EXMODS)
self.ext_mods = copy.copy(rpc.EXTRA_EXMODS)
self.conf = copy.copy(rpc.CONF)
self.addCleanup(self._reset_everything)
def _reset_everything(self):
rpc.TRANSPORT = self.trans
rpc.NOTIFICATION_TRANSPORT = self.noti_trans
rpc.NOTIFIER = self.noti
rpc.ALLOWED_EXMODS = self.all_mods
rpc.EXTRA_EXMODS = self.ext_mods
rpc.CONF = self.conf
class TestRPC(test.NoDBTestCase):
# We're testing the rpc code so we can't use the RPCFixture.
STUB_RPC = False
def setUp(self):
super(TestRPC, self).setUp()
self.useFixture(RPCResetFixture())
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_notification_transport')
@mock.patch.object(messaging, 'Notifier')
def test_init_unversioned(self, mock_notif, mock_noti_trans,
mock_ser, mock_exmods):
# The expected call to get the legacy notifier will require no new
# kwargs, and we expect the new notifier will need the noop driver
expected = [{}, {'driver': 'noop'}]
self._test_init(mock_notif, mock_noti_trans, mock_ser,
mock_exmods, 'unversioned', expected)
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_notification_transport')
@mock.patch.object(messaging, 'Notifier')
def test_init_both(self, mock_notif, mock_noti_trans,
mock_ser, mock_exmods):
expected = [{}, {'topics': ['versioned_notifications']}]
self._test_init(mock_notif, mock_noti_trans, mock_ser,
mock_exmods, 'both', expected)
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_notification_transport')
@mock.patch.object(messaging, 'Notifier')
def test_init_versioned(self, mock_notif, mock_noti_trans,
mock_ser, mock_exmods):
expected = [{'driver': 'noop'},
{'topics': ['versioned_notifications']}]
self._test_init(mock_notif, mock_noti_trans, mock_ser,
mock_exmods, 'versioned', expected)
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_notification_transport')
@mock.patch.object(messaging, 'Notifier')
def test_init_versioned_with_custom_topics(self, mock_notif,
mock_noti_trans, mock_ser,
mock_exmods):
expected = [{'driver': 'noop'},
{'topics': ['custom_topic1', 'custom_topic2']}]
self._test_init(
mock_notif, mock_noti_trans, mock_ser, mock_exmods, 'versioned',
expected, versioned_notification_topics=['custom_topic1',
'custom_topic2'])
def test_cleanup_transport_null(self):
rpc.NOTIFICATION_TRANSPORT = mock.Mock()
rpc.LEGACY_NOTIFIER = mock.Mock()
rpc.NOTIFIER = mock.Mock()
self.assertRaises(AssertionError, rpc.cleanup)
def test_cleanup_notification_transport_null(self):
rpc.TRANSPORT = mock.Mock()
rpc.NOTIFIER = mock.Mock()
self.assertRaises(AssertionError, rpc.cleanup)
    def test_cleanup_legacy_notifier_null(self):
        rpc.TRANSPORT = mock.Mock()
        rpc.NOTIFICATION_TRANSPORT = mock.Mock()
        rpc.NOTIFIER = mock.Mock()
        self.assertRaises(AssertionError, rpc.cleanup)
def test_cleanup_notifier_null(self):
rpc.TRANSPORT = mock.Mock()
rpc.LEGACY_NOTIFIER = mock.Mock()
rpc.NOTIFICATION_TRANSPORT = mock.Mock()
self.assertRaises(AssertionError, rpc.cleanup)
def test_cleanup(self):
rpc.LEGACY_NOTIFIER = mock.Mock()
rpc.NOTIFIER = mock.Mock()
rpc.NOTIFICATION_TRANSPORT = mock.Mock()
rpc.TRANSPORT = mock.Mock()
trans_cleanup = mock.Mock()
not_trans_cleanup = mock.Mock()
rpc.TRANSPORT.cleanup = trans_cleanup
rpc.NOTIFICATION_TRANSPORT.cleanup = not_trans_cleanup
rpc.cleanup()
trans_cleanup.assert_called_once_with()
not_trans_cleanup.assert_called_once_with()
self.assertIsNone(rpc.TRANSPORT)
self.assertIsNone(rpc.NOTIFICATION_TRANSPORT)
self.assertIsNone(rpc.LEGACY_NOTIFIER)
self.assertIsNone(rpc.NOTIFIER)
@mock.patch.object(messaging, 'set_transport_defaults')
def test_set_defaults(self, mock_set):
control_exchange = mock.Mock()
rpc.set_defaults(control_exchange)
mock_set.assert_called_once_with(control_exchange)
def test_add_extra_exmods(self):
rpc.EXTRA_EXMODS = []
rpc.add_extra_exmods('foo', 'bar')
self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS)
def test_clear_extra_exmods(self):
rpc.EXTRA_EXMODS = ['foo', 'bar']
rpc.clear_extra_exmods()
self.assertEqual(0, len(rpc.EXTRA_EXMODS))
def test_get_allowed_exmods(self):
rpc.ALLOWED_EXMODS = ['foo']
rpc.EXTRA_EXMODS = ['bar']
exmods = rpc.get_allowed_exmods()
self.assertEqual(['foo', 'bar'], exmods)
@mock.patch.object(messaging, 'TransportURL')
def test_get_transport_url(self, mock_url):
conf = mock.Mock()
rpc.CONF = conf
mock_url.parse.return_value = 'foo'
url = rpc.get_transport_url(url_str='bar')
self.assertEqual('foo', url)
mock_url.parse.assert_called_once_with(conf, 'bar')
@mock.patch.object(messaging, 'TransportURL')
def test_get_transport_url_null(self, mock_url):
conf = mock.Mock()
rpc.CONF = conf
mock_url.parse.return_value = 'foo'
url = rpc.get_transport_url()
self.assertEqual('foo', url)
mock_url.parse.assert_called_once_with(conf, None)
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'RPCClient')
def test_get_client(self, mock_client, mock_ser):
rpc.TRANSPORT = mock.Mock()
tgt = mock.Mock()
ser = mock.Mock()
mock_client.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
mock_client.assert_called_once_with(rpc.TRANSPORT,
tgt, version_cap='1.0',
serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_rpc_server')
def test_get_server(self, mock_get, mock_ser):
rpc.TRANSPORT = mock.Mock()
ser = mock.Mock()
tgt = mock.Mock()
ends = mock.Mock()
mock_ser.return_value = ser
mock_get.return_value = 'server'
server = rpc.get_server(tgt, ends, serializer='foo')
mock_ser.assert_called_once_with('foo')
access_policy = dispatcher.DefaultRPCAccessPolicy
mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends,
executor='eventlet', serializer=ser,
access_policy=access_policy)
self.assertEqual('server', server)
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
@mock.patch.object(messaging, 'RPCClient')
def test_get_client_profiler_enabled(self, mock_client, mock_ser):
rpc.TRANSPORT = mock.Mock()
tgt = mock.Mock()
ser = mock.Mock()
mock_client.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
mock_client.assert_called_once_with(rpc.TRANSPORT,
tgt, version_cap='1.0',
serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
@mock.patch.object(messaging, 'get_rpc_server')
def test_get_server_profiler_enabled(self, mock_get, mock_ser):
rpc.TRANSPORT = mock.Mock()
ser = mock.Mock()
tgt = mock.Mock()
ends = mock.Mock()
mock_ser.return_value = ser
mock_get.return_value = 'server'
server = rpc.get_server(tgt, ends, serializer='foo')
mock_ser.assert_called_once_with('foo')
access_policy = dispatcher.DefaultRPCAccessPolicy
mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends,
executor='eventlet', serializer=ser,
access_policy=access_policy)
self.assertEqual('server', server)
def test_get_notifier(self):
rpc.LEGACY_NOTIFIER = mock.Mock()
mock_prep = mock.Mock()
mock_prep.return_value = 'notifier'
rpc.LEGACY_NOTIFIER.prepare = mock_prep
notifier = rpc.get_notifier('service', publisher_id='foo')
mock_prep.assert_called_once_with(publisher_id='foo')
self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier)
self.assertEqual('notifier', notifier.notifier)
def test_get_notifier_null_publisher(self):
rpc.LEGACY_NOTIFIER = mock.Mock()
mock_prep = mock.Mock()
mock_prep.return_value = 'notifier'
rpc.LEGACY_NOTIFIER.prepare = mock_prep
notifier = rpc.get_notifier('service', host='bar')
mock_prep.assert_called_once_with(publisher_id='service.bar')
self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier)
self.assertEqual('notifier', notifier.notifier)
def test_get_versioned_notifier(self):
rpc.NOTIFIER = mock.Mock()
mock_prep = mock.Mock()
mock_prep.return_value = 'notifier'
rpc.NOTIFIER.prepare = mock_prep
notifier = rpc.get_versioned_notifier('service.foo')
mock_prep.assert_called_once_with(publisher_id='service.foo')
self.assertEqual('notifier', notifier)
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(messaging, 'get_rpc_transport')
def test_create_transport(self, mock_transport, mock_exmods):
exmods = mock_exmods.return_value
transport = rpc.create_transport(mock.sentinel.url)
self.assertEqual(mock_transport.return_value, transport)
mock_exmods.assert_called_once_with()
mock_transport.assert_called_once_with(rpc.CONF,
url=mock.sentinel.url,
allowed_remote_exmods=exmods)
def _test_init(self, mock_notif, mock_noti_trans, mock_ser,
mock_exmods, notif_format, expected_driver_topic_kwargs,
versioned_notification_topics=['versioned_notifications']):
legacy_notifier = mock.Mock()
notifier = mock.Mock()
notif_transport = mock.Mock()
transport = mock.Mock()
serializer = mock.Mock()
conf = mock.Mock()
conf.transport_url = None
conf.notifications.notification_format = notif_format
conf.notifications.versioned_notifications_topics = (
versioned_notification_topics)
mock_exmods.return_value = ['foo']
mock_noti_trans.return_value = notif_transport
mock_ser.return_value = serializer
mock_notif.side_effect = [legacy_notifier, notifier]
@mock.patch.object(rpc, 'CONF', new=conf)
@mock.patch.object(rpc, 'create_transport')
@mock.patch.object(rpc, 'get_transport_url')
def _test(get_url, create_transport):
create_transport.return_value = transport
rpc.init(conf)
create_transport.assert_called_once_with(get_url.return_value)
_test()
self.assertTrue(mock_exmods.called)
self.assertIsNotNone(rpc.TRANSPORT)
self.assertIsNotNone(rpc.LEGACY_NOTIFIER)
self.assertIsNotNone(rpc.NOTIFIER)
self.assertEqual(legacy_notifier, rpc.LEGACY_NOTIFIER)
self.assertEqual(notifier, rpc.NOTIFIER)
expected_calls = []
for kwargs in expected_driver_topic_kwargs:
expected_kwargs = {'serializer': serializer}
expected_kwargs.update(kwargs)
expected_calls.append(((notif_transport,), expected_kwargs))
self.assertEqual(expected_calls, mock_notif.call_args_list,
"The calls to messaging.Notifier() did not create "
"the legacy and versioned notifiers properly.")
class TestJsonPayloadSerializer(test.NoDBTestCase):
def test_serialize_entity(self):
with mock.patch.object(jsonutils, 'to_primitive') as mock_prim:
rpc.JsonPayloadSerializer.serialize_entity('context', 'entity')
mock_prim.assert_called_once_with('entity', convert_instances=True)
class TestRequestContextSerializer(test.NoDBTestCase):
def setUp(self):
super(TestRequestContextSerializer, self).setUp()
self.mock_base = mock.Mock()
self.ser = rpc.RequestContextSerializer(self.mock_base)
self.ser_null = rpc.RequestContextSerializer(None)
def test_serialize_entity(self):
self.mock_base.serialize_entity.return_value = 'foo'
ser_ent = self.ser.serialize_entity('context', 'entity')
self.mock_base.serialize_entity.assert_called_once_with('context',
'entity')
self.assertEqual('foo', ser_ent)
def test_serialize_entity_null_base(self):
ser_ent = self.ser_null.serialize_entity('context', 'entity')
self.assertEqual('entity', ser_ent)
def test_deserialize_entity(self):
self.mock_base.deserialize_entity.return_value = 'foo'
deser_ent = self.ser.deserialize_entity('context', 'entity')
self.mock_base.deserialize_entity.assert_called_once_with('context',
'entity')
self.assertEqual('foo', deser_ent)
def test_deserialize_entity_null_base(self):
deser_ent = self.ser_null.deserialize_entity('context', 'entity')
self.assertEqual('entity', deser_ent)
def test_serialize_context(self):
context = mock.Mock()
self.ser.serialize_context(context)
context.to_dict.assert_called_once_with()
@mock.patch.object(context, 'RequestContext')
def test_deserialize_context(self, mock_req):
self.ser.deserialize_context('context')
mock_req.from_dict.assert_called_once_with('context')
class TestProfilerRequestContextSerializer(test.NoDBTestCase):
def setUp(self):
super(TestProfilerRequestContextSerializer, self).setUp()
self.ser = rpc.ProfilerRequestContextSerializer(mock.Mock())
@mock.patch('nova.rpc.profiler')
def test_serialize_context(self, mock_profiler):
prof = mock_profiler.get.return_value
prof.hmac_key = 'swordfish'
prof.get_base_id.return_value = 'baseid'
prof.get_id.return_value = 'parentid'
context = mock.Mock()
context.to_dict.return_value = {'project_id': 'test'}
self.assertEqual({'project_id': 'test',
'trace_info': {
'hmac_key': 'swordfish',
'base_id': 'baseid',
'parent_id': 'parentid'}},
self.ser.serialize_context(context))
@mock.patch('nova.rpc.profiler')
def test_deserialize_context(self, mock_profiler):
serialized = {'project_id': 'test',
'trace_info': {
'hmac_key': 'swordfish',
'base_id': 'baseid',
'parent_id': 'parentid'}}
context = self.ser.deserialize_context(serialized)
self.assertEqual('test', context.project_id)
mock_profiler.init.assert_called_once_with(
hmac_key='swordfish', base_id='baseid', parent_id='parentid')
class TestClientRouter(test.NoDBTestCase):
@mock.patch('oslo_messaging.RPCClient')
def test_by_instance(self, mock_rpcclient):
default_client = mock.Mock()
cell_client = mock.Mock()
mock_rpcclient.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = mock.sentinel.transport
router = rpc.ClientRouter(default_client)
client = router.client(ctxt)
# verify a client was created by ClientRouter
mock_rpcclient.assert_called_once_with(
mock.sentinel.transport, default_client.target,
version_cap=default_client.version_cap,
serializer=default_client.serializer)
# verify cell client was returned
self.assertEqual(cell_client, client)
@mock.patch('oslo_messaging.RPCClient')
def test_by_instance_untargeted(self, mock_rpcclient):
default_client = mock.Mock()
cell_client = mock.Mock()
mock_rpcclient.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = None
router = rpc.ClientRouter(default_client)
client = router.client(ctxt)
self.assertEqual(router.default_client, client)
self.assertFalse(mock_rpcclient.called)
class TestIsNotificationsEnabledDecorator(test.NoDBTestCase):
def setUp(self):
super(TestIsNotificationsEnabledDecorator, self).setUp()
self.f = mock.Mock()
self.f.__name__ = 'f'
self.decorated = rpc.if_notifications_enabled(self.f)
def test_call_func_if_needed(self):
self.decorated()
self.f.assert_called_once_with()
@mock.patch('nova.rpc.NOTIFIER.is_enabled', return_value=False)
def test_not_call_func_if_notifier_disabled(self, mock_is_enabled):
self.decorated()
self.assertEqual(0, len(self.f.mock_calls))
def test_not_call_func_if_only_unversioned_notifications_requested(self):
self.flags(notification_format='unversioned', group='notifications')
self.decorated()
self.assertEqual(0, len(self.f.mock_calls))
|
|
"""Test the bootstrapping."""
# pylint: disable=too-many-public-methods,protected-access
from unittest import mock
import threading
import logging
import voluptuous as vol
from homeassistant import bootstrap, loader
import homeassistant.util.dt as dt_util
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from tests.common import \
get_test_home_assistant, MockModule, MockPlatform, \
assert_setup_component, patch_yaml_files
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
_LOGGER = logging.getLogger(__name__)
class TestBootstrap:
"""Test the bootstrap utils."""
hass = None
backup_cache = None
# pylint: disable=invalid-name, no-self-use
def setup_method(self, method):
"""Setup the test."""
self.backup_cache = loader._COMPONENT_CACHE
if method == self.test_from_config_file:
return
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Clean up."""
if method == self.test_from_config_file:
return
dt_util.DEFAULT_TIME_ZONE = ORIG_TIMEZONE
self.hass.stop()
loader._COMPONENT_CACHE = self.backup_cache
    @mock.patch(
        # prevent .HA_VERSION file from being written
        'homeassistant.bootstrap.conf_util.process_ha_config_upgrade',
        autospec=True)
    @mock.patch('homeassistant.util.location.detect_location_info',
                autospec=True, return_value=None)
    def test_from_config_file(self, mock_detect, mock_upgrade):
"""Test with configuration file."""
components = ['browser', 'conversation', 'script']
files = {
'config.yaml': ''.join(
'{}:\n'.format(comp)
for comp in components
)
}
with mock.patch('os.path.isfile', mock.Mock(return_value=True)), \
mock.patch('os.access', mock.Mock(return_value=True)), \
patch_yaml_files(files, True):
self.hass = bootstrap.from_config_file('config.yaml')
components.append('group')
assert sorted(components) == sorted(self.hass.config.components)
def test_handle_setup_circular_dependency(self):
"""Test the setup of circular dependencies."""
loader.set_component('comp_b', MockModule('comp_b', ['comp_a']))
def setup_a(hass, config):
"""Setup the another component."""
bootstrap.setup_component(hass, 'comp_b')
return True
loader.set_component('comp_a', MockModule('comp_a', setup=setup_a))
bootstrap.setup_component(self.hass, 'comp_a')
assert ['comp_a'] == self.hass.config.components
def test_validate_component_config(self):
"""Test validating component configuration."""
config_schema = vol.Schema({
'comp_conf': {
'hello': str
}
}, required=True)
loader.set_component(
'comp_conf', MockModule('comp_conf', config_schema=config_schema))
with assert_setup_component(0):
assert not bootstrap.setup_component(self.hass, 'comp_conf', {})
with assert_setup_component(0):
assert not bootstrap.setup_component(self.hass, 'comp_conf', {
'comp_conf': None
})
with assert_setup_component(0):
assert not bootstrap.setup_component(self.hass, 'comp_conf', {
'comp_conf': {}
})
with assert_setup_component(0):
assert not bootstrap.setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
'invalid': 'extra',
}
})
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
}
})
def test_validate_platform_config(self):
"""Test validating platform configuration."""
platform_schema = PLATFORM_SCHEMA.extend({
'hello': str,
})
loader.set_component(
'platform_conf',
MockModule('platform_conf', platform_schema=platform_schema))
loader.set_component(
'platform_conf.whatever', MockPlatform('whatever'))
with assert_setup_component(0):
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'hello': 'world',
'invalid': 'extra',
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
'platform_conf 2': {
'invalid': True
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(0):
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'not_existing',
'hello': 'world',
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': [{
'platform': 'whatever',
'hello': 'world',
}]
})
self.hass.config.components.remove('platform_conf')
# Any falsey platform config will be ignored (None, {}, etc)
with assert_setup_component(0) as config:
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': None
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': {}
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
def test_component_not_found(self):
"""setup_component should not crash if component doesn't exist."""
assert not bootstrap.setup_component(self.hass, 'non_existing')
def test_component_not_double_initialized(self):
"""Test we do not setup a component twice."""
mock_setup = mock.MagicMock(return_value=True)
loader.set_component('comp', MockModule('comp', setup=mock_setup))
assert bootstrap.setup_component(self.hass, 'comp')
assert mock_setup.called
mock_setup.reset_mock()
assert bootstrap.setup_component(self.hass, 'comp')
assert not mock_setup.called
@mock.patch('homeassistant.util.package.install_package',
return_value=False)
def test_component_not_installed_if_requirement_fails(self, mock_install):
"""Component setup should fail if requirement can't install."""
self.hass.config.skip_pip = False
loader.set_component(
'comp', MockModule('comp', requirements=['package==0.0.1']))
assert not bootstrap.setup_component(self.hass, 'comp')
assert 'comp' not in self.hass.config.components
def test_component_not_setup_twice_if_loaded_during_other_setup(self):
"""Test component setup while waiting for lock is not setup twice."""
loader.set_component('comp', MockModule('comp'))
result = []
def setup_component():
"""Setup the component."""
result.append(bootstrap.setup_component(self.hass, 'comp'))
thread = threading.Thread(target=setup_component)
thread.start()
self.hass.config.components.append('comp')
thread.join()
assert len(result) == 1
assert result[0]
def test_component_not_setup_missing_dependencies(self):
"""Test we do not setup a component if not all dependencies loaded."""
deps = ['non_existing']
loader.set_component('comp', MockModule('comp', dependencies=deps))
assert not bootstrap.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
loader.set_component('non_existing', MockModule('non_existing'))
assert bootstrap.setup_component(self.hass, 'comp', {})
def test_component_failing_setup(self):
"""Test component that fails setup."""
loader.set_component(
'comp', MockModule('comp', setup=lambda hass, config: False))
assert not bootstrap.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_component_exception_setup(self):
"""Test component that raises exception during setup."""
def exception_setup(hass, config):
"""Setup that raises exception."""
raise Exception('fail!')
loader.set_component('comp', MockModule('comp', setup=exception_setup))
assert not bootstrap.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_home_assistant_core_config_validation(self):
"""Test if we pass in wrong information for HA conf."""
# Extensive HA conf validation testing is done in test_config.py
assert None is bootstrap.from_config_dict({
'homeassistant': {
'latitude': 'some string'
}
})
def test_component_setup_with_validation_and_dependency(self):
"""Test all config is passed to dependencies."""
def config_check_setup(hass, config):
"""Setup method that tests config is passed in."""
if config.get('comp_a', {}).get('valid', False):
return True
raise Exception('Config not passed in: {}'.format(config))
loader.set_component('comp_a',
MockModule('comp_a', setup=config_check_setup))
loader.set_component('switch.platform_a', MockPlatform('comp_b',
['comp_a']))
bootstrap.setup_component(self.hass, 'switch', {
'comp_a': {
'valid': True
},
'switch': {
'platform': 'platform_a',
}
})
assert 'comp_a' in self.hass.config.components
def test_platform_specific_config_validation(self):
"""Test platform that specifies config."""
platform_schema = PLATFORM_SCHEMA.extend({
'valid': True,
}, extra=vol.PREVENT_EXTRA)
mock_setup = mock.MagicMock(spec_set=True)
loader.set_component(
'switch.platform_a',
MockPlatform(platform_schema=platform_schema,
setup_platform=mock_setup))
with assert_setup_component(0):
assert bootstrap.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'invalid': True
}
})
assert mock_setup.call_count == 0
self.hass.config.components.remove('switch')
with assert_setup_component(0):
assert bootstrap.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True,
'invalid_extra': True,
}
})
assert mock_setup.call_count == 0
self.hass.config.components.remove('switch')
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True
}
})
assert mock_setup.call_count == 1
def test_disable_component_if_invalid_return(self):
"""Test disabling component if invalid return."""
loader.set_component(
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: None))
assert not bootstrap.setup_component(self.hass, 'disabled_component')
assert loader.get_component('disabled_component') is None
assert 'disabled_component' not in self.hass.config.components
loader.set_component(
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: False))
assert not bootstrap.setup_component(self.hass, 'disabled_component')
assert loader.get_component('disabled_component') is not None
assert 'disabled_component' not in self.hass.config.components
loader.set_component(
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: True))
assert bootstrap.setup_component(self.hass, 'disabled_component')
assert loader.get_component('disabled_component') is not None
assert 'disabled_component' in self.hass.config.components
|
|
#!/usr/bin/python
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import syndicate.log
import syndicate.conf as conf
import syndicate.common.api as api
import shutil
import json
log = syndicate.log.log
sys.path.append("../clients/syntool")
import syntool
TEST_ADMIN_ID = "[email protected]"
TEST_CONFIG = ("""
[syndicate]
MSAPI=http://localhost:8080/api
user_id=%s
gateway_keys=gateway_keys/
volume_keys=volume_keys/
user_keys=user_keys/
""" % TEST_ADMIN_ID).strip()
TEST_ADMIN_PKEY_PATH = "./user_test_key.pem"
USER_ATTRS = ["email", "openid_url", "max_UGs", "max_RGs", "max_AGs", "max_volumes", "signing_public_key", "verifying_public_key"]
CONFIG_PATH = None
#-------------------------------
class Suite:
def __init__(self, name, description):
self.name = name
self.description = description
def __call__(self, func):
def inner(*args, **kw):
log.info("--- Suite: %s ---" % self.name)
return func(*args, **kw)
inner.__name__ = func.__name__
inner.name = self.name
inner.description = self.description
inner.test_suite = True
return inner
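# Illustrative sketch of how the decorator is meant to be used (the real
# suites are defined further below):
#
#   @Suite("my_suite", "What this suite checks")
#   def test_my_suite():
#       ...
#
# The wrapper logs the suite banner before running the function and sets
# inner.test_suite = True so that get_test_suites() can discover it by name.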
#-------------------------------
def result( fmt ):
print "[RESULT] %s" % fmt
#-------------------------------
def write_config( base_dir, config_text ):
config_path = os.path.join( base_dir, "syndicate.conf" )
fd = open( config_path, "w" )
fd.write( config_text )
fd.close()
def build_argv( *args, **kw ):
return list( [str(a) for a in args] ) + list( [str(k) + "=" + str(v) for (k, v) in kw.items()] )
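# e.g. (illustrative) build_argv("[email protected]", max_volumes=10)
#      yields ['[email protected]', 'max_volumes=10']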
def test( method_name, *args, **kw ):
    syntool_args = ['-c', CONFIG_PATH]
    if kw.has_key("syntool_args"):
        # extra syntool options come in as a list; splice them into the
        # command line rather than passing them along as a method keyword
        syntool_args += kw['syntool_args']
        del kw['syntool_args']
    argv = ['syntool.py'] + syntool_args + [method_name] + build_argv( *args, **kw )
    log.info( "Test '%s'" % " ".join(argv) )
    return syntool.main( argv )
def load_config( conf_file ):
argv = ['syntool.py', '-c', conf_file, "test"]
config = syntool.load_options( argv )
return config
def setup( test_config_text, test_user_id, test_user_pkey_path, testdir=None ):
global CONFIG_PATH
# set up a test directory
if testdir == None:
testdir = os.tempnam( "/tmp", "test-syntool-" )
os.mkdir( testdir )
write_config( testdir, test_config_text )
log.info("Test config in %s" % testdir )
config_path = os.path.join( testdir, "syndicate.conf" )
config = load_config( config_path )
# copy over our test admin key
shutil.copyfile( test_user_pkey_path, conf.object_signing_key_filename( config, "user", test_user_id ) )
CONFIG_PATH = config_path
return config
def abort( message, exception=None, cleanup=False ):
log.error(message)
if exception != None:
log.exception(exception)
if cleanup:
pass
sys.exit(1)
def check_result_present( ret, required_keys ):
    # parameter is named 'ret' so it does not shadow the result() helper above
    for key in required_keys:
        assert ret.has_key(key), "Missing key: %s" % key
        result_str = str(ret[key])
        if len(result_str) > 50:
            result_str = result_str[:50]
        result(" %s == %s" % (key, result_str) )
def check_result_absent( ret, absent_keys ):
    for key in absent_keys:
        assert not ret.has_key( key ), "Present key: %s" % key
#-------------------------------
@Suite("create_user_bad_args", "Test creating users with missing or invalid arguments")
def test_create_user_bad_args():
# test no arguments
try:
log.info("Create user: Missing arguments")
ret = test( "create_user", "[email protected]" )
abort("Created user with missing arguments")
except Exception, e:
result( e.message )
pass
print ""
# test invalid arguments
try:
log.info("Create user: Invalid arguments")
ret = test( "create_user", "not an email", "http://www.vicci.org/id/foo", "MAKE_SIGNING_KEY" )
abort("Created user with invalid email address")
except Exception, e:
result( e.message )
pass
print ""
return True
#-------------------------------
@Suite("create_users", "Try creating admins and regular users, ensuring that admins can create users; users cannot create admins; duplicate users are forbidden.")
def test_create_users():
# make a normal user
try:
log.info("Create normal user")
ret = test( "create_user", "[email protected]", "http://www.vicci.org/id/[email protected]", "MAKE_SIGNING_KEY" )
check_result_present( ret, USER_ATTRS )
except Exception, e:
abort("Failed to create [email protected]", exception=e )
print ""
# make another normal user
try:
log.info("Create another normal user")
ret = test( "create_user", "[email protected]", "http://www.vicci.org/id/[email protected]", "MAKE_SIGNING_KEY" )
check_result_present( ret, USER_ATTRS )
except Exception, e:
abort("Failed to create [email protected]", exception=e )
print ""
# verify that we can't overwrite a user
try:
log.info("Create a user that already exists")
ret = test( "create_user", "[email protected]", "http://www.vicci.org/id/[email protected]", "MAKE_SIGNING_KEY" )
abort("Created a user over itself")
except Exception, e:
result(e.message)
pass
print ""
# try to create an admin
try:
log.info("Create admin by admin")
ret = test( "create_user", "[email protected]", "http://www.vicci.org/id/[email protected]", "MAKE_SIGNING_KEY" )
check_result_present( ret, USER_ATTRS )
except Exception, e:
abort("Failed to create admin [email protected]", exception=e )
print ""
# try to create an admin from a normal user (should fail)
try:
log.info("Unprivileged create admin")
ret = test("create_user", "[email protected]", "http://www.vicci.org/id/[email protected]", "MAKE_SIGNING_KEY", syntool_args=['-u', '[email protected]'], is_admin=True)
abort("Unprivileged user created an admin account")
except Exception, e:
result(e.message)
pass
print ""
#-------------------------------
@Suite("read_users", "Test reading users. Ensure the admin can read anyone, but a user can only read its account.")
def test_read_users():
# admin can read all users
try:
log.info("Read user as admin")
ret = test( "read_user", "[email protected]" )
check_result_present( ret, USER_ATTRS )
except Exception, e:
abort("Admin failed to read user [email protected]", exception=e )
print ""
# user can read himself
try:
log.info("Read user's own account")
ret = test( "read_user", "[email protected]", syntool_args=['-u', '[email protected]'] )
check_result_present( ret, USER_ATTRS )
except Exception, e:
abort("User failed to read its account", exception=e )
print ""
# user cannot read another account
try:
log.info("Read admin as user")
ret = test( "read_user", "[email protected]" )
abort("Unprivileged user read another account")
except Exception, e:
result(e.message)
pass
print ""
# cannot read users that don't exist
try:
log.info("Read nonexistent user")
ret = test( "read_user", "[email protected]" )
abort("Got data for a nonexistent account")
except Exception, e:
result(e.message)
pass
print ""
#-------------------------------
@Suite("update_users", "Test updating users. Ensure the admin can update anyone, but a user can only update itself.")
def test_update_users():
pass
#-------------------------------
@Suite("delete_users", "Test deleting users. Ensure the admin can delete anyone, but a user can only delete itself. Also, verify that keys are revoked.")
def test_delete_users():
# user can't delete an admin
try:
log.info("Delete admin with user account")
ret = test( "delete_user", "[email protected]", syntool_args=['-u', '[email protected]'] )
abort("Unprivileged user deleted an admin")
except Exception, e:
log.info(e.message)
pass
# user can delete itself
try:
log.info("User deletes itself")
ret = test("delete_user", "[email protected]", syntool_args=['-u', '[email protected]'] )
assert ret, "delete_user failed"
except Exception, e:
abort("User failed to delete itself", exception=e)
# admin can delete any user
try:
log.info("Admin deletes anyone")
ret = test("delete_user", "[email protected]" )
assert ret, "delete_user failed"
except Exception, e:
abort("Admin failed to delete another user", exception=e)
#-------------------------------
def get_test_suites( suite_list=None ):
# get all test suites
if suite_list == None:
suite_list = globals().keys()
suites = []
for suite_name in globals().keys():
obj = globals().get(suite_name, None)
if obj == None:
continue
if hasattr(obj, "test_suite"):
if obj.name in suite_list:
suites.append( obj )
return suites
#-------------------------------
def main( suites, test_config_dir=None ):
config = setup( TEST_CONFIG, TEST_ADMIN_ID, TEST_ADMIN_PKEY_PATH, testdir=test_config_dir )
if len(suites) == 0:
# run all suites
suite_funcs = get_test_suites()
suites = [sf.name for sf in suite_funcs]
if suites[0] == "list_suites":
suite_funcs = get_test_suites()
print "All test suites:"
print "\n".join( [" " + s.name + "\n " + s.description for s in suite_funcs] )
sys.exit(0)
else:
# run the suites
suite_funcs = get_test_suites( suites )
log.info("Run tests %s" % [sf.name for sf in suite_funcs])
for sf in suite_funcs:
sf()
#-------------------------------
if __name__ == "__main__":
test_dir = None
if len(sys.argv) == 1:
print "Usage: %s [-d testdir] suite [suite...]"
sys.exit(1)
suites = sys.argv[1:]
# get suites and test config directory
if len(sys.argv) > 2:
if sys.argv[1] == '-d':
test_dir = sys.argv[2]
suites = sys.argv[3:]
main( suites, test_config_dir=test_dir )
|
|
import os
import os.path as op
import shutil
import logging
from . import conf, errors, helper
logger = logging.getLogger(__name__)
names_rows_stability = [
['dg', 1], # totalEnergy
['backbone_hbond', 2],
['sidechain_hbond', 3],
['van_der_waals', 4],
['electrostatics', 5],
['solvation_polar', 6],
['solvation_hydrophobic', 7],
['van_der_waals_clashes', 8],
['entropy_sidechain', 9],
['entropy_mainchain', 10],
['sloop_entropy', 11],
['mloop_entropy', 12],
['cis_bond', 13],
['torsional_clash', 14],
['backbone_clash', 15],
['helix_dipole', 16],
['water_bridge', 17],
['disulfide', 18],
['electrostatic_kon', 19],
['partial_covalent_bonds', 20],
['energy_ionisation', 21],
['entropy_complex', 22],
['number_of_residues', 23]
]
names_stability_wt = (
[name + '_wt' for name in list(zip(*names_rows_stability))[0][:-1]] +
['number_of_residues'])
names_stability_mut = (
[name + '_mut' for name in list(zip(*names_rows_stability))[0][:-1]] +
['number_of_residues'])
names_rows_stability_complex = (
[['intraclashes_energy_1', 3], ['intraclashes_energy_2', 4], ] +
[[x[0], x[1] + 4] for x in names_rows_stability]
)
names_stability_complex_wt = (
[name + '_wt' for name in list(zip(*names_rows_stability_complex))[0][:-1]] +
['number_of_residues'])
names_stability_complex_mut = (
[name + '_mut' for name in list(zip(*names_rows_stability_complex))[0][:-1]] +
['number_of_residues'])
class FoldX(object):
    def __init__(self, pdb_file, chain_id, foldx_dir=None):
        """Wrap FoldX runs for a single PDB file and chain."""
self.pdb_filename = op.basename(pdb_file)
self.chain_id = chain_id
if foldx_dir is None:
self.foldx_dir = conf.CONFIGS['foldx_dir']
else:
self.foldx_dir = foldx_dir
self.foldx_runfile = op.join(self.foldx_dir, 'runfile_FoldX.txt')
    def __call__(self, whatToRun, mutCodes=()):
"""
Select which action should be performed by FoldX by setting `whatToRun`.
Possible values are:
- AnalyseComplex
- Stability
- RepairPDB
- BuildModel
See the `FoldX manual`_ for an explanation on what they do.
.. _FoldX manual: http://foldx.crg.es/manual3.jsp
"""
logger.debug('Running FoldX {}'.format(whatToRun))
self.__write_runfile(self.pdb_filename, self.chain_id, whatToRun, mutCodes)
self.__run_runfile()
if whatToRun == 'AnalyseComplex':
return self.__read_result(
op.join(self.foldx_dir, 'Interaction_AnalyseComplex_resultFile.txt'), whatToRun)
elif whatToRun == 'Stability':
return self.__read_result(
op.join(self.foldx_dir, 'Stability.txt'), whatToRun)
elif whatToRun == 'RepairPDB':
return op.join(self.foldx_dir, 'RepairPDB_' + self.pdb_filename)
elif whatToRun == 'BuildModel':
# see the FoldX manual for the naming of the generated structures
if conf.CONFIGS['foldx_num_of_runs'] == 1:
                mutants = [op.join(self.foldx_dir, self.pdb_filename[:-4] + '_1.pdb'), ]
                wildtype = [op.join(self.foldx_dir, 'WT_' + self.pdb_filename[:-4] + '_1.pdb'), ]
                results = [wildtype, mutants]
else:
mutants = [
op.join(self.foldx_dir, self.pdb_filename[:-4] + '_1_' + str(x) + '.pdb')
for x in range(0, conf.CONFIGS['foldx_num_of_runs'])
]
                wildtype = [
                    op.join(
                        self.foldx_dir,
                        'WT_' + self.pdb_filename[:-4] + '_1_' + str(x) + '.pdb')
                    for x in range(0, conf.CONFIGS['foldx_num_of_runs'])
                ]
                results = [wildtype, mutants]
return results
def __write_runfile(self, pdbFile, chainID, whatToRun, mutCodes):
if whatToRun == 'AnalyseComplex':
copy_filename = 'run-analyseComplex.txt'
command_line = '<AnalyseComplex>AnalyseComplex_resultFile.txt,{chainID};'\
.format(chainID=chainID)
output_pdb = 'false'
elif whatToRun == 'Stability':
copy_filename = 'run-stability.txt'
command_line = '<Stability>Stability.txt;'
output_pdb = 'false'
elif whatToRun == 'RepairPDB':
copy_filename = 'run-repair.txt'
command_line = '<RepairPDB>#;'
output_pdb = 'true'
elif whatToRun == 'BuildModel':
copy_filename = 'run-build.txt'
# file_with_mutations = 'mutant_file.txt'
file_with_mutations = 'individual_list.txt'
with open(op.join(self.foldx_dir, file_with_mutations), 'w') as fh:
fh.writelines(','.join(mutCodes) + ';\n')
command_line = '<BuildModel>BuildModel,{file_with_mutations};'\
.format(file_with_mutations=file_with_mutations)
output_pdb = 'true'
foldX_runfile = (
'<TITLE>FOLDX_runscript;\n'
'<JOBSTART>#;\n'
'<PDBS>{pdbFile};\n'
'<BATCH>#;\n'
'<COMMANDS>FOLDX_commandfile;\n'
'{command_line}\n'
'<END>#;\n'
'<OPTIONS>FOLDX_optionfile;\n'
'<Temperature>298;\n'
'<R>#;\n'
'<pH>7;\n'
'<IonStrength>0.050;\n'
'<numberOfRuns>{buildModel_runs};\n'
'<water>{water};\n'
'<metal>-CRYSTAL;\n'
'<VdWDesign>2;\n'
'<pdb_waters>false;\n'
'<OutPDB>{output_pdb};\n'
'<pdb_hydrogens>false;\n'
'<END>#;\n'
'<JOBEND>#;\n'
'<ENDFILE>#;\n').replace(' ', '').format(
pdbFile=pdbFile,
command_line=command_line,
buildModel_runs=conf.CONFIGS['foldx_num_of_runs'],
water=conf.CONFIGS['foldx_water'],
output_pdb=output_pdb)
# This just makes copies of the runfiles for debugging...
with open(self.foldx_runfile, 'w') as f:
f.write(foldX_runfile)
shutil.copy(self.foldx_runfile, op.join(self.foldx_dir, copy_filename))
def __run_runfile(self):
""".
.. todo:: Add a fallback plan using libfaketime.
"""
import faketime.config
# system_command = './FoldX.linux64 -runfile ' + self.foldx_runfile
system_command = "foldx -runfile '{}'".format(self.foldx_runfile)
logger.debug("FoldX system command: '{}'".format(system_command))
env = os.environ.copy()
env['LD_PRELOAD'] = faketime.config.libfaketime_so_file
env['FAKETIME'] = '2015-12-26 00:00:00'
p = helper.run(system_command, cwd=self.foldx_dir, env=env)
if p.stderr.strip():
logger.debug('foldx result:\n{}'.format(p.stdout.strip()))
logger.error('foldx error message:\n{}'.format(p.stderr.strip()))
if 'Cannot allocate memory' in p.stderr:
raise errors.ResourceError(p.stderr)
if 'There was a problem' in p.stdout:
logger.error('foldx result:\n{}'.format(p.stdout.strip()))
if 'Specified residue not found.' in p.stdout:
raise errors.MutationMismatchError()
def __read_result(self, outFile, whatToRead):
with open(outFile, 'r') as f:
lines = f.readlines()
line = lines[-1].split('\t')
if whatToRead == 'BuildModel':
total_energy_difference = line[1]
return total_energy_difference
if whatToRead == 'Stability':
stability_values = [
line[x[1]].strip() for x in names_rows_stability
]
return stability_values
if whatToRead == 'AnalyseComplex':
complex_stability_values = [
line[x[1]].strip() for x in names_rows_stability_complex
]
return complex_stability_values
|
|
'''eventgui.py -- gui for programstalta.py
usage: python eventgui.py [options]
options:
-h print this
'''
version = "0.55"
lastchangedate = "2014-12-14"
import sys
import getopt
import posixpath as pp
import pprint
from Tkinter import *
# from ttk import *
import tkFont
import tkFileDialog
import tkMessageBox
import base
from serialports import serialports
from logger import log
from programstalta import main as pgmmain
from programstalta import version as pgmversion
def force_suffix(fname, suffix):
"""won't suffix a directory.
second argument should not start with a period."""
head, tail = pp.split(fname)
if len(tail) == 0:
return head
if suffix[0] == ".":
suffix = suffix[1:]
fpart, fext = pp.splitext(tail)
newp = pp.join(head, fpart + "." + suffix)
return pp.normpath(newp)
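# e.g. force_suffix("runs/output.txt", "sac") -> "runs/output.sac"
#      force_suffix("runs/output", ".sac")    -> "runs/output.sac"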
class App(Frame):
def __init__(self, master):
Frame.__init__(self, master)
self.master = master
self.truedatafile = ""
self.statefile = ""
self.isrunning = False
self.entf = tkFont.Font(size = 14)
self.lblf = tkFont.Font(size = 14)
self.btnf = tkFont.Font(size = 12, slant = tkFont.ITALIC,
weight = tkFont.BOLD)
self.opnf = tkFont.Font(size = 14)
self.bgcolor = "#EEEEEE"
self.master.config(bg = self.bgcolor)
self.option_add("*background", self.bgcolor)
self.option_add("*Label*font", self.entf)
self.option_add("*Entry*font", self.entf)
self.option_add("*Button*font", self.btnf)
self.option_add("*OptionMenu*font", self.opnf)
self.option_add("*Button*background", "#C3C3C3")
self.option_add("*Checkbutton*background", "#D0D0D0")
self.option_add("*OptionMenu*background", "#C3C3C3")
row = 0
row += 1
Label(master, text = "processing parameters", font = self.lblf,
relief = RAISED, width = 30,
).grid(row = row, column = 3, columnspan = 2, sticky = E + W)
Label(master, text = " ").grid(row = row, column = 6, sticky = W,
padx = 50)
row += 1
Label(master, text = "Tsta").grid(row = row, column = 3, sticky = E)
self.Tsta = StringVar()
self.Tsta.set("0.25")
Entry(master, width = 10, textvariable = self.Tsta
).grid(row = row, column = 4, sticky = W)
Label(master, text = "short time average window"
).grid(row = row, column = 5, sticky = W)
Label(master, text = "secs").grid(row = row, column = 6, sticky = W)
row += 1
Label(master, text = "Tlta").grid(row = row, column = 3, sticky = E)
self.Tlta = StringVar()
self.Tlta.set("90.0")
Entry(master, width = 10, textvariable = self.Tlta
).grid(row = row, column = 4, sticky = W)
Label(master, text = "long time average window"
).grid(row = row, column = 5, sticky = W)
Label(master, text = "secs").grid(row = row, column = 6, sticky = W)
row += 1
Label(master, text = "Trigger").grid(row = row, column = 3, sticky = E)
self.Triggerthreshold = StringVar()
self.Triggerthreshold.set("5.0")
Entry(master, width = 10, textvariable = self.Triggerthreshold
).grid(row = row, column = 4, sticky = W)
Label(master, text = "sta/lta trigger level"
).grid(row = row, column = 5, sticky = W)
Label(master, text = "ratio").grid(row = row, column = 6, sticky = W)
row += 1
Label(master, text = "Detrigger").grid(row = row,
column = 3, sticky = E)
self.Detriggerthreshold = StringVar()
self.Detriggerthreshold.set("2.0")
Entry(master, width = 10, textvariable = self.Detriggerthreshold
).grid(row = row, column = 4, sticky = W)
Label(master, text = "sta/lta de-trigger level"
).grid(row = row, column = 5, sticky = W)
Label(master, text = "ratio").grid(row = row, column = 6, sticky = W)
row += 1
Label(master, text = "Trigduration"
).grid(row = row, column = 3, sticky = E)
self.Trigduration = StringVar()
self.Trigduration.set("30.0")
Entry(master, width = 10, textvariable = self.Trigduration,
).grid(row = row, column = 4, sticky = W)
Label(master, text = "post-trigger event duration"
).grid(row = row, column = 5, sticky = W)
Label(master, text = "secs").grid(row = row, column = 6, sticky = W)
row += 1
Label(master, text = "Trigdesense"
).grid(row = row, column = 3, sticky = E)
self.Trigdsensetime = StringVar()
self.Trigdsensetime.set("0.0")
Entry(master, width = 10, textvariable = self.Trigdsensetime,
).grid(row = row, column = 4, sticky = W)
Label(master, text = "lta desense time scale"
).grid(row = row, column = 5, sticky = W)
Label(master, text = "secs").grid(row = row, column = 6, sticky = W)
row += 1
Label(master, text = " ").grid(row = row, column = 3, sticky = W)
row += 1
Label(master, text = "logging parameters", font = self.lblf,
relief = RAISED, width = 30,
).grid(row = row, column = 3, columnspan = 2, sticky = E + W)
row += 1
Label(master, text = "Loglevel"
).grid(row = row, column = 3, sticky = E)
self.Loglevelsel = StringVar()
self.Loglevelsel.set("debug")
self.llb = OptionMenu(master, self.Loglevelsel,
"debug", "info", "warning", "error")
self.llb.grid(row = row, column = 4, sticky = W)
Label(master, text = "logging level"
).grid(row = row, column = 5, sticky = W)
row += 1
Label(master, text = "Logfile"
).grid(row = row, column = 3, sticky = E)
self.Logfile = StringVar()
self.Logfile.set("")
Entry(master, width = 10, textvariable = self.Logfile
).grid(row = row, column = 4, sticky = W)
Label(master, text = "log (txt) filename"
).grid(row = row, column = 5, sticky = W)
row += 1
self.Outfile = StringVar()
self.Outfile.set("")
self.Outshowfile = StringVar()
self.Outshowfile.set("")
Entry(master, width = 10, textvariable = self.Outshowfile
).grid(row = row, column = 4, sticky = W)
Label(master, text = "data (sac) filename"
).grid(row = row, column = 5, sticky = W)
Button(master, text = "specify output file", command = self.OnOutBrowse,
).grid(row = row, column = 3, sticky = E + W, padx = 4)
row += 1
Label(master, text = "Eventfile"
).grid(row = row, column = 3, sticky = E)
self.Eventfile = StringVar()
self.Eventfile.set("")
Entry(master, width = 10, textvariable = self.Eventfile
).grid(row = row, column = 4, sticky = W)
Label(master, text = "event (xlsx) filename"
).grid(row = row, column = 5, sticky = W)
row += 1
Label(master, text = " ").grid(row = row, column = 3, sticky = W)
row += 1
Label(master, text = "control parameters", font = self.lblf,
relief = RAISED, width = 30,
).grid(row = row, column = 3, columnspan = 2, sticky = E + W)
row += 1
Label(master, text = "Jobduration"
).grid(row = row, column = 3, sticky = E)
self.Jobduration = StringVar()
self.Jobduration.set("")
Entry(master, width = 10, textvariable = self.Jobduration
).grid(row = row, column = 4, sticky = W)
Label(master, text = "acquisition duration"
).grid(row = row, column = 5, sticky = W)
Label(master, text = "secs").grid(row = row, column = 6, sticky = W)
row += 1
self.Doalarm = IntVar()
Checkbutton(master, text = "event alarm", variable = self.Doalarm,
font = self.entf).grid(row = row, column = 3, sticky = W)
self.Alarmduration = StringVar()
self.Alarmduration.set("2.0")
Entry(master, width = 10, textvariable = self.Alarmduration
).grid(row = row, column = 4, sticky = W)
Label(master, text = "alarm duration"
).grid(row = row, column = 5, sticky = W)
Label(master, text = "secs").grid(row = row, column = 6, sticky = W)
row += 1
Label(master, text = " ").grid(row = row, column = 3, sticky = W)
row += 1
Label(master, text = "data source", font = self.lblf,
relief = RAISED, width = 30,
).grid(row = row, column = 3, columnspan = 2, sticky = E + W)
row += 1
self.Comcheck = IntVar()
Checkbutton(master, text = "use comport", variable = self.Comcheck,
font = self.entf,
).grid(row = row, column = 3, sticky = W)
comportlist = []
for name, desc, hwid in serialports():
comportlist.append(name)
if len(comportlist) == 0:
comportlist = ["-none-", ]
self.comport = StringVar()
self.comport.set(comportlist[0])
self.ports = OptionMenu(master, self.comport, *comportlist)
self.ports.grid(row = row, column = 4, sticky = W)
Label(master, text = "active comport"
).grid(row = row, column = 5, sticky = W)
row += 1
self.datafile = StringVar()
self.datafile.set("")
self.truedatafile = StringVar()
self.truedatafile.set("")
Label(master, text = "input (sac) file"
).grid(row = row, column = 5, sticky = W)
Entry(master, textvariable = self.datafile,
).grid(row = row, column = 4, sticky = W)
Button(master, text = "select input file", command = self.OnBrowse,
).grid(row = row, column = 3, sticky = E + W, padx = 4)
row += 1
Label(master, text = " ").grid(row = row, column = 3, sticky = W)
row += 1
Label(master, text = "display control", font = self.lblf,
relief = RAISED, width = 30,
).grid(row = row, column = 3, columnspan = 2, sticky = E + W)
row += 1
self.doplot = IntVar()
Checkbutton(master, text = "plot results", variable = self.doplot,
font = self.entf,
).grid(row = row, column = 3, columnspan = 2, sticky = W)
row += 1
self.doplotavg = IntVar()
Checkbutton(master, text = "also plot running averages",
variable = self.doplotavg,
font = self.entf,
).grid(row = row, column = 3, columnspan = 2, sticky = W,
padx = 20)
row += 1
self.doploty = IntVar()
Checkbutton(master, text = "show trace subplot",
variable = self.doploty,
font = self.entf,
).grid(row = row, column = 3, columnspan = 2, sticky = W,
padx = 20)
row += 1
self.doploth = IntVar()
Checkbutton(master, text = "plot histograms",
variable = self.doploth,
font = self.entf,
).grid(row = row, column = 3, columnspan = 2, sticky = W,
padx = 20)
row += 1
self.showcommand = IntVar()
Checkbutton(master, text = "show command line (debug)",
variable = self.showcommand,
font = self.entf,
).grid(row = row, column = 3, columnspan = 2, sticky = W)
row += 1
Label(master, text = " ").grid(row = row, column = 3, sticky = W)
row += 1
col = 3
self.runb = Button(master, fg = "blue", text = "run",
command = self.OnRun)
self.runb.grid(row = row, column = col, sticky = N)
col += 1
self.finishb = Button(master, fg = "magenta", text = "finish",
state = DISABLED,
command = self.OnFinish)
self.finishb.grid(row = row, column = col, sticky = N)
col += 1
Button(master, fg = "blue", text = "save", state = DISABLED,
command = self.saveState).grid(row = row,
column = col, sticky = N)
col += 1
Button(master, fg = "blue", text = "load", state = DISABLED,
command = self.loadState).grid(row = row,
column = col, sticky = W)
col += 1
Button(master, fg = "red", text = "quit",
command = self.OnQuit).grid(row = row, column = col, sticky = N)
col += 1
Label(master, fg = "red", text = " ",
).grid(row = row, column = col, sticky = W)
def OnRun(self):
args = [
"eventgui",
"-g",
"-S", self.Tsta.get(),
"-L", self.Tlta.get(),
"-T", self.Triggerthreshold.get(),
"-D", self.Detriggerthreshold.get(),
"-P", self.Trigduration.get(),
"-F", self.Trigdsensetime.get(),
"-A", self.Alarmduration.get(),
"-l", self.Loglevelsel.get(),
"-m",
]
if self.Logfile.get() != "":
args.extend(("-w", force_suffix(self.Logfile.get(), "txt")))
if self.Outfile.get() != "":
args.extend(("-s", force_suffix(self.Outfile.get(), "sac")))
elif self.Outshowfile.get() != "":
args.extend(("-s", force_suffix(self.Outshowfile.get(), "sac")))
if self.Eventfile.get() != "":
args.extend(("-e", force_suffix(self.Eventfile.get(), "xlsx")))
if (self.doplot.get() or self.doplotavg.get()
or self.doploty.get() or self.doploth.get()):
args.append("-p")
if self.doplotavg.get():
args.append("-r")
if self.doploty.get():
args.append("-y")
if self.doploth.get():
args.append("-d")
if not self.Doalarm.get():
args.append("-q")
if self.Comcheck.get() == 0:
args.append("-q")
if self.truedatafile.get() != "":
args.append(self.truedatafile.get())
elif self.datafile.get() != "":
args.append(self.datafile.get())
else:
tkMessageBox.showerror(title = "no data source",
message = "check 'use comport' or provide a data file")
return
else:
if self.comport.get() != "-none-":
args.extend(("-c", self.comport.get()))
if self.Jobduration.get() != "":
args.extend(("-i", self.Jobduration.get()))
else:
tkMessageBox.showerror(title = "no available serial port",
message = "you must choose a data file")
self.Comcheck.set(0)
return
if self.showcommand.get():
print >> sys.stderr, "--------command line-----------"
pprint.pprint(args, stream = sys.stderr)
print >> sys.stderr, "-------------------------------"
base.Globs["quitflag"] = False
base.Globs["finishflag"] = False
self.runb.config(state = DISABLED)
self.finishb.config(state = NORMAL)
self.isrunning = True
r = pgmmain(args)
self.isrunning = False
self.finishb.config(state = DISABLED)
self.runb.config(state = NORMAL)
if r != 0:
log().error("pgmmain returned %s" % r)
self.reallyquit()
if base.Globs["quitflag"]:
log().debug("quitting on global quitflag")
self.reallyquit()
base.Globs["quitflag"] = True
base.Globs["finishflag"] = True
def OnOutBrowse(self):
self.Outfile.set(tkFileDialog.asksaveasfilename(
filetypes = [('sac data file', '*.sac')]))
if self.Outfile.get() != "":
self.Outshowfile.set(pp.basename(self.Outfile.get()))
def OnBrowse(self):
self.truedatafile.set(tkFileDialog.askopenfilename())
if self.truedatafile.get() != "":
self.datafile.set(pp.basename(self.truedatafile.get()))
def loadState(self):
pass
def saveState(self):
pass
def reallyquit(self):
self.quit()
def OnFinish(self):
base.Globs["finishflag"] = True
def OnQuit(self):
if not self.isrunning:
self.reallyquit()
if base.Globs["quitflag"]:
self.reallyquit()
base.Globs["quitflag"] = True
class Usage(Exception):
    """Wrapper for getopt usage errors raised in main()."""
    def __init__(self, msg):
        Exception.__init__(self, msg)
        self.msg = msg
def main(argv=None):
if argv is None:
argv = sys.argv
options = "h"
try:
try:
opts, datafiles = getopt.getopt(argv[1:], options, ["help"])
except getopt.error, msg:
raise Usage(msg)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__ + "\n\n" + version
sys.exit(0)
root = Tk()
app = App(root)
base.Globs["predatacallback"] = app.update
base.Globs["version"] = "%.2f" % (float(version) + float(pgmversion))
app.master.title("sta/lta event detection"
+ " version " + base.Globs["version"]
+ " "
+ " [gui " + version
+ " algorithm " + pgmversion + "]")
app.mainloop()
try:
root.destroy()
except:
pass
except Exception, e:
log().exception("gui error")
print >> sys.stderr, e
return 3
if __name__ == "__main__":
sys.exit(main())
|
|
import sys
import re
import os
import warnings
import glob
import numpy as np
import openmc
import openmc.checkvalue as cv
class StatePoint(object):
"""State information on a simulation at a certain point in time (at the end
of a given batch). Statepoints can be used to analyze tally results as well
as restart a simulation.
Parameters
----------
filename : str
Path to file to load
autolink : bool, optional
Whether to automatically link in metadata from a summary.h5 file and
stochastic volume calculation results from volume_*.h5 files. Defaults
to True.
Attributes
----------
cmfd_on : bool
Indicate whether CMFD is active
cmfd_balance : numpy.ndarray
Residual neutron balance for each batch
    cmfd_dominance : numpy.ndarray
        Dominance ratio for each batch
cmfd_entropy : numpy.ndarray
Shannon entropy of CMFD fission source for each batch
cmfd_indices : numpy.ndarray
Number of CMFD mesh cells and energy groups. The first three indices
correspond to the x-, y-, and z- spatial directions and the fourth index
is the number of energy groups.
cmfd_srccmp : numpy.ndarray
Root-mean-square difference between OpenMC and CMFD fission source for
each batch
cmfd_src : numpy.ndarray
CMFD fission source distribution over all mesh cells and energy groups.
current_batch : int
Number of batches simulated
date_and_time : str
Date and time when simulation began
entropy : numpy.ndarray
Shannon entropy of fission source at each batch
gen_per_batch : Integral
Number of fission generations per batch
global_tallies : numpy.ndarray of compound datatype
Global tallies for k-effective estimates and leakage. The compound
datatype has fields 'name', 'sum', 'sum_sq', 'mean', and 'std_dev'.
k_combined : list
Combined estimator for k-effective and its uncertainty
k_col_abs : float
Cross-product of collision and absorption estimates of k-effective
k_col_tra : float
Cross-product of collision and tracklength estimates of k-effective
k_abs_tra : float
Cross-product of absorption and tracklength estimates of k-effective
k_generation : numpy.ndarray
Estimate of k-effective for each batch/generation
meshes : dict
Dictionary whose keys are mesh IDs and whose values are Mesh objects
n_batches : int
Number of batches
n_inactive : int
Number of inactive batches
n_particles : int
Number of particles per generation
n_realizations : int
Number of tally realizations
path : str
Working directory for simulation
run_mode : str
Simulation run mode, e.g. 'k-eigenvalue'
runtime : dict
Dictionary whose keys are strings describing various runtime metrics
and whose values are time values in seconds.
seed : int
Pseudorandom number generator seed
source : numpy.ndarray of compound datatype
Array of source sites. The compound datatype has fields 'wgt', 'xyz',
'uvw', and 'E' corresponding to the weight, position, direction, and
energy of the source site.
source_present : bool
Indicate whether source sites are present
sparse : bool
Whether or not the tallies use SciPy's LIL sparse matrix format for
compressed data storage
tallies : dict
Dictionary whose keys are tally IDs and whose values are Tally objects
tallies_present : bool
Indicate whether user-defined tallies are present
version : tuple of Integral
Version of OpenMC
summary : None or openmc.Summary
A summary object if the statepoint has been linked with a summary file
"""
def __init__(self, filename, autolink=True):
import h5py
if h5py.__version__ == '2.6.0':
raise ImportError("h5py 2.6.0 has a known bug which makes it "
"incompatible with OpenMC's HDF5 files. "
"Please switch to a different version.")
self._f = h5py.File(filename, 'r')
# Ensure filetype and revision are correct
try:
if 'filetype' not in self._f or self._f[
'filetype'].value.decode() != 'statepoint':
raise IOError('{} is not a statepoint file.'.format(filename))
except AttributeError:
raise IOError('Could not read statepoint file. This most likely '
'means the statepoint file was produced by a '
'different version of OpenMC than the one you are '
'using.')
if self._f['revision'].value != 15:
raise IOError('Statepoint file has a file revision of {} '
'which is not consistent with the revision this '
'version of OpenMC expects ({}).'.format(
self._f['revision'].value, 15))
# Set flags for what data has been read
self._meshes_read = False
self._tallies_read = False
self._summary = None
self._global_tallies = None
self._sparse = False
# Automatically link in a summary file if one exists
if autolink:
path_summary = os.path.join(os.path.dirname(filename), 'summary.h5')
if os.path.exists(path_summary):
su = openmc.Summary(path_summary)
self.link_with_summary(su)
path_volume = os.path.join(os.path.dirname(filename), 'volume_*.h5')
for path_i in glob.glob(path_volume):
if re.search(r'volume_\d+\.h5', path_i):
vol = openmc.VolumeCalculation.from_hdf5(path_i)
self.add_volume_information(vol)
def close(self):
self._f.close()
@property
def cmfd_on(self):
return self._f['cmfd_on'].value > 0
@property
def cmfd_balance(self):
return self._f['cmfd/cmfd_balance'].value if self.cmfd_on else None
@property
def cmfd_dominance(self):
return self._f['cmfd/cmfd_dominance'].value if self.cmfd_on else None
@property
def cmfd_entropy(self):
return self._f['cmfd/cmfd_entropy'].value if self.cmfd_on else None
@property
def cmfd_indices(self):
return self._f['cmfd/indices'].value if self.cmfd_on else None
@property
def cmfd_src(self):
if self.cmfd_on:
data = self._f['cmfd/cmfd_src'].value
return np.reshape(data, tuple(self.cmfd_indices), order='F')
else:
return None
@property
def cmfd_srccmp(self):
return self._f['cmfd/cmfd_srccmp'].value if self.cmfd_on else None
@property
def current_batch(self):
return self._f['current_batch'].value
@property
def date_and_time(self):
return self._f['date_and_time'].value.decode()
@property
def entropy(self):
if self.run_mode == 'k-eigenvalue':
return self._f['entropy'].value
else:
return None
@property
def gen_per_batch(self):
if self.run_mode == 'k-eigenvalue':
return self._f['gen_per_batch'].value
else:
return None
@property
def global_tallies(self):
if self._global_tallies is None:
data = self._f['global_tallies'].value
gt = np.zeros_like(data, dtype=[
('name', 'a14'), ('sum', 'f8'), ('sum_sq', 'f8'),
('mean', 'f8'), ('std_dev', 'f8')])
gt['name'] = ['k-collision', 'k-absorption', 'k-tracklength',
'leakage']
gt['sum'] = data['sum']
gt['sum_sq'] = data['sum_sq']
# Calculate mean and sample standard deviation of mean
n = self.n_realizations
gt['mean'] = gt['sum']/n
gt['std_dev'] = np.sqrt((gt['sum_sq']/n - gt['mean']**2)/(n - 1))
self._global_tallies = gt
return self._global_tallies
@property
def k_cmfd(self):
if self.cmfd_on:
return self._f['cmfd/k_cmfd'].value
else:
return None
@property
def k_generation(self):
if self.run_mode == 'k-eigenvalue':
return self._f['k_generation'].value
else:
return None
@property
def k_combined(self):
if self.run_mode == 'k-eigenvalue':
return self._f['k_combined'].value
else:
return None
@property
def k_col_abs(self):
if self.run_mode == 'k-eigenvalue':
return self._f['k_col_abs'].value
else:
return None
@property
def k_col_tra(self):
if self.run_mode == 'k-eigenvalue':
return self._f['k_col_tra'].value
else:
return None
@property
def k_abs_tra(self):
if self.run_mode == 'k-eigenvalue':
return self._f['k_abs_tra'].value
else:
return None
@property
def meshes(self):
if not self._meshes_read:
# Initialize dictionaries for the Meshes
# Keys - Mesh IDs
# Values - Mesh objects
self._meshes = {}
# Read the number of Meshes
n_meshes = self._f['tallies/meshes/n_meshes'].value
# Read a list of the IDs for each Mesh
if n_meshes > 0:
# User-defined Mesh IDs
mesh_keys = self._f['tallies/meshes/keys'].value
else:
mesh_keys = []
# Build dictionary of Meshes
base = 'tallies/meshes/mesh '
# Iterate over all Meshes
for mesh_key in mesh_keys:
# Read the mesh type
mesh_type = self._f['{0}{1}/type'.format(base, mesh_key)].value.decode()
# Read the mesh dimensions, lower-left coordinates,
# upper-right coordinates, and width of each mesh cell
dimension = self._f['{0}{1}/dimension'.format(base, mesh_key)].value
lower_left = self._f['{0}{1}/lower_left'.format(base, mesh_key)].value
upper_right = self._f['{0}{1}/upper_right'.format(base, mesh_key)].value
width = self._f['{0}{1}/width'.format(base, mesh_key)].value
# Create the Mesh and assign properties to it
mesh = openmc.Mesh(mesh_key)
mesh.dimension = dimension
mesh.width = width
mesh.lower_left = lower_left
mesh.upper_right = upper_right
mesh.type = mesh_type
# Add mesh to the global dictionary of all Meshes
self._meshes[mesh_key] = mesh
self._meshes_read = True
return self._meshes
@property
def n_batches(self):
return self._f['n_batches'].value
@property
def n_inactive(self):
if self.run_mode == 'k-eigenvalue':
return self._f['n_inactive'].value
else:
return None
@property
def n_particles(self):
return self._f['n_particles'].value
@property
def n_realizations(self):
return self._f['n_realizations'].value
@property
def path(self):
return self._f['path'].value.decode()
@property
def run_mode(self):
return self._f['run_mode'].value.decode()
@property
def runtime(self):
return {name: dataset.value
for name, dataset in self._f['runtime'].items()}
@property
def seed(self):
return self._f['seed'].value
@property
def source(self):
return self._f['source_bank'].value if self.source_present else None
@property
def source_present(self):
return self._f['source_present'].value > 0
@property
def sparse(self):
return self._sparse
@property
def tallies(self):
if not self._tallies_read:
# Initialize dictionary for tallies
self._tallies = {}
# Read the number of tallies
n_tallies = self._f['tallies/n_tallies'].value
# Read a list of the IDs for each Tally
if n_tallies > 0:
# OpenMC Tally IDs (redefined internally from user definitions)
tally_keys = self._f['tallies/keys'].value
else:
tally_keys = []
base = 'tallies/tally '
# Iterate over all Tallies
for tally_key in tally_keys:
# Read the Tally size specifications
n_realizations = \
self._f['{0}{1}/n_realizations'.format(base, tally_key)].value
# Create Tally object and assign basic properties
tally = openmc.Tally(tally_id=tally_key)
tally._sp_filename = self._f.filename
tally.estimator = self._f['{0}{1}/estimator'.format(
base, tally_key)].value.decode()
tally.num_realizations = n_realizations
# Read the number of Filters
n_filters = \
self._f['{0}{1}/n_filters'.format(base, tally_key)].value
subbase = '{0}{1}/filter '.format(base, tally_key)
# Read all filters
for j in range(1, n_filters+1):
subsubbase = '{0}{1}'.format(subbase, j)
new_filter = openmc.Filter.from_hdf5(self._f[subsubbase],
meshes=self.meshes)
tally.filters.append(new_filter)
# Read Nuclide bins
nuclide_names = \
self._f['{0}{1}/nuclides'.format(base, tally_key)].value
# Add all Nuclides to the Tally
for name in nuclide_names:
nuclide = openmc.Nuclide(name.decode().strip())
tally.nuclides.append(nuclide)
scores = self._f['{0}{1}/score_bins'.format(
base, tally_key)].value
n_score_bins = self._f['{0}{1}/n_score_bins'
.format(base, tally_key)].value
# Compute and set the filter strides
for i in range(n_filters):
tally_filter = tally.filters[i]
tally_filter.stride = n_score_bins * len(nuclide_names)
for j in range(i+1, n_filters):
tally_filter.stride *= tally.filters[j].num_bins
# Read scattering moment order strings (e.g., P3, Y1,2, etc.)
moments = self._f['{0}{1}/moment_orders'.format(
base, tally_key)].value
# Add the scores to the Tally
for j, score in enumerate(scores):
score = score.decode()
# If this is a moment, use generic moment order
pattern = r'-n$|-pn$|-yn$'
score = re.sub(pattern, '-' + moments[j].decode(), score)
tally.scores.append(score)
# Add Tally to the global dictionary of all Tallies
tally.sparse = self.sparse
self._tallies[tally_key] = tally
self._tallies_read = True
return self._tallies
@property
def tallies_present(self):
return self._f['tallies/tallies_present'].value
@property
def version(self):
return (self._f['version_major'].value,
self._f['version_minor'].value,
self._f['version_release'].value)
@property
def summary(self):
return self._summary
@sparse.setter
def sparse(self, sparse):
"""Convert tally data from NumPy arrays to SciPy list of lists (LIL)
sparse matrices, and vice versa.
This property may be used to reduce the amount of data in memory during
tally data processing. The tally data will be stored as SciPy LIL
matrices internally within each Tally object. All tally data access
properties and methods will return data as a dense NumPy array.
"""
cv.check_type('sparse', sparse, bool)
self._sparse = sparse
# Update tally sparsities
if self._tallies_read:
for tally_id in self.tallies:
self.tallies[tally_id].sparse = self.sparse
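# Illustrative use of the setter above (sp is a StatePoint instance):
#
#     sp.sparse = True   # store tally results internally as SciPy LIL matrices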
def add_volume_information(self, volume_calc):
"""Add volume information to the geometry within the file
Parameters
----------
volume_calc : openmc.VolumeCalculation
Results from a stochastic volume calculation
"""
if self.summary is not None:
self.summary.add_volume_information(volume_calc)
def get_tally(self, scores=[], filters=[], nuclides=[],
name=None, id=None, estimator=None, exact_filters=False,
exact_nuclides=False, exact_scores=False):
"""Finds and returns a Tally object with certain properties.
This routine searches the list of Tallies and returns the first Tally
found which satisfies all of the input parameters.
NOTE: If any of the "exact" parameters are False (default), the input
parameters do not need to match the complete Tally specification and
may only represent a subset of the Tally's properties. If an "exact"
parameter is True, then the number of scores, filters, or nuclides in the
parameters must precisely match those of any matching Tally.
Parameters
----------
scores : list, optional
A list of one or more score strings (default is []).
filters : list, optional
A list of Filter objects (default is []).
nuclides : list, optional
A list of Nuclide objects (default is []).
name : str, optional
The name specified for the Tally (default is None).
id : Integral, optional
The id specified for the Tally (default is None).
estimator : str, optional
The type of estimator ('tracklength', 'analog'; default is None).
exact_filters : bool
If True, the number of filters in the parameters must be identical
to those in the matching Tally. If False (default), the filters in
the parameters may be a subset of those in the matching Tally.
exact_nuclides : bool
If True, the number of nuclides in the parameters must be identical
to those in the matching Tally. If False (default), the nuclides in
the parameters may be a subset of those in the matching Tally.
exact_scores : bool
If True, the number of scores in the parameters must be identical
to those in the matching Tally. If False (default), the scores
in the parameters may be a subset of those in the matching Tally.
Returns
-------
tally : openmc.Tally
A tally matching the specified criteria
Raises
------
LookupError
If a Tally meeting all of the input parameters cannot be found in
the statepoint.
"""
tally = None
# Iterate over all tallies to find the appropriate one
for test_tally in self.tallies.values():
# Determine if Tally has queried name
if name and name != test_tally.name:
continue
# Determine if Tally has queried id
if id and id != test_tally.id:
continue
# Determine if Tally has queried estimator
if estimator and estimator != test_tally.estimator:
continue
# The number of filters, nuclides and scores must exactly match
if exact_scores and len(scores) != test_tally.num_scores:
continue
if exact_nuclides and len(nuclides) != test_tally.num_nuclides:
continue
if exact_filters and len(filters) != test_tally.num_filters:
continue
# Determine if Tally has the queried score(s)
if scores:
contains_scores = True
# Iterate over the scores requested by the user
for score in scores:
if score not in test_tally.scores:
contains_scores = False
break
if not contains_scores:
continue
# Determine if Tally has the queried Filter(s)
if filters:
contains_filters = True
# Iterate over the Filters requested by the user
for outer_filter in filters:
contains_filters = False
# Test if requested filter is a subset of any of the test
# tally's filters and if so continue to next filter
for inner_filter in test_tally.filters:
if inner_filter.is_subset(outer_filter):
contains_filters = True
break
if not contains_filters:
break
if not contains_filters:
continue
# Determine if Tally has the queried Nuclide(s)
if nuclides:
contains_nuclides = True
# Iterate over the Nuclides requested by the user
for nuclide in nuclides:
if nuclide not in test_tally.nuclides:
contains_nuclides = False
break
if not contains_nuclides:
continue
# If the current Tally met user's request, break loop and return it
tally = test_tally
break
# If we did not find the Tally, return an error message
if tally is None:
raise LookupError('Unable to get Tally')
return tally
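# Illustrative queries against the lookup above (sp is a StatePoint instance;
# the tally name, id, and scores are hypothetical):
#
#     tally = sp.get_tally(name='fuel heating')
#     tally = sp.get_tally(id=4, estimator='tracklength')
#     tally = sp.get_tally(scores=['fission'], exact_scores=True)
#
# Each call returns the first Tally satisfying every given criterion and
# raises LookupError when none matches.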
def link_with_summary(self, summary):
"""Links Tallies and Filters with Summary model information.
This routine retrieves model information (materials, geometry) from a
Summary object populated with an HDF5 'summary.h5' file and inserts it
into the Tally objects. This can be helpful when viewing and
manipulating large scale Tally data. Note that it is necessary to link
against a summary to populate the Tallies with any user-specified "name"
XML tags.
Parameters
----------
summary : openmc.Summary
A Summary object.
Raises
------
ValueError
An error when the argument passed to the 'summary' parameter is not
an openmc.Summary object.
"""
if self.summary is not None:
warnings.warn('A Summary object has already been linked.',
RuntimeWarning)
return
if not isinstance(summary, openmc.summary.Summary):
msg = 'Unable to link statepoint with "{0}" which ' \
'is not a Summary object'.format(summary)
raise ValueError(msg)
for tally_id, tally in self.tallies.items():
tally.name = summary.tally_names[tally_id]
tally.with_summary = True
for tally_filter in tally.filters:
if isinstance(tally_filter, (openmc.CellFilter,
openmc.DistribcellFilter)):
distribcell_ids = []
for bin in tally_filter.bins:
distribcell_ids.append(summary.cells[bin].id)
tally_filter.bins = distribcell_ids
if isinstance(tally_filter, (openmc.DistribcellFilter)):
cell_id = tally_filter.bins[0]
cell = summary.get_cell_by_id(cell_id)
tally_filter.distribcell_paths = cell.distribcell_paths
if isinstance(tally_filter, openmc.UniverseFilter):
universe_ids = []
for bin in tally_filter.bins:
universe_ids.append(summary.universes[bin].id)
tally_filter.bins = universe_ids
if isinstance(tally_filter, openmc.MaterialFilter):
material_ids = []
for bin in tally_filter.bins:
material_ids.append(summary.materials[bin].id)
tally_filter.bins = material_ids
self._summary = summary
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import copy
from datetime import datetime
from functools import partial
import json
import os
import posixpath
import re
import sys
from code import Code
import json_parse
# The template for the header file of the generated FeatureProvider.
HEADER_FILE_TEMPLATE = """
// Copyright %(year)s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// GENERATED FROM THE FEATURES FILE:
// %(source_files)s
// by tools/json_schema_compiler.
// DO NOT EDIT.
#ifndef %(header_guard)s
#define %(header_guard)s
namespace extensions {
class FeatureProvider;
void %(method_name)s(FeatureProvider* provider);
} // namespace extensions
#endif // %(header_guard)s
"""
# The beginning of the .cc file for the generated FeatureProvider.
CC_FILE_BEGIN = """
// Copyright %(year)s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// GENERATED FROM THE FEATURES FILE:
// %(source_files)s
// by tools/json_schema_compiler.
// DO NOT EDIT.
#include "%(header_file_path)s"
#include "extensions/common/features/complex_feature.h"
#include "extensions/common/features/feature_provider.h"
#include "extensions/common/features/manifest_feature.h"
#include "extensions/common/features/permission_feature.h"
#include "extensions/common/mojom/feature_session_type.mojom.h"
namespace extensions {
void %(method_name)s(FeatureProvider* provider) {
"""
# The end of the .cc file for the generated FeatureProvider.
CC_FILE_END = """
}
} // namespace extensions
"""
def ToPosixPath(path):
"""Returns |path| with separator converted to POSIX style.
This is needed to generate C++ #include paths.
"""
return path.replace(os.path.sep, posixpath.sep)
# Returns True if the list 'l' only contains strings that are hex-encoded SHA1
# hashes.
def ListContainsOnlySha1Hashes(l):
return len(list(filter(lambda s: not re.match("^[A-F0-9]{40}$", s), l))) == 0
# A "grammar" for what is and isn't allowed in the features.json files. This
# grammar has to list all possible keys and the requirements for each. The
# format of each entry is:
# 'key': {
# allowed_type_1: optional_properties,
# allowed_type_2: optional_properties,
# }
# |allowed_types| are the types of values that can be used for a given key. The
# possible values are list, str, bool, and int.
# |optional_properties| provide more restrictions on the given type. The options
# are:
# 'subtype': Only applicable for lists. If provided, this enforces that each
# entry in the list is of the specified type.
# 'enum_map': A map of strings to C++ enums. When the compiler sees the given
# enum string, it will replace it with the C++ version in the
# compiled code. For instance, if a feature specifies
# 'channel': 'stable', the generated C++ will assign
# version_info::Channel::STABLE to channel. The keys in this map
# also serve as a list of all possible values.
# 'allow_all': Only applicable for lists. If present, this will check for
# a value of "all" for a list value, and will replace it with
# the collection of all possible values. For instance, if a
# feature specifies 'contexts': 'all', the generated C++ will
# assign the list of Feature::BLESSED_EXTENSION_CONTEXT,
# Feature::BLESSED_WEB_PAGE_CONTEXT et al for contexts. If not
# specified, defaults to false.
# 'allow_empty': Only applicable for lists. Whether an empty list is a valid
# value. If omitted, empty lists are prohibited.
# 'validators': A list of (function, str) pairs with a function to run on the
# value for a feature. Validators allow for more flexible or
# one-off style validation than just what's in the grammar (such
# as validating the content of a string). The validator function
# should return True if the value is valid, and False otherwise.
# If the value is invalid, the specified error will be added for
# that key.
# 'values': A list of all possible allowed values for a given key.
# 'shared': Boolean that, if set, ensures that only one of the associated
# features has the feature property set. Used primarily for complex
# features - for simple features, there is always at most one feature
# setting an option.
# If a type definition does not have any restrictions (beyond the type itself),
# an empty definition ({}) is used.
FEATURE_GRAMMAR = ({
'alias': {
str: {},
'shared': True
},
'allowlist': {
list: {
'subtype':
str,
'validators':
[(ListContainsOnlySha1Hashes,
'list should only have hex-encoded SHA1 hashes of extension ids')]
}
},
'blocklist': {
list: {
'subtype':
str,
'validators':
[(ListContainsOnlySha1Hashes,
'list should only have hex-encoded SHA1 hashes of extension ids')]
}
},
'channel': {
str: {
'enum_map': {
'trunk': 'version_info::Channel::UNKNOWN',
'canary': 'version_info::Channel::CANARY',
'dev': 'version_info::Channel::DEV',
'beta': 'version_info::Channel::BETA',
'stable': 'version_info::Channel::STABLE',
}
}
},
'command_line_switch': {
str: {}
},
'component_extensions_auto_granted': {
bool: {}
},
'contexts': {
list: {
'enum_map': {
'blessed_extension': 'Feature::BLESSED_EXTENSION_CONTEXT',
'blessed_web_page': 'Feature::BLESSED_WEB_PAGE_CONTEXT',
'content_script': 'Feature::CONTENT_SCRIPT_CONTEXT',
'lock_screen_extension':
'Feature::LOCK_SCREEN_EXTENSION_CONTEXT',
'web_page': 'Feature::WEB_PAGE_CONTEXT',
'webui': 'Feature::WEBUI_CONTEXT',
'webui_untrusted': 'Feature::WEBUI_UNTRUSTED_CONTEXT',
'unblessed_extension': 'Feature::UNBLESSED_EXTENSION_CONTEXT',
},
'allow_all': True,
'allow_empty': True
},
},
'default_parent': {
bool: {
'values': [True]
}
},
'dependencies': {
list: {
# We allow an empty list of dependencies for child features that
# want to override their parents' dependency set.
'allow_empty': True,
'subtype': str
}
},
'disallow_for_service_workers': {
bool: {}
},
'extension_types': {
list: {
'enum_map': {
'extension': 'Manifest::TYPE_EXTENSION',
'hosted_app': 'Manifest::TYPE_HOSTED_APP',
'legacy_packaged_app': 'Manifest::TYPE_LEGACY_PACKAGED_APP',
'platform_app': 'Manifest::TYPE_PLATFORM_APP',
'shared_module': 'Manifest::TYPE_SHARED_MODULE',
'theme': 'Manifest::TYPE_THEME',
'login_screen_extension':
'Manifest::TYPE_LOGIN_SCREEN_EXTENSION',
'chromeos_system_extension':
'Manifest::TYPE_CHROMEOS_SYSTEM_EXTENSION',
},
'allow_all': True
},
},
'feature_flag': {
str: {}
},
'location': {
str: {
'enum_map': {
'component': 'SimpleFeature::COMPONENT_LOCATION',
'external_component':
'SimpleFeature::EXTERNAL_COMPONENT_LOCATION',
'policy': 'SimpleFeature::POLICY_LOCATION',
'unpacked': 'SimpleFeature::UNPACKED_LOCATION',
}
}
},
'internal': {
bool: {
'values': [True]
}
},
'matches': {
list: {
'subtype': str
}
},
'max_manifest_version': {
int: {
'values': [1, 2]
}
},
'min_manifest_version': {
int: {
'values': [2, 3]
}
},
'noparent': {
bool: {
'values': [True]
}
},
'platforms': {
list: {
'enum_map': {
'chromeos': 'Feature::CHROMEOS_PLATFORM',
'lacros': 'Feature::LACROS_PLATFORM',
'linux': 'Feature::LINUX_PLATFORM',
'mac': 'Feature::MACOSX_PLATFORM',
'win': 'Feature::WIN_PLATFORM',
'fuchsia': 'Feature::FUCHSIA_PLATFORM',
}
}
},
'session_types': {
list: {
'enum_map': {
'regular': 'mojom::FeatureSessionType::kRegular',
'kiosk': 'mojom::FeatureSessionType::kKiosk',
'kiosk.autolaunched':
'mojom::FeatureSessionType::kAutolaunchedKiosk',
}
}
},
'source': {
str: {},
'shared': True
},
})
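# For orientation, a hedged illustration of how the grammar above is applied
# (the feature name and values here are made up, not taken from any features
# file). A JSON entry such as
#
#   "fakeApi": {
#     "channel": "beta",
#     "contexts": ["blessed_extension"],
#     "extension_types": ["extension"]
#   }
#
# compiles, via the enum maps above, into C++ along the lines of:
#
#   feature->set_channel(version_info::Channel::BETA);
#   feature->set_contexts({Feature::BLESSED_EXTENSION_CONTEXT});
#   feature->set_extension_types({Manifest::TYPE_EXTENSION});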
FEATURE_TYPES = ['APIFeature', 'BehaviorFeature',
'ManifestFeature', 'PermissionFeature']
def HasProperty(property_name, value):
return property_name in value
def HasAtLeastOneProperty(property_names, value):
return any([HasProperty(name, value) for name in property_names])
def DoesNotHaveAllProperties(property_names, value):
return not all([HasProperty(name, value) for name in property_names])
def DoesNotHaveProperty(property_name, value):
return property_name not in value
def IsEmptyContextsAllowed(feature, all_features):
# An alias feature wouldn't have the 'contexts' feature value.
if feature.GetValue('source'):
return True
if type(feature) is ComplexFeature:
for child_feature in feature.feature_list:
if not IsEmptyContextsAllowed(child_feature, all_features):
return False
return True
contexts = feature.GetValue('contexts')
assert contexts, 'contexts must have been specified for the APIFeature'
allowlisted_empty_context_namespaces = [
'manifestTypes',
'extensionsManifestTypes',
'empty_contexts' # Only added for testing.
]
return (contexts != '{}' or
feature.name in allowlisted_empty_context_namespaces)
def IsFeatureCrossReference(property_name, reverse_property_name, feature,
all_features):
""" Verifies that |property_name| on |feature| references a feature that
references |feature| back using |reverse_property_name| property.
|property_name| and |reverse_property_name| are expected to have string
values.
"""
value = feature.GetValue(property_name)
if not value:
return True
# String property values will be wrapped in "", strip those.
value_regex = re.compile('^"(.+)"$')
parsed_value = value_regex.match(value)
assert parsed_value, (
'IsFeatureCrossReference should only be used on unicode properties')
referenced_feature = all_features.get(parsed_value.group(1))
if not referenced_feature:
return False
reverse_reference_value = referenced_feature.GetValue(reverse_property_name)
if not reverse_reference_value:
return False
# Don't validate reverse reference value for child features - chances are that
# the value was inherited from a feature parent, in which case it won't match
# current feature name.
if feature.has_parent:
return True
return reverse_reference_value == ('"%s"' % feature.name)
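# Illustration with made-up feature names: if feature "foo" sets
# 'alias': 'fooAlias', this check passes only when feature "fooAlias" sets
# 'source': 'foo' (the comparison is done on the quoted C++ string values).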
# Verifies that a feature with an allowlist is not available to hosted apps,
# returning true on success.
def DoesNotHaveAllowlistForHostedApps(value):
if not 'allowlist' in value:
return True
# Hack Alert: |value| here has the code for the generated C++ feature. Since
# we're looking at the individual values, we do a bit of yucky back-parsing
# to get a better look at the feature. This would be cleaner if we were
# operating on the JSON feature itself, but we currently never generate a
# JSON-based feature object that has all the values inherited from its
# parents. Since this is the only scenario we need this type of validation,
# doing it in a slightly ugly way isn't too bad. If we need more of these,
# we should find a smoother way to do it (e.g. first generate JSON-based
# features with inherited properties, do any necessary validation, then
# generate the C++ code strings).
# The feature did not specify extension types; this is fine for e.g.
# API features (which would typically rely on a permission feature, which
# is required to specify types).
if not 'extension_types' in value:
return True
types = value['extension_types']
# |types| looks like "{Manifest::TYPE_1, Manifest::TYPE_2}", so just looking
# for the "TYPE_HOSTED_APP substring is sufficient.
if 'TYPE_HOSTED_APP' not in types:
return True
# Helper to convert our C++ string array like "{\"aaa\", \"bbb\"}" (which is
# what the allowlist looks like) to a python list of strings.
def cpp_list_to_list(cpp_list):
assert type(cpp_list) is str
assert cpp_list[0] == '{'
assert cpp_list[-1] == '}'
new_list = json.loads('[%s]' % cpp_list[1:-1])
assert type(new_list) is list
return new_list
# Exceptions (see the feature files).
# DO NOT ADD MORE.
HOSTED_APP_EXCEPTIONS = [
'B44D08FD98F1523ED5837D78D0A606EA9D6206E5',
'2653F6F6C39BC6EEBD36A09AFB92A19782FF7EB4',
]
allowlist = cpp_list_to_list(value['allowlist'])
for entry in allowlist:
if entry not in HOSTED_APP_EXCEPTIONS:
return False
return True
SIMPLE_FEATURE_CPP_CLASSES = ({
'APIFeature': 'SimpleFeature',
'ManifestFeature': 'ManifestFeature',
'PermissionFeature': 'PermissionFeature',
'BehaviorFeature': 'SimpleFeature',
})
VALIDATION = ({
'all': [
(partial(HasAtLeastOneProperty, ['channel', 'dependencies']),
'Features must specify either a channel or dependencies'),
(DoesNotHaveAllowlistForHostedApps,
'Hosted apps are not allowed to use restricted features')
],
'APIFeature': [
(partial(HasProperty, 'contexts'),
'APIFeatures must specify the contexts property'),
(partial(DoesNotHaveAllProperties, ['alias', 'source']),
'Features cannot specify both alias and source.')
],
'ManifestFeature': [
(partial(HasProperty, 'extension_types'),
'ManifestFeatures must specify at least one extension type'),
(partial(DoesNotHaveProperty, 'contexts'),
'ManifestFeatures do not support contexts.'),
(partial(DoesNotHaveProperty, 'alias'),
'ManifestFeatures do not support alias.'),
(partial(DoesNotHaveProperty, 'source'),
'ManifestFeatures do not support source.'),
],
'BehaviorFeature': [
(partial(DoesNotHaveProperty, 'alias'),
'BehaviorFeatures do not support alias.'),
(partial(DoesNotHaveProperty, 'source'),
'BehaviorFeatures do not support source.'),
],
'PermissionFeature': [
(partial(HasProperty, 'extension_types'),
'PermissionFeatures must specify at least one extension type'),
(partial(DoesNotHaveProperty, 'contexts'),
'PermissionFeatures do not support contexts.'),
(partial(DoesNotHaveProperty, 'alias'),
'PermissionFeatures do not support alias.'),
(partial(DoesNotHaveProperty, 'source'),
'PermissionFeatures do not support source.'),
],
})
FINAL_VALIDATION = ({
'all': [],
'APIFeature': [
(partial(IsFeatureCrossReference, 'alias', 'source'),
'A feature alias property should reference a feature whose source '
'property references it back.'),
(partial(IsFeatureCrossReference, 'source', 'alias'),
'A feature source property should reference a feature whose alias '
'property references it back.'),
(IsEmptyContextsAllowed,
'An empty contexts list is not allowed for this feature.')
],
'ManifestFeature': [],
'BehaviorFeature': [],
'PermissionFeature': []
})
# These keys are used to find the parents of different features, but are not
# compiled into the features themselves.
IGNORED_KEYS = ['default_parent']
# By default, if an error is encountered, assert to stop the compilation. This
# can be disabled for testing.
ENABLE_ASSERTIONS = True
def GetCodeForFeatureValues(feature_values):
""" Gets the Code object for setting feature values for this object. """
c = Code()
for key in sorted(feature_values.keys()):
if key in IGNORED_KEYS:
continue
c.Append('feature->set_%s(%s);' % (key, feature_values[key]))
return c
class Feature(object):
"""A representation of a single simple feature that can handle all parsing,
validation, and code generation.
"""
def __init__(self, name):
self.name = name
self.has_parent = False
self.errors = []
self.feature_values = {}
self.shared_values = {}
def _GetType(self, value):
"""Returns the type of the given value.
"""
# For Py3 compatibility we use str in the grammar and treat unicode as str
# in Py2.
if sys.version_info.major == 2 and type(value) is unicode:
return str
return type(value)
def AddError(self, error):
"""Adds an error to the feature. If ENABLE_ASSERTIONS is active, this will
also assert to stop the compilation process (since errors should never be
found in production).
"""
self.errors.append(error)
if ENABLE_ASSERTIONS:
assert False, error
def _AddKeyError(self, key, error):
"""Adds an error relating to a particular key in the feature.
"""
self.AddError('Error parsing feature "%s" at key "%s": %s' %
(self.name, key, error))
def _GetCheckedValue(self, key, expected_type, expected_values,
enum_map, value):
"""Returns a string to be used in the generated C++ code for a given key's
python value, or None if the value is invalid. For example, if the python
value is True, this returns 'true', for a string foo, this returns "foo",
and for an enum, this looks up the C++ definition in the enum map.
key: The key being parsed.
expected_type: The expected type for this value, or None if any type is
allowed.
expected_values: The list of allowed values for this value, or None if any
value is allowed.
enum_map: The map from python value -> cpp value for all allowed values,
or None if no special mapping should be made.
value: The value to check.
"""
valid = True
if expected_values and value not in expected_values:
self._AddKeyError(key, 'Illegal value: "%s"' % value)
valid = False
t = self._GetType(value)
if expected_type and t is not expected_type:
self._AddKeyError(key, 'Illegal value: "%s"' % value)
valid = False
if not valid:
return None
if enum_map:
return enum_map[value]
if t is str:
return '"%s"' % str(value)
if t is int:
return str(value)
if t is bool:
return 'true' if value else 'false'
assert False, 'Unsupported type: %s' % value
def _ParseKey(self, key, value, shared_values, grammar):
"""Parses the specific key according to the grammar rule for that key if it
is present in the json value.
key: The key to parse.
value: The full value for this feature.
shared_values: Set of shared values associated with this feature.
grammar: The rule for the specific key.
"""
if key not in value:
return
v = value[key]
is_all = False
if v == 'all' and list in grammar and 'allow_all' in grammar[list]:
assert grammar[list]['allow_all'], '`allow_all` only supports `True`.'
v = []
is_all = True
if 'shared' in grammar and key in shared_values:
self._AddKeyError(key, 'Key can be set at most once per feature.')
return
value_type = self._GetType(v)
if value_type not in grammar:
self._AddKeyError(key, 'Illegal value: "%s"' % v)
return
if value_type is list and not is_all and len(v) == 0:
if 'allow_empty' in grammar[list]:
assert grammar[list]['allow_empty'], \
'`allow_empty` only supports `True`.'
else:
self._AddKeyError(key, 'List must specify at least one element.')
return
expected = grammar[value_type]
expected_values = None
enum_map = None
if 'values' in expected:
expected_values = expected['values']
elif 'enum_map' in expected:
enum_map = expected['enum_map']
expected_values = list(enum_map)
if is_all:
v = copy.deepcopy(expected_values)
expected_type = None
if value_type is list and 'subtype' in expected:
expected_type = expected['subtype']
cpp_value = None
# If this value is a list, iterate over each entry and validate. Otherwise,
# validate the single value.
if value_type is list:
cpp_value = []
for sub_value in v:
cpp_sub_value = self._GetCheckedValue(key, expected_type,
expected_values, enum_map,
sub_value)
if cpp_sub_value:
cpp_value.append(cpp_sub_value)
cpp_value = '{' + ','.join(cpp_value) + '}'
else:
cpp_value = self._GetCheckedValue(key, expected_type, expected_values,
enum_map, v)
if 'validators' in expected:
validators = expected['validators']
for validator, error in validators:
if not validator(v):
self._AddKeyError(key, error)
if cpp_value:
if 'shared' in grammar:
shared_values[key] = cpp_value
else:
self.feature_values[key] = cpp_value
elif key in self.feature_values:
# If the key is empty and this feature inherited a value from its parent,
# remove the inherited value.
del self.feature_values[key]
def SetParent(self, parent):
"""Sets the parent of this feature, and inherits all properties from that
parent.
"""
assert not self.feature_values, 'Parents must be set before parsing'
self.feature_values = copy.deepcopy(parent.feature_values)
self.has_parent = True
def SetSharedValues(self, values):
self.shared_values = values
def Parse(self, parsed_json, shared_values):
"""Parses the feature from the given json value."""
for key in parsed_json.keys():
if key not in FEATURE_GRAMMAR:
self._AddKeyError(key, 'Unrecognized key')
for key, key_grammar in FEATURE_GRAMMAR.items():
self._ParseKey(key, parsed_json, shared_values, key_grammar)
def Validate(self, feature_type, shared_values):
feature_values = self.feature_values.copy()
feature_values.update(shared_values)
for validator, error in (VALIDATION[feature_type] + VALIDATION['all']):
if not validator(feature_values):
self.AddError(error)
def GetCode(self, feature_type):
"""Returns the Code object for generating this feature."""
c = Code()
cpp_feature_class = SIMPLE_FEATURE_CPP_CLASSES[feature_type]
c.Append('%s* feature = new %s();' % (cpp_feature_class, cpp_feature_class))
c.Append('feature->set_name("%s");' % self.name)
c.Concat(GetCodeForFeatureValues(self.GetAllFeatureValues()))
return c
def AsParent(self):
""" Returns the feature values that should be inherited by children features
when this feature is set as parent.
"""
return self
def GetValue(self, key):
""" Gets feature value for the specified key """
value = self.feature_values.get(key)
return value if value else self.shared_values.get(key)
def GetAllFeatureValues(self):
""" Gets all values set for this feature. """
values = self.feature_values.copy()
values.update(self.shared_values)
return values
def GetErrors(self):
return self.errors
class ComplexFeature(Feature):
""" Complex feature - feature that is comprised of list of features.
Overall complex feature is available if any of contained
feature is available.
"""
def __init__(self, name):
Feature.__init__(self, name)
self.feature_list = []
def GetCode(self, feature_type):
c = Code()
c.Append('std::vector<Feature*> features;')
for f in self.feature_list:
# Sanity check that components of complex features have no shared values
# set.
assert not f.shared_values
c.Sblock('{')
c.Concat(f.GetCode(feature_type))
c.Append('features.push_back(feature);')
c.Eblock('}')
c.Append('ComplexFeature* feature(new ComplexFeature(&features));')
c.Append('feature->set_name("%s");' % self.name)
c.Concat(GetCodeForFeatureValues(self.shared_values))
return c
def AsParent(self):
parent = None
for p in self.feature_list:
if 'default_parent' in p.feature_values:
parent = p
break
assert parent, 'No default parent found for %s' % self.name
return parent
def GetErrors(self):
errors = copy.copy(self.errors)
for feature in self.feature_list:
errors.extend(feature.GetErrors())
return errors
class FeatureCompiler(object):
"""A compiler to load, parse, and generate C++ code for a number of
features.json files."""
def __init__(self, chrome_root, source_files, feature_type,
method_name, out_root, gen_dir_relpath, out_base_filename):
# See __main__'s ArgumentParser for documentation on these properties.
self._chrome_root = chrome_root
self._source_files = source_files
self._feature_type = feature_type
self._method_name = method_name
self._out_root = out_root
self._out_base_filename = out_base_filename
self._gen_dir_relpath = gen_dir_relpath
# The json value for the feature files.
self._json = {}
# The parsed features.
self._features = {}
def Load(self):
"""Loads and parses the source from each input file and puts the result in
self._json."""
for f in self._source_files:
abs_source_file = os.path.join(self._chrome_root, f)
try:
with open(abs_source_file, 'r') as f:
f_json = json_parse.Parse(f.read())
except:
print('FAILED: Exception encountered while loading "%s"' %
abs_source_file)
raise
dupes = set(f_json) & set(self._json)
assert not dupes, 'Duplicate keys found: %s' % list(dupes)
self._json.update(f_json)
def _FindParent(self, feature_name, feature_value):
"""Checks to see if a feature has a parent. If it does, returns the
parent."""
no_parent = False
if type(feature_value) is list:
no_parent_values = ['noparent' in v for v in feature_value]
no_parent = all(no_parent_values)
assert no_parent or not any(no_parent_values), (
'"%s:" All child features must contain the same noparent value' %
feature_name)
else:
no_parent = 'noparent' in feature_value
sep = feature_name.rfind('.')
if sep == -1 or no_parent:
return None
parent_name = feature_name[:sep]
while sep != -1 and parent_name not in self._features:
# This recursion allows for a feature to have a parent that isn't a direct
# ancestor. For instance, we could have feature 'alpha', and feature
# 'alpha.child.child', where 'alpha.child.child' inherits from 'alpha'.
# TODO(devlin): Is this useful? Or logical?
sep = feature_name.rfind('.', 0, sep)
parent_name = feature_name[:sep]
if sep == -1:
# TODO(devlin): It'd be kind of nice to be able to assert that the
# deduced parent name is in our features, but some dotted features don't
# have parents and also don't have noparent, e.g. system.cpu. We should
# probably just noparent them so that we can assert this.
# raise KeyError('Could not find parent "%s" for feature "%s".' %
# (parent_name, feature_name))
return None
return self._features[parent_name].AsParent()
def _CompileFeature(self, feature_name, feature_value):
"""Parses a single feature."""
if 'nocompile' in feature_value:
assert feature_value['nocompile'], (
'nocompile should only be true; otherwise omit this key.')
return
def parse_and_validate(name, value, parent, shared_values):
try:
feature = Feature(name)
if parent:
feature.SetParent(parent)
feature.Parse(value, shared_values)
feature.Validate(self._feature_type, shared_values)
return feature
except:
print('Failure to parse feature "%s"' % feature_name)
raise
parent = self._FindParent(feature_name, feature_value)
shared_values = {}
# Handle complex features, which are lists of simple features.
if type(feature_value) is list:
feature = ComplexFeature(feature_name)
# This doesn't handle nested complex features. I think that's probably for
# the best.
for v in feature_value:
feature.feature_list.append(
parse_and_validate(feature_name, v, parent, shared_values))
self._features[feature_name] = feature
else:
self._features[feature_name] = parse_and_validate(
feature_name, feature_value, parent, shared_values)
# Apply parent shared values at the end to enable child features to
# override parent shared value - if parent shared values are added to
# shared value set before a child feature is parsed, the child feature
# overriding shared values set by its parent would cause an error due to
# shared values being set twice.
final_shared_values = copy.deepcopy(parent.shared_values) if parent else {}
final_shared_values.update(shared_values)
self._features[feature_name].SetSharedValues(final_shared_values)
def _FinalValidation(self):
validators = FINAL_VALIDATION['all'] + FINAL_VALIDATION[self._feature_type]
for name, feature in self._features.items():
for validator, error in validators:
if not validator(feature, self._features):
feature.AddError(error)
def Compile(self):
"""Parses all features after loading the input files."""
# Iterate over in sorted order so that parents come first.
for k in sorted(self._json.keys()):
self._CompileFeature(k, self._json[k])
self._FinalValidation()
def Render(self):
"""Returns the Code object for the body of the .cc file, which handles the
initialization of all features."""
c = Code()
c.Sblock()
for k in sorted(self._features.keys()):
c.Sblock('{')
feature = self._features[k]
c.Concat(feature.GetCode(self._feature_type))
c.Append('provider->AddFeature("%s", feature);' % k)
c.Eblock('}')
c.Eblock()
return c
def Write(self):
"""Writes the output."""
header_file = self._out_base_filename + '.h'
cc_file = self._out_base_filename + '.cc'
include_file_root = self._out_root[len(self._gen_dir_relpath)+1:]
header_file_path = '%s/%s' % (include_file_root, header_file)
cc_file_path = '%s/%s' % (include_file_root, cc_file)
substitutions = ({
'header_file_path': header_file_path,
'header_guard': (header_file_path.replace('/', '_').
replace('.', '_').upper()),
'method_name': self._method_name,
'source_files': str([ToPosixPath(f) for f in self._source_files]),
'year': str(datetime.now().year)
})
if not os.path.exists(self._out_root):
os.makedirs(self._out_root)
# Write the .h file.
with open(os.path.join(self._out_root, header_file), 'w') as f:
header_file = Code()
header_file.Append(HEADER_FILE_TEMPLATE)
header_file.Substitute(substitutions)
f.write(header_file.Render().strip())
# Write the .cc file.
with open(os.path.join(self._out_root, cc_file), 'w') as f:
cc_file = Code()
cc_file.Append(CC_FILE_BEGIN)
cc_file.Substitute(substitutions)
cc_file.Concat(self.Render())
cc_end = Code()
cc_end.Append(CC_FILE_END)
cc_end.Substitute(substitutions)
cc_file.Concat(cc_end)
f.write(cc_file.Render().strip())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compile json feature files')
parser.add_argument('chrome_root', type=str,
help='The root directory of the chrome checkout')
parser.add_argument(
'feature_type', type=str,
help='The name of the class to use in feature generation ' +
'(e.g. APIFeature, PermissionFeature)')
parser.add_argument('method_name', type=str,
help='The name of the method to populate the provider')
parser.add_argument('out_root', type=str,
help='The root directory to generate the C++ files into')
parser.add_argument('gen_dir_relpath', default='gen', help='Path of the '
'gen directory relative to the out/. If running in the default '
'toolchain, the path is gen, otherwise $toolchain_name/gen')
parser.add_argument(
'out_base_filename', type=str,
help='The base filename for the C++ files (.h and .cc will be appended)')
parser.add_argument('source_files', type=str, nargs='+',
help='The source features.json files')
args = parser.parse_args()
if args.feature_type not in FEATURE_TYPES:
raise NameError('Unknown feature type: %s' % args.feature_type)
c = FeatureCompiler(args.chrome_root, args.source_files, args.feature_type,
args.method_name, args.out_root, args.gen_dir_relpath,
args.out_base_filename)
c.Load()
c.Compile()
c.Write()
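# Example invocation (all paths and names below are hypothetical; the
# positional argument order follows the parser defined above):
#
#   python <this script> /path/to/chromium/src APIFeature AddFakeFeatures \
#       out/Default/gen/extensions gen fake_feature_provider \
#       extensions/common/api/_api_features.json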
|
|
import os
import sys
import pickle
import copy
import warnings
import platform
import textwrap
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from numpy._build_utils.apple_accelerate import (
uses_accelerate_framework, get_sgemv_fix
)
from numpy.compat import npy_load_module
from setup_common import * # noqa: F403
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a
# bogus value for affected strides in order to help smoke out bad stride usage
# when relaxed stride checking is enabled.
NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")
NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING
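# Illustrative shell usage of the two knobs above (commands are examples only):
#
#   NPY_RELAXED_STRIDES_CHECKING=0 python setup.py build   # disable the checking
#   NPY_RELAXED_STRIDES_DEBUG=1 python setup.py build      # use debug stride values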
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils forces
# config.h generation inside an Extension class, and as such sharing
# configuration information between extensions is not easy.
# Using a pickle-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly:
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
self._check_complex = None
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
self._check_types = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
self._check_ieee_macros = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
self._check_complex = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
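# Sketch of the intended use of the memoizer above (a CallOnceOnly instance
# named `cocache` is created in configuration() below; the arguments here are
# illustrative): the first call runs the expensive check and pickles the
# result, later calls just unpickle a deep copy.
#
#   cocache = CallOnceOnly()
#   priv, pub = cocache.check_complex(config_cmd, mathlibs)  # computed once
#   priv, pub = cocache.check_complex(config_cmd, mathlibs)  # replayed from pickle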
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
return os.path.join(sys.prefix, "libs")
else:
return get_config_var('LIBDIR')
def is_npy_no_signal():
"""Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
header."""
return sys.platform == 'win32'
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
# Perhaps a fancier check is in order here, so that threads are only
# enabled if there are actually multiple CPUs -- but threaded code can be
# nice even on a single CPU, so that long-running calculations don't block.
return 'NPY_NOSMP' in os.environ
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
# Distutils hack on AMD64 on windows
print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
# On win32, force long double format string to be 'g', not
# 'Lg', since the MS runtime does not support long double whose
# size is > sizeof(double)
if a == "Intel" or a == "AMD64":
deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
def check_func(func_name):
return config.check_func(func_name, libraries=mathlibs,
decl=True, call=True)
def check_funcs_once(funcs_name):
decl = dict([(f, True) for f in funcs_name])
st = config.check_funcs_once(funcs_name, libraries=mathlibs,
decl=decl, call=decl)
if st:
moredefs.extend([(fname2def(f), 1) for f in funcs_name])
return st
def check_funcs(funcs_name):
# Use check_funcs_once first, and if it does not work, test func per
# func. Return success only if all the functions are available
if not check_funcs_once(funcs_name):
# Global check failed, check func per func
for f in funcs_name:
if check_func(f):
moredefs.append((fname2def(f), 1))
return 0
else:
return 1
#use_msvc = config.check_decl("_MSC_VER")
if not check_funcs_once(MANDATORY_FUNCS):
raise SystemError("One of the required function to build numpy is not"
" available (the list is %s)." % str(MANDATORY_FUNCS))
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
# XXX: hack to circumvent cpp pollution from python: python puts its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own tests are correct
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
OPTIONAL_STDFUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
h = h.replace(".", "_").replace(os.path.sep, "_")
moredefs.append((fname2def(h), 1))
for tup in OPTIONAL_INTRINSICS:
headers = None
if len(tup) == 2:
f, args, m = tup[0], tup[1], fname2def(tup[0])
elif len(tup) == 3:
f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])
else:
f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])
if config.check_func(f, decl=False, call=True, call_args=args,
headers=headers):
moredefs.append((m, 1))
for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
if config.check_gcc_function_attribute(dec, fn):
moredefs.append((fname2def(fn), 1))
for dec, fn, code, header in OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS:
if config.check_gcc_function_attribute_with_intrinsics(dec, fn, code,
header):
moredefs.append((fname2def(fn), 1))
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_gcc_variable_attribute(fn):
m = fn.replace("(", "_").replace(")", "_")
moredefs.append((fname2def(m), 1))
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
def check_complex(config, mathlibs):
priv = []
pub = []
try:
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365", stacklevel=2)
return priv, pub
except Exception:
# os.uname is not available on all platforms; a blanket except is ugly but safe here
pass
# Check for complex support
st = config.check_header('complex.h')
if st:
priv.append(('HAVE_COMPLEX_H', 1))
pub.append(('NPY_USE_C99_COMPLEX', 1))
for t in C99_COMPLEX_TYPES:
st = config.check_type(t, headers=["complex.h"])
if st:
pub.append(('NPY_HAVE_%s' % type2def(t), 1))
def check_prec(prec):
flist = [f + prec for f in C99_COMPLEX_FUNCS]
decl = dict([(f, True) for f in flist])
if not config.check_funcs_once(flist, call=decl, decl=decl,
libraries=mathlibs):
for f in flist:
if config.check_func(f, call=True, decl=True,
libraries=mathlibs):
priv.append((fname2def(f), 1))
else:
priv.extend([(fname2def(f), 1) for f in flist])
check_prec('')
check_prec('f')
check_prec('l')
return priv, pub
def check_ieee_macros(config):
priv = []
pub = []
macros = []
def _add_decl(f):
priv.append(fname2def("decl_%s" % f))
pub.append('NPY_%s' % fname2def("decl_%s" % f))
# XXX: hack to circumvent cpp pollution from python: python puts its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own tests are correct
_macros = ["isnan", "isinf", "signbit", "isfinite"]
for f in _macros:
py_symbol = fname2def("decl_%s" % f)
already_declared = config.check_decl(py_symbol,
headers=["Python.h", "math.h"])
if already_declared:
if config.check_macro_true(py_symbol,
headers=["Python.h", "math.h"]):
pub.append('NPY_%s' % fname2def("decl_%s" % f))
else:
macros.append(f)
# Normally, isnan and isinf are macros (C99), but some platforms only have
# the function, or both the function and the macro. Check for the macro only,
# and define replacement ones if not found.
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
st = config.check_decl(f, headers=["Python.h", "math.h"])
if st:
_add_decl(f)
return priv, pub
def check_types(config_cmd, ext, build_dir):
private_defines = []
public_defines = []
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
expected = {'short': [2], 'int': [4], 'long': [8, 4],
'float': [4], 'double': [8], 'long double': [16, 12, 8],
'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],
'off_t': [8, 4]}
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
if not result:
python = 'python'
if '__pypy__' in sys.builtin_module_names:
python = 'pypy'
raise SystemError(
"Cannot compile 'Python.h'. Perhaps you need to "
"install {0}-dev|{0}-devel.".format(python))
res = config_cmd.check_header("endian.h")
if res:
private_defines.append(('HAVE_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
res = config_cmd.check_header("sys/endian.h")
if res:
private_defines.append(('HAVE_SYS_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))
# Check basic types sizes
for type in ('short', 'int', 'long'):
res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
headers=["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
if not already_declared and not type == 'long double':
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# Compute size of corresponding complex type: used to check that our
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
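        # e.g. for 'double' this measures sizeof(struct {double __x; double __y;})
        # and expects 16 bytes, matching a C99 complex double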
complex_def = "struct {%s __x; %s __y;}" % (type, type)
res = config_cmd.check_type_size(complex_def,
expected=[2 * x for x in expected[type]])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
expected=expected[type])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# We check declaration AND type because that's how distutils does it.
if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
library_dirs=[pythonlib_dir()],
expected=expected['PY_LONG_LONG'])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
res = config_cmd.check_type_size('long long',
expected=expected['long long'])
if res >= 0:
#private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'long long')
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
"Config wo CHAR_BIT is not supported"
", please contact the maintainers")
return private_defines, public_defines
def check_mathlib(config_cmd):
# Testing the C math library
mathlibs = []
mathlibs_choices = [[], ['m'], ['cpml']]
mathlib = os.environ.get('MATHLIB')
if mathlib:
mathlibs_choices.insert(0, mathlib.split(','))
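        # e.g. MATHLIB="m,cpml" makes ['m', 'cpml'] the first candidate tried below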
for libs in mathlibs_choices:
if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
mathlibs = libs
break
else:
raise EnvironmentError("math library missing; rerun "
"setup.py after setting the "
"MATHLIB env variable")
return mathlibs
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
hide = '__attribute__((visibility("hidden")))'
if config.check_gcc_function_attribute(hide, 'hideme'):
return hide
else:
return ''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
from numpy.distutils.system_info import get_info
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
codegen_dir = join(local_dir, 'code_generators')
if is_released(config):
warnings.simplefilter('error', MismatchCAPIWarning)
# Check whether we have a mismatch between the set C API VERSION and the
# actual C API VERSION
check_api_version(C_API_VERSION, codegen_dir)
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = npy_load_module('_'.join(n.split('.')),
generate_umath_py, ('.py', 'U', 1))
header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
def generate_config_h(ext, build_dir):
target = join(build_dir, header_dir, 'config.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
# Check math library and C99 math funcs availability
mathlibs = check_mathlib(config_cmd)
moredefs.append(('MATHLIB', ','.join(mathlibs)))
check_math_capabilities(config_cmd, moredefs, mathlibs)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
# Signal check
if is_npy_no_signal():
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
if sys.platform == 'win32' or os.name == 'nt':
win32_checks(moredefs)
# C99 restrict keyword
moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))
# Inline check
inline = config_cmd.check_inline()
# Use relaxed stride checking
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Use bogus stride debug aid when relaxed strides are enabled
if NPY_RELAXED_STRIDES_DEBUG:
moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
# Get long double representation
rep = check_long_double_representation(config_cmd)
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
if check_for_right_shift_internal_compiler_error(config_cmd):
moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift')
moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift')
moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift')
moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift')
# Generate the config.h file from moredefs
with open(target, 'w') as target_f:
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# define inline to our keyword, or nothing
target_f.write('#ifndef __cplusplus\n')
if inline == 'inline':
target_f.write('/* #undef inline */\n')
else:
target_f.write('#define inline %s\n' % inline)
target_f.write('#endif\n')
# add the guard to make sure config.h is never included directly,
# but always through npy_config.h
target_f.write(textwrap.dedent("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
"""))
log.info('File: %s' % target)
with open(target) as target_f:
log.info(target_f.read())
log.info('EOF')
else:
mathlibs = []
with open(target) as target_f:
for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attribute (and none is
        # needed).
if hasattr(ext, 'libraries'):
ext.libraries.extend(mathlibs)
incl_dir = os.path.dirname(target)
if incl_dir not in config.numpy_include_dirs:
config.numpy_include_dirs.append(incl_dir)
return target
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# put common include directory in build_dir on search path
# allows using code generation in headers
config.add_include_dirs(join(build_dir, "src", "common"))
config.add_include_dirs(join(build_dir, "src", "npymath"))
target = join(build_dir, header_dir, '_numpyconfig.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
if is_npy_no_signal():
moredefs.append(('NPY_NO_SIGNAL', 1))
if is_npy_no_smp():
moredefs.append(('NPY_NO_SMP', 1))
else:
moredefs.append(('NPY_NO_SMP', 0))
mathlibs = check_mathlib(config_cmd)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
if NPY_RELAXED_STRIDES_DEBUG:
moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
# Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
hidden_visibility = visibility_define(config_cmd)
moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
# Add the C API/ABI versions
moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
with open(target, 'w') as target_f:
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# Define __STDC_FORMAT_MACROS
target_f.write(textwrap.dedent("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
"""))
# Dump the numpyconfig.h header to stdout
log.info('File: %s' % target)
with open(target) as target_f:
log.info(target_f.read())
log.info('EOF')
config.add_data_files((header_dir, target))
return target
def generate_api_func(module_name):
def generate_api(ext, build_dir):
script = join(codegen_dir, module_name + '.py')
sys.path.insert(0, codegen_dir)
try:
m = __import__(module_name)
log.info('executing %s', script)
h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
finally:
del sys.path[0]
config.add_data_files((header_dir, h_file),
(header_dir, doc_file))
return (h_file,)
return generate_api
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
config.add_include_dirs(join(local_dir, "src", "common"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
config.add_data_dir('include/numpy')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this macro indicates that Numpy build is in process
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
if sys.platform[:3] == "aix":
config.add_define_macros([("_LARGE_FILES", None)])
else:
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
join(codegen_dir, 'genapi.py'),
]
#######################################################################
# npymath library #
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
# update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
# (avoid late errors with MATHLIB which are confusing if the
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
# rerun the failing command in verbose mode
config_cmd.compiler.verbose = True
config_cmd.try_link('int main(void) { return 0;}')
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
subst_dict["posix_mathlib"] = posix_mlib
subst_dict["msvc_mathlib"] = msvc_mlib
npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),
join('src', 'npymath', 'npy_math.c'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')
]
# Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.
# Intel and Clang also don't seem happy with /GL
is_msvc = (platform.platform().startswith('Windows') and
platform.python_compiler().startswith('MS'))
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib',
build_info={
'include_dirs' : [], # empty list required for creating npy_math_internal.h
'extra_compiler_args' : (['/GL-'] if is_msvc else []),
})
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
subst_dict)
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
subst_dict)
#######################################################################
# npysort library #
#######################################################################
# This library is created for the build but it is not installed
npysort_sources = [join('src', 'common', 'npy_sort.h.src'),
join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'timsort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
join('src', 'npysort', 'radixsort.c.src'),
join('src', 'common', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'common', 'npy_binsearch.h.src'),
join('src', 'npysort', 'binsearch.c.src'),
]
config.add_library('npysort',
sources=npysort_sources,
include_dirs=[])
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('_multiarray_tests',
sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),
join('src', 'common', 'mem_overlap.c')],
depends=[join('src', 'common', 'mem_overlap.h'),
join('src', 'common', 'npy_extint128.h')],
libraries=['npymath'])
#######################################################################
# _multiarray_umath module - common part #
#######################################################################
common_deps = [
join('src', 'common', 'array_assign.h'),
join('src', 'common', 'binop_override.h'),
join('src', 'common', 'cblasfuncs.h'),
join('src', 'common', 'lowlevel_strided_loops.h'),
join('src', 'common', 'mem_overlap.h'),
join('src', 'common', 'npy_cblas.h'),
join('src', 'common', 'npy_config.h'),
join('src', 'common', 'npy_ctypes.h'),
join('src', 'common', 'npy_extint128.h'),
join('src', 'common', 'npy_import.h'),
join('src', 'common', 'npy_longdouble.h'),
join('src', 'common', 'templ_common.h.src'),
join('src', 'common', 'ucsnarrow.h'),
join('src', 'common', 'ufunc_override.h'),
join('src', 'common', 'umathmodule.h'),
join('src', 'common', 'numpyos.h'),
]
common_src = [
join('src', 'common', 'array_assign.c'),
join('src', 'common', 'mem_overlap.c'),
join('src', 'common', 'npy_longdouble.c'),
join('src', 'common', 'templ_common.h.src'),
join('src', 'common', 'ucsnarrow.c'),
join('src', 'common', 'ufunc_override.c'),
join('src', 'common', 'numpyos.c'),
join('src', 'common', 'npy_cpu_features.c.src'),
]
if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
blas_info = get_info('blas_ilp64_opt', 2)
else:
blas_info = get_info('blas_opt', 0)
have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', [])
if have_blas:
extra_info = blas_info
# These files are also in MANIFEST.in so that they are always in
# the source distribution independently of HAVE_CBLAS.
common_src.extend([join('src', 'common', 'cblasfuncs.c'),
join('src', 'common', 'python_xerbla.c'),
])
if uses_accelerate_framework(blas_info):
common_src.extend(get_sgemv_fix())
else:
extra_info = {}
#######################################################################
# _multiarray_umath module - multiarray part #
#######################################################################
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'arrayfunction_override.h'),
join('src', 'multiarray', 'npy_buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
join('src', 'multiarray', 'conversion_utils.h'),
join('src', 'multiarray', 'ctors.h'),
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'dragon4.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
join('src', 'multiarray', 'mapping.h'),
join('src', 'multiarray', 'methods.h'),
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'strfuncs.h'),
join('src', 'multiarray', 'typeinfo.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
join('include', 'numpy', 'arrayscalars.h'),
join('include', 'numpy', 'noprefix.h'),
join('include', 'numpy', 'npy_interrupt.h'),
join('include', 'numpy', 'npy_3kcompat.h'),
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('include', 'numpy', 'npy_common.h'),
join('include', 'numpy', 'npy_os.h'),
join('include', 'numpy', 'utils.h'),
join('include', 'numpy', 'ndarrayobject.h'),
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
        # add library sources as distutils does not consider library
        # dependencies
] + npysort_sources + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'arrayfunction_override.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'compiled_base.c'),
join('src', 'multiarray', 'common.c'),
join('src', 'multiarray', 'convert.c'),
join('src', 'multiarray', 'convert_datatype.c'),
join('src', 'multiarray', 'conversion_utils.c'),
join('src', 'multiarray', 'ctors.c'),
join('src', 'multiarray', 'datetime.c'),
join('src', 'multiarray', 'datetime_strings.c'),
join('src', 'multiarray', 'datetime_busday.c'),
join('src', 'multiarray', 'datetime_busdaycal.c'),
join('src', 'multiarray', 'descriptor.c'),
join('src', 'multiarray', 'dragon4.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
join('src', 'multiarray', 'item_selection.c'),
join('src', 'multiarray', 'iterators.c'),
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
join('src', 'multiarray', 'mapping.c'),
join('src', 'multiarray', 'methods.c'),
join('src', 'multiarray', 'multiarraymodule.c'),
join('src', 'multiarray', 'nditer_templ.c.src'),
join('src', 'multiarray', 'nditer_api.c'),
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'strfuncs.c'),
join('src', 'multiarray', 'temp_elide.c'),
join('src', 'multiarray', 'typeinfo.c'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'vdot.c'),
]
#######################################################################
# _multiarray_umath module - umath part #
#######################################################################
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
with open(target, 'w') as f:
f.write(generate_umath.make_code(generate_umath.defdict,
generate_umath.__file__))
return []
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'matmul.h.src'),
join('src', 'umath', 'matmul.c.src'),
join('src', 'umath', 'clip.h.src'),
join('src', 'umath', 'clip.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'extobj.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c'),
join('src', 'umath', 'override.c'),
]
umath_deps = [
generate_umath_py,
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'common', 'templ_common.h.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'override.h'),
join(codegen_dir, 'generate_ufunc_api.py'),
]
config.add_extension('_multiarray_umath',
sources=multiarray_src + umath_src +
npymath_sources + common_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py'),
generate_umath_c,
generate_ufunc_api,
],
depends=deps + multiarray_deps + umath_deps +
common_deps,
libraries=['npymath', 'npysort'],
extra_info=extra_info)
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('_umath_tests',
sources=[join('src', 'umath', '_umath_tests.c.src')])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('_rational_tests',
sources=[join('src', 'umath', '_rational_tests.c.src')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('_struct_ufunc_tests',
sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('_operand_flag_tests',
sources=[join('src', 'umath', '_operand_flag_tests.c.src')])
config.add_subpackage('tests')
config.add_data_dir('tests/data')
config.make_svn_version_py()
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
|
# import time
import numpy as np
# from pycqed.analysis import analysis_toolbox as a_tools
# import pycqed.analysis_v2.base_analysis as ba
# import dataprep for tomography module
# import tomography module
# using the data prep module of analysis V2
# from pycqed.analysis_v2 import tomography_dataprep as dataprep
from pycqed.analysis import measurement_analysis as ma
def reshape_block(shots_data, segments_per_block=16, block_size=4092, mode='truncate'):
"""
inputs: shots_data 1D array of dimension N
organizes data in blocks of dimension block_size.
num of blocks is N/block_size
"""
N = len(shots_data)
# Data dimension needs to be an integer multiple of block_size
    assert N % block_size == 0
num_blocks = N//block_size
full_segments = block_size//segments_per_block
orfan_segments = block_size % segments_per_block
missing_segments = segments_per_block - orfan_segments
# print(N,num_blocks,full_segments,orfan_segments,missing_segments)
reshaped_data = shots_data.reshape((num_blocks,block_size))
if mode.lower()=='truncate':
truncate_idx = full_segments*segments_per_block
return reshaped_data[:,:truncate_idx]
elif mode.lower()=='padd':
padd_dim = (full_segments+1)*segments_per_block
return_block = np.nan*np.ones((num_blocks,padd_dim))
return_block[:,:block_size] = reshaped_data
return return_block
else:
raise ValueError('Mode not understood. Needs to be truncate or padd')
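# Illustrative note on the two modes above (numbers assume the defaults
# block_size=4092 and segments_per_block=16): 'truncate' keeps 255 * 16 = 4080
# shots per block and drops the 12 orphan shots, while 'padd' keeps all 4092
# shots and fills the remaining 4 slots of the 256th group with NaN.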
def all_repetitions(shots_data,segments_per_block=16):
flat_dim = shots_data.shape[0]*shots_data.shape[1]
    # Flattened data length needs to be an integer multiple of segments_per_block
    assert flat_dim % segments_per_block == 0
num_blocks = flat_dim // segments_per_block
block_data = shots_data.reshape((num_blocks,segments_per_block))
return block_data
def get_segments_average(shots_data, segments_per_block=16, block_size=4092, mode='truncate', average=True):
reshaped_data = reshape_block(shots_data=shots_data,
segments_per_block=segments_per_block,
block_size=block_size,
mode=mode)
all_reps = all_repetitions(shots_data=reshaped_data,
segments_per_block=segments_per_block)
if average:
return np.mean(all_reps,axis=0)
else:
return all_reps
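# Minimal usage sketch (added for illustration; the synthetic data and shapes
# below are assumptions, not taken from a real acquisition).
def _example_segment_average():
    # Two fake blocks of 4092 shots, averaged down to one 16-segment trace.
    fake_shots = np.arange(2 * 4092, dtype=float)
    seg_avg = get_segments_average(fake_shots, segments_per_block=16,
                                   block_size=4092, mode='truncate',
                                   average=True)
    # 'truncate' keeps 2 * 255 repetitions of 16 segments and averages over them.
    assert seg_avg.shape == (16,)
    return seg_avg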
class ExpectationValueCalculation:
def __init__(self, auto=True, label='', timestamp=None,
fig_format='png',
q0_label='q0',
q1_label='q1', close_fig=True, **kw):
self.label = label
self.timestamp = timestamp
self.fig_format = fig_format
# q0 == D2
self.q0_label = q0_label
# q1 == A
self.q1_label = q1_label
self.n_states = 2 ** 2
self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
timestamp=timestamp)
self.ma_obj.get_naming_and_values()
# self.get_naming_and_values()
# hard coded number of segments for a 2 qubit state tomography
# constraint imposed by UHFLI
self.nr_segments = 16
# self.exp_name = os.path.split(self.folder)[-1][7:]
avg_h1 = self.ma_obj.measured_values[0]
avg_h2 = self.ma_obj.measured_values[1]
avg_h12 = self.ma_obj.measured_values[2]
# Binning all the points required for the tomo
h1_00 = np.mean(avg_h1[8:10])
h1_01 = np.mean(avg_h1[10:12])
h1_10 = np.mean(avg_h1[12:14])
h1_11 = np.mean(avg_h1[14:])
h2_00 = np.mean(avg_h2[8:10])
h2_01 = np.mean(avg_h2[10:12])
h2_10 = np.mean(avg_h2[12:14])
h2_11 = np.mean(avg_h2[14:])
h12_00 = np.mean(avg_h12[8:10])
h12_01 = np.mean(avg_h12[10:12])
h12_10 = np.mean(avg_h12[12:14])
h12_11 = np.mean(avg_h12[14:])
self.measurements_tomo = (
np.array([avg_h1[0:8], avg_h2[0:8],
avg_h12[0:8]])).flatten()
# print(self.measurements_tomo)
# print(len(self.measurements_tomo))
        # 24 x 1 (three channels x eight tomography segments)
        # get the calibration points by averaging over the repeated
        # calibration segments taken with a known initial state
self.measurements_cal = np.array(
[h1_00, h1_01, h1_10, h1_11,
h2_00, h2_01, h2_10, h2_11,
h12_00, h12_01, h12_10, h12_11])
# print(len(self.measurements_cal))
# print(self.measurements_cal)
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
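        # For two qubits (n_states = 4) the loop above yields the sign matrix
        #     [[ 1,  1,  1,  1],
        #      [ 1, -1,  1, -1],
        #      [ 1,  1, -1, -1],
        #      [ 1, -1, -1,  1]]
        # so each calibration level is b0 +/- b1 +/- b2 +/- b3, with signs given
        # by the Z eigenvalues of the prepared basis state.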
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
betas = np.zeros(12)
# print(self.measurements_cal[0:4])
betas[0:4] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[0:4])
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
# print(self.measurements_cal[0:4])
# print(betas[0:4])
betas[4:8] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[4:8])
# print(betas[4:8])
betas[8:] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[8:12])
# print(betas[8:])
return betas
def expectation_value_calculation_IdenZ(self):
betas = self._calibrate_betas()
#inverting the unprimed beta matrix
#up is unprimed
self.betas = betas
# print(self.betas[0:4], self.betas[4:8], self.betas[8:])
beta_0_up =self.betas[0]
beta_1_up =self.betas[1]
beta_2_up =self.betas[2]
beta_3_up =self.betas[3]
beta_matrix_up = np.array([[beta_0_up,beta_1_up,beta_2_up,beta_3_up],
[beta_0_up,-1*beta_1_up,beta_2_up,-1*beta_3_up],
[beta_0_up,beta_1_up,-1*beta_2_up,-1*beta_3_up],
[beta_0_up,-1*beta_1_up,-1*beta_2_up,beta_3_up]])
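        # Each row models one Z-basis tomo segment as
        #   m = beta_0 (+/-) beta_1*<IZ> (+/-) beta_2*<ZI> (+/-) beta_3*<ZZ>,
        # with the signs fixed by the pre-rotations applied before readout.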
        # segments 0:4 are the Z-basis tomo results for this (unprimed) channel
# expect_value_IdenZ_up = np.dot(np.linalg.inv(beta_matrix_up), self.measurements_tomo[1:4])
expect_value_IdenZ_up = np.dot(np.linalg.inv(beta_matrix_up), self.measurements_tomo[0:4])
#inverting the primed beta matrix
#p is primed
beta_0_p =self.betas[4]
beta_1_p =self.betas[5]
beta_2_p =self.betas[6]
beta_3_p =self.betas[7]
beta_matrix_p = np.array([[beta_0_p,beta_1_p,beta_2_p,beta_3_p],
[beta_0_p,-1*beta_1_p,beta_2_p,-1*beta_3_p],
[beta_0_p,beta_1_p,-1*beta_2_p,-1*beta_3_p],
[beta_0_p,-1*beta_1_p,-1*beta_2_p,beta_3_p]])
# beta_matrix_p = np.array([[-1*beta_1_p,beta_2_p,-1*beta_3_p],
# [beta_1_p,-1*beta_2_p,-1*beta_3_p],
# [-1*beta_1_p,-1*beta_2_p,beta_3_p]])
        # segments 8:12 are the Z-basis tomo results for the primed channel
expect_value_IdenZ_p = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[8:12])
# expect_value_IdenZ_p = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[1:4])
        #inverting the double-primed beta matrix
        #pp is double-primed
beta_0_pp =self.betas[8]
beta_1_pp =self.betas[9]
beta_2_pp =self.betas[10]
beta_3_pp =self.betas[11]
beta_matrix_pp = np.array([[beta_0_pp,beta_1_pp,beta_2_pp,beta_3_pp],
[beta_0_pp,-1*beta_1_pp,beta_2_pp,-1*beta_3_pp],
[beta_0_pp,beta_1_pp,-1*beta_2_pp,-1*beta_3_pp],
[beta_0_pp,-1*beta_1_pp,-1*beta_2_pp,beta_3_pp]])
# beta_matrix_pp = np.array([[-1*beta_1_pp,beta_2_pp,-1*beta_3_pp],
# [beta_1_pp,-1*beta_2_pp,-1*beta_3_pp],
# [-1*beta_1_pp,-1*beta_2_pp,beta_3_pp]])
        # segments 16:20 are the Z-basis tomo results for the double-primed channel
expect_value_IdenZ_pp = np.dot(np.linalg.inv(beta_matrix_pp), self.measurements_tomo[16:20])
# expect_value_IdenZ_pp = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[1:4])
#take the mean of calculated expectation values of II, IZ, ZI, ZZ
#for three different beta vectors
expect_value_IdenZ = np.mean( np.array([expect_value_IdenZ_up,
expect_value_IdenZ_p,
expect_value_IdenZ_pp]),
axis=0 )
print(expect_value_IdenZ_up)
print(expect_value_IdenZ_p)
print(expect_value_IdenZ_pp)
return expect_value_IdenZ
def expectation_value_calculation_XX(self):
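        # Sketch of the formula below (consistent with the M-matrix rows used by
        # the classes further down): the two XX segments carry opposite signs on
        # the single-qubit terms, so m_a + m_b = 2*beta_0 + 2*beta_3*<XX>, i.e.
        # <XX> = (m_a + m_b - 2*beta_0) / (2*beta_3). The same holds for <YY>.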
        expect_value_XX_up = ((self.measurements_tomo[4]
                               + self.measurements_tomo[5])
                              - 2*self.betas[0]) / (2*self.betas[3])
        expect_value_XX_p = ((self.measurements_tomo[12]
                              + self.measurements_tomo[13])
                             - 2*self.betas[4]) / (2*self.betas[7])
        expect_value_XX_pp = ((self.measurements_tomo[20]
                               + self.measurements_tomo[21])
                              - 2*self.betas[8]) / (2*self.betas[11])
expectation_value_XX = (expect_value_XX_up + expect_value_XX_p + expect_value_XX_pp)/3
# print(expect_value_XX_up, expect_value_XX_p, expect_value_XX_pp)
return expectation_value_XX
def expectation_value_calculation_YY(self):
        expect_value_YY_up = ((self.measurements_tomo[6]
                               + self.measurements_tomo[7])
                              - 2*self.betas[0]) / (2*self.betas[3])
        expect_value_YY_p = ((self.measurements_tomo[14]
                              + self.measurements_tomo[15])
                             - 2*self.betas[4]) / (2*self.betas[7])
        expect_value_YY_pp = ((self.measurements_tomo[22]
                               + self.measurements_tomo[23])
                              - 2*self.betas[8]) / (2*self.betas[11])
# print(expect_value_YY_up, expect_value_YY_p, expect_value_YY_pp)
expectation_value_YY = (expect_value_YY_up + expect_value_YY_p + expect_value_YY_pp)/3
return expectation_value_YY
def execute_expectation_value_calculation(self):
expect_values = np.zeros(6)
expect_values[0:4] = self.expectation_value_calculation_IdenZ()
# print(self.expectation_value_calculation_IdenZ())
expect_values[4] = self.expectation_value_calculation_XX()
# print(self.expectation_value_calculation_XX())
expect_values[5] = self.expectation_value_calculation_YY()
# print(self.expectation_value_calculation_YY())
return expect_values, self.betas
class ExpectationValueCalculation2:
def __init__(self, auto=True, label='', timestamp=None,
fig_format='png',
q0_label='q0',
q1_label='q1', close_fig=True, **kw):
self.label = label
self.timestamp = timestamp
self.fig_format = fig_format
# q0 == D2
self.q0_label = q0_label
# q1 == A
self.q1_label = q1_label
self.n_states = 2 ** 2
self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
timestamp=timestamp)
self.ma_obj.get_naming_and_values()
# self.get_naming_and_values()
# hard coded number of segments for a 2 qubit state tomography
# constraint imposed by UHFLI
self.nr_segments = 16
# self.exp_name = os.path.split(self.folder)[-1][7:]
avg_h1 = self.ma_obj.measured_values[0]
avg_h2 = self.ma_obj.measured_values[1]
avg_h12 = self.ma_obj.measured_values[2]
h1_00 = np.mean(avg_h1[8:10])
h1_01 = np.mean(avg_h1[10:12])
h1_10 = np.mean(avg_h1[12:14])
h1_11 = np.mean(avg_h1[14:])
h2_00 = np.mean(avg_h2[8:10])
h2_01 = np.mean(avg_h2[10:12])
h2_10 = np.mean(avg_h2[12:14])
h2_11 = np.mean(avg_h2[14:])
h12_00 = np.mean(avg_h12[8:10])
h12_01 = np.mean(avg_h12[10:12])
h12_10 = np.mean(avg_h12[12:14])
h12_11 = np.mean(avg_h12[14:])
self.measurements_tomo = (
np.array([avg_h1[0:8], avg_h2[0:8],
avg_h12[0:8]])).flatten()
# print(self.measurements_tomo)
# print(len(self.measurements_tomo))
        # 24 x 1 (three channels x eight tomography segments)
        # get the calibration points by averaging over the repeated
        # calibration segments taken with a known initial state
self.measurements_cal = np.array(
[h1_00, h1_01, h1_10, h1_11,
h2_00, h2_01, h2_10, h2_11,
h12_00, h12_01, h12_10, h12_11])
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
betas = np.zeros(12)
# print(self.measurements_cal[0:4])
betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[0:4])
self.betas_up = betas[0:4]
betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[4:8])
self.betas_p = betas[4:8]
betas[8:] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[8:12])
self.betas_pp = betas[8:]
return betas
def assemble_M_matrix_single_block(self, beta_array):
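        # column order: II IZ ZI ZZ IX XI XX IY YI YY
        # (same convention as the *_shots class further down)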
M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
beta_array[2], beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_2 = np.array([beta_array[0],
-1*beta_array[1],
beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_3 = np.array([beta_array[0],
beta_array[1],
-1*beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_4 = np.array([beta_array[0],
-1*beta_array[1],
-1*beta_array[2],
beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_5 = np.array([beta_array[0],
0, 0, 0, -beta_array[1],
-beta_array[2],
beta_array[3], 0, 0, 0])
M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,
beta_array[1],
beta_array[2],
beta_array[3],
0, 0, 0])
M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
0, 0, 0, 0, beta_array[1],
beta_array[2],
beta_array[3]])
M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
0, 0, -beta_array[1],
-beta_array[2],
beta_array[3]])
M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
M_matrix_single_block_row_2,
M_matrix_single_block_row_3,
M_matrix_single_block_row_4,
M_matrix_single_block_row_5,
M_matrix_single_block_row_6,
M_matrix_single_block_row_7,
M_matrix_single_block_row_8))
M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
return M_matrix_single_block
def assemble_M_matrix(self):
Block1 = self.assemble_M_matrix_single_block(self.betas_up)
Block2 = self.assemble_M_matrix_single_block(self.betas_p)
Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
return self.M_matrix
def invert_M_matrix(self):
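        # The Moore-Penrose pseudo-inverse gives the least-squares solution of
        # the overdetermined 24-equation / 10-unknown system assembled above.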
self.inverse_matrix = np.linalg.pinv(self.M_matrix)
return self.inverse_matrix
def execute_error_signalling(self, ev):
II = (ev[0] - ev[3])/(1 - ev[3])
IZ = (ev[1] - ev[2])/(1 - ev[3])
        ZI = (ev[2] - ev[1])/(1 - ev[3])
ZZ = (ev[3] - ev[0])/(1 - ev[3])
XX = (ev[4] + ev[5])/(1 - ev[3])
YY = (ev[4] + ev[5])/(1 - ev[3])
ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
return ev_error_signalling
def execute_expectation_value_calculation(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.invert_M_matrix()
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[3],
self.expect_values[6],
self.expect_values[9]])
return expect_values_VQE
def execute_expectation_value_calculation_traceone(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
return expect_values_VQE
def execute_expectation_value_calculation_T1signaling(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
return expect_values_VQE
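# Usage sketch for the class above (the label and timestamp are hypothetical
# and the call requires the corresponding measurement data to be on disk):
#
#     evc = ExpectationValueCalculation2(label='tomo', timestamp='20180101_120000')
#     ev = evc.execute_expectation_value_calculation()
#     # ev is ordered as [II, IZ, ZI, ZZ, XX, YY]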
class ExpectationValueCalculation3_shots:
def __init__(self, auto=True, label='', timestamp=None,
fig_format='png',
q0_label='q0',
q1_label='q1', close_fig=True, **kw):
self.label = label
self.timestamp = timestamp
self.fig_format = fig_format
# q0 == D2
self.q0_label = q0_label
# q1 == A
self.q1_label = q1_label
self.n_states = 2 ** 2
self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
timestamp=timestamp)
self.ma_obj.get_naming_and_values()
# self.get_naming_and_values()
# hard coded number of segments for a 2 qubit state tomography
# constraint imposed by UHFLI
self.nr_segments = 16
shots_I_q0 = get_segments_average(self.ma_obj.measured_values[0],
segments_per_block=16,
block_size=4094,
average=False)
shots_I_q1 = get_segments_average(self.ma_obj.measured_values[1],
segments_per_block=16,
block_size=4094,
average=False)
        shots_I_q0q1 = np.multiply(
            shots_I_q0/(np.max(shots_I_q0) - np.min(shots_I_q0)),
            shots_I_q1/(np.max(shots_I_q1) - np.min(shots_I_q1)))
avg_h1 = np.mean(shots_I_q0,axis=0)
avg_h2 = np.mean(shots_I_q1,axis=0)
avg_h12 = np.mean(shots_I_q0q1,axis=0)
h1_00 = np.mean(avg_h1[8:10])
h1_01 = np.mean(avg_h1[10:12])
h1_10 = np.mean(avg_h1[12:14])
h1_11 = np.mean(avg_h1[14:])
h2_00 = np.mean(avg_h2[8:10])
h2_01 = np.mean(avg_h2[10:12])
h2_10 = np.mean(avg_h2[12:14])
h2_11 = np.mean(avg_h2[14:])
h12_00 = np.mean(avg_h12[8:10])
h12_01 = np.mean(avg_h12[10:12])
h12_10 = np.mean(avg_h12[12:14])
h12_11 = np.mean(avg_h12[14:])
mean_h1 = (h1_00+h1_10+h1_01+h1_11)/4
mean_h2 = (h2_00+h2_01+h2_10+h2_11)/4
mean_h12 = (h12_00+h12_11+h12_01+h12_10)/4
#subtract beta 0 from all measurements
#rescale them
avg_h1 -= mean_h1
avg_h2 -= mean_h2
avg_h12 -= mean_h12
scale_h1 = (h1_00+h1_10-h1_01-h1_11)/4
scale_h2 = (h2_00+h2_01-h2_10-h2_11)/4
scale_h12 = (h12_00+h12_11-h12_01-h12_10)/4
avg_h1 = (avg_h1)/scale_h1
avg_h2 = (avg_h2)/scale_h2
avg_h12 = (avg_h12)/scale_h12
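        # After subtracting the mean and dividing by the scale factors, the
        # calibration levels of each channel sit roughly at +/-1 around zero
        # (assuming each channel mainly responds to its own qubit, or to the
        # parity in the case of h12), so the betas derived below are normalized.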
#The averages have been redefined so redefine the cal terms
h1_00 = np.mean(avg_h1[8:10])
h1_01 = np.mean(avg_h1[10:12])
h1_10 = np.mean(avg_h1[12:14])
h1_11 = np.mean(avg_h1[14:])
h2_00 = np.mean(avg_h2[8:10])
h2_01 = np.mean(avg_h2[10:12])
h2_10 = np.mean(avg_h2[12:14])
h2_11 = np.mean(avg_h2[14:])
h12_00 = np.mean(avg_h12[8:10])
h12_01 = np.mean(avg_h12[10:12])
h12_10 = np.mean(avg_h12[12:14])
h12_11 = np.mean(avg_h12[14:])
self.measurements_tomo = (
np.array([avg_h1[0:8], avg_h2[0:8],
avg_h12[0:8]])).flatten()
# print(self.measurements_tomo)
# print(len(self.measurements_tomo))
        # 24 x 1 (three channels x eight tomography segments)
        # get the calibration points by averaging over the repeated
        # calibration segments taken with a known initial state
self.measurements_cal = np.array(
[h1_00, h1_01, h1_10, h1_11,
h2_00, h2_01, h2_10, h2_11,
h12_00, h12_01, h12_10, h12_11])
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
betas = np.zeros(12)
betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[0:4])
self.betas_up = betas[0:4]
betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[4:8])
self.betas_p = betas[4:8]
betas[8:] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[8:12])
self.betas_pp = betas[8:]
return betas
def assemble_M_matrix_single_block(self, beta_array):
# II IZ ZI ZZ IX XI XX IY YI YY
M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
beta_array[2], beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_2 = np.array([beta_array[0],
-1*beta_array[1],
beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_3 = np.array([beta_array[0],
beta_array[1],
-1*beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_4 = np.array([beta_array[0],
-1*beta_array[1],
-1*beta_array[2],
beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_5 = np.array([beta_array[0], # 36
0, 0, 0, -1*beta_array[1],
-1*beta_array[2],
beta_array[3], 0, 0, 0])
M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0, # 29
beta_array[1],
beta_array[2],
beta_array[3],
0, 0, 0])
M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
0, 0, 0, 0, beta_array[1],
beta_array[2],
beta_array[3]])
M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
0, 0, -1*beta_array[1],
-1*beta_array[2],
beta_array[3]])
M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
M_matrix_single_block_row_2,
M_matrix_single_block_row_3,
M_matrix_single_block_row_4,
M_matrix_single_block_row_5,
M_matrix_single_block_row_6,
M_matrix_single_block_row_7,
M_matrix_single_block_row_8))
M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
return M_matrix_single_block
def assemble_M_matrix(self):
Block1 = self.assemble_M_matrix_single_block(self.betas_up)
Block2 = self.assemble_M_matrix_single_block(self.betas_p)
Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
return self.M_matrix
def invert_M_matrix(self):
self.inverse_matrix = np.linalg.pinv(self.M_matrix)
return self.inverse_matrix
def execute_error_signalling(self, ev):
II = (ev[0] - ev[3])/(1 - ev[3])
IZ = (ev[1] - ev[2])/(1 - ev[3])
ZI = (ev[2] - ev[1])/(1 - ev[3])
ZZ = (ev[3] - ev[0])/(1 - ev[3])
XX = (ev[4] + ev[5])/(1 - ev[3])
YY = (ev[5] + ev[4])/(1 - ev[3])
ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
return ev_error_signalling
def execute_expectation_value_calculation(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.invert_M_matrix()
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
print(self.expect_values)
expect_values_VQE = np.array([1,
self.expect_values[1],
self.expect_values[2],
self.expect_values[3],
self.expect_values[6],
self.expect_values[9]])
self.expect_values = expect_values_VQE
return expect_values_VQE
def execute_expectation_value_calculation_traceone(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
beta_0_vec = np.repeat([self.betas_up[0],
self.betas_p[0],
self.betas_pp[0]], 8)
rescaled_measurements_tomo = self.measurements_tomo - beta_0_vec
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
self.expect_values = expect_values_VQE
print(self.expect_values)
return expect_values_VQE
def execute_expectation_value_calculation_T1signaling(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.invert_M_matrix()
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[3],
self.expect_values[6],
self.expect_values[9]])
expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
self.expect_values = expect_values_VQE
return expect_values_VQE
class ExpectationValueCalculation2_shots:
def __init__(self, auto=True, label='', timestamp=None,
fig_format='png',
q0_label='q0',
q1_label='q1', close_fig=True, **kw):
self.label = label
self.timestamp = timestamp
self.fig_format = fig_format
# q0 == D2
self.q0_label = q0_label
# q1 == A
self.q1_label = q1_label
self.n_states = 2 ** 2
self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
timestamp=timestamp)
self.ma_obj.get_naming_and_values()
# self.get_naming_and_values()
# hard coded number of segments for a 2 qubit state tomography
# constraint imposed by UHFLI
self.nr_segments = 16
# self.exp_name = os.path.split(self.folder)[-1][7:]
shots_I_q0 = get_segments_average(self.ma_obj.measured_values[0],
segments_per_block=16,
block_size=4094,
average=False)
shots_I_q1 = get_segments_average(self.ma_obj.measured_values[1],
segments_per_block=16,
block_size=4094,
average=False)
        shots_I_q0q1 = np.multiply(
            shots_I_q0/(np.max(shots_I_q0) - np.min(shots_I_q0)),
            shots_I_q1/(np.max(shots_I_q1) - np.min(shots_I_q1)))
avg_h1 = np.mean(shots_I_q0,axis=0)
avg_h2 = np.mean(shots_I_q1,axis=0)
avg_h12 = np.mean(shots_I_q0q1,axis=0)
h1_00 = np.mean(avg_h1[8:10])
h1_01 = np.mean(avg_h1[10:12])
h1_10 = np.mean(avg_h1[12:14])
h1_11 = np.mean(avg_h1[14:])
h2_00 = np.mean(avg_h2[8:10])
h2_01 = np.mean(avg_h2[10:12])
h2_10 = np.mean(avg_h2[12:14])
h2_11 = np.mean(avg_h2[14:])
h12_00 = np.mean(avg_h12[8:10])
h12_01 = np.mean(avg_h12[10:12])
h12_10 = np.mean(avg_h12[12:14])
h12_11 = np.mean(avg_h12[14:])
self.measurements_tomo = (
np.array([avg_h1[0:8], avg_h2[0:8],
avg_h12[0:8]])).flatten()
# print(self.measurements_tomo)
# print(len(self.measurements_tomo))
        # 24 x 1 (three channels x eight tomography segments)
        # get the calibration points by averaging over the repeated
        # calibration segments taken with a known initial state
self.measurements_cal = np.array(
[h1_00, h1_01, h1_10, h1_11,
h2_00, h2_01, h2_10, h2_11,
h12_00, h12_01, h12_10, h12_11])
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
betas = np.zeros(12)
# print(self.measurements_cal[0:4])
betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[0:4])
self.betas_up = betas[0:4]
betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[4:8])
self.betas_p = betas[4:8]
betas[8:] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[8:12])
self.betas_pp = betas[8:]
return betas
def assemble_M_matrix_single_block(self, beta_array):
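        # column order: II IZ ZI ZZ IX XI XX IY YI YY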
M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
beta_array[2], beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_2 = np.array([beta_array[0],
-1*beta_array[1],
beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_3 = np.array([beta_array[0],
beta_array[1],
-1*beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_4 = np.array([beta_array[0],
-1*beta_array[1],
-1*beta_array[2],
beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_5 = np.array([beta_array[0],
0, 0, 0, -beta_array[1],
-beta_array[2],
beta_array[3], 0, 0, 0])
M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,
beta_array[1],
beta_array[2],
beta_array[3],
0, 0, 0])
M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
0, 0, 0, 0, beta_array[1],
beta_array[2],
beta_array[3]])
M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
0, 0, -beta_array[1],
-beta_array[2],
beta_array[3]])
M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
M_matrix_single_block_row_2,
M_matrix_single_block_row_3,
M_matrix_single_block_row_4,
M_matrix_single_block_row_5,
M_matrix_single_block_row_6,
M_matrix_single_block_row_7,
M_matrix_single_block_row_8))
M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
return M_matrix_single_block
def assemble_M_matrix(self):
Block1 = self.assemble_M_matrix_single_block(self.betas_up)
Block2 = self.assemble_M_matrix_single_block(self.betas_p)
Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
return self.M_matrix
def invert_M_matrix(self):
self.inverse_matrix = np.linalg.pinv(self.M_matrix)
return self.inverse_matrix
def execute_error_signalling(self, ev):
II = (ev[0] - ev[3])/(1 - ev[3])
IZ = (ev[1] - ev[2])/(1 - ev[3])
        ZI = (ev[2] - ev[1])/(1 - ev[3])
ZZ = (ev[3] - ev[0])/(1 - ev[3])
XX = (ev[4] + ev[5])/(1 - ev[3])
YY = (ev[4] + ev[5])/(1 - ev[3])
ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
return ev_error_signalling
def execute_expectation_value_calculation(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.invert_M_matrix()
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[3],
self.expect_values[6],
self.expect_values[9]])
return expect_values_VQE
def execute_expectation_value_calculation_traceone(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
return expect_values_VQE
def execute_expectation_value_calculation_T1signaling(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
return expect_values_VQE
|
|
"""
Allow setting up simple automation rules via the config file.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/automation/
"""
import asyncio
from functools import partial
import logging
import os
import voluptuous as vol
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.core import CoreState
from homeassistant.loader import bind_hass
from homeassistant import config as conf_util
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_PLATFORM, STATE_ON, SERVICE_TURN_ON, SERVICE_TURN_OFF,
SERVICE_TOGGLE, SERVICE_RELOAD, EVENT_HOMEASSISTANT_START, CONF_ID)
from homeassistant.components import logbook
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import extract_domain_configs, script, condition
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import async_get_last_state
from homeassistant.loader import get_platform
from homeassistant.util.dt import utcnow
import homeassistant.helpers.config_validation as cv
DOMAIN = 'automation'
DEPENDENCIES = ['group']
ENTITY_ID_FORMAT = DOMAIN + '.{}'
GROUP_NAME_ALL_AUTOMATIONS = 'all automations'
CONF_ALIAS = 'alias'
CONF_HIDE_ENTITY = 'hide_entity'
CONF_CONDITION = 'condition'
CONF_ACTION = 'action'
CONF_TRIGGER = 'trigger'
CONF_CONDITION_TYPE = 'condition_type'
CONF_INITIAL_STATE = 'initial_state'
CONDITION_USE_TRIGGER_VALUES = 'use_trigger_values'
CONDITION_TYPE_AND = 'and'
CONDITION_TYPE_OR = 'or'
DEFAULT_CONDITION_TYPE = CONDITION_TYPE_AND
DEFAULT_HIDE_ENTITY = False
DEFAULT_INITIAL_STATE = True
ATTR_LAST_TRIGGERED = 'last_triggered'
ATTR_VARIABLES = 'variables'
SERVICE_TRIGGER = 'trigger'
_LOGGER = logging.getLogger(__name__)
def _platform_validator(config):
"""Validate it is a valid platform."""
platform = get_platform(DOMAIN, config[CONF_PLATFORM])
if not hasattr(platform, 'TRIGGER_SCHEMA'):
return config
return getattr(platform, 'TRIGGER_SCHEMA')(config)
_TRIGGER_SCHEMA = vol.All(
cv.ensure_list,
[
vol.All(
vol.Schema({
vol.Required(CONF_PLATFORM): cv.platform_validator(DOMAIN)
}, extra=vol.ALLOW_EXTRA),
_platform_validator
),
]
)
_CONDITION_SCHEMA = vol.All(cv.ensure_list, [cv.CONDITION_SCHEMA])
PLATFORM_SCHEMA = vol.Schema({
# str on purpose
CONF_ID: str,
CONF_ALIAS: cv.string,
vol.Optional(CONF_INITIAL_STATE): cv.boolean,
vol.Optional(CONF_HIDE_ENTITY, default=DEFAULT_HIDE_ENTITY): cv.boolean,
vol.Required(CONF_TRIGGER): _TRIGGER_SCHEMA,
vol.Optional(CONF_CONDITION): _CONDITION_SCHEMA,
vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
})
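# Illustrative YAML shape accepted by the schema above (all values are made-up
# examples, not defaults):
#
#   automation:
#     - alias: Example rule
#       trigger:
#         - platform: state        # validated by that platform's TRIGGER_SCHEMA
#       condition:                 # optional
#         - condition: state
#       action:
#         - service: homeassistant.toggle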
SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
TRIGGER_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_VARIABLES, default={}): dict,
})
RELOAD_SERVICE_SCHEMA = vol.Schema({})
@bind_hass
def is_on(hass, entity_id):
"""
Return true if specified automation entity_id is on.
Async friendly.
"""
return hass.states.is_state(entity_id, STATE_ON)
@bind_hass
def turn_on(hass, entity_id=None):
"""Turn on specified automation or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TURN_ON, data)
@bind_hass
def turn_off(hass, entity_id=None):
"""Turn off specified automation or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TURN_OFF, data)
@bind_hass
def toggle(hass, entity_id=None):
"""Toggle specified automation or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TOGGLE, data)
@bind_hass
def trigger(hass, entity_id=None):
"""Trigger specified automation or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TRIGGER, data)
@bind_hass
def reload(hass):
"""Reload the automation from config."""
hass.services.call(DOMAIN, SERVICE_RELOAD)
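# Usage sketch for the synchronous helpers above (the entity id is hypothetical):
#
#     turn_on(hass, 'automation.example_rule')
#     trigger(hass, 'automation.example_rule')
#     reload(hass)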
@bind_hass
def async_reload(hass):
"""Reload the automation from config.
Returns a coroutine object.
"""
return hass.services.async_call(DOMAIN, SERVICE_RELOAD)
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the automation."""
component = EntityComponent(_LOGGER, DOMAIN, hass,
group_name=GROUP_NAME_ALL_AUTOMATIONS)
yield from _async_process_config(hass, config, component)
descriptions = yield from hass.async_add_job(
conf_util.load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml')
)
@asyncio.coroutine
def trigger_service_handler(service_call):
"""Handle automation triggers."""
tasks = []
for entity in component.async_extract_from_service(service_call):
tasks.append(entity.async_trigger(
service_call.data.get(ATTR_VARIABLES), True))
if tasks:
yield from asyncio.wait(tasks, loop=hass.loop)
@asyncio.coroutine
def turn_onoff_service_handler(service_call):
"""Handle automation turn on/off service calls."""
tasks = []
method = 'async_{}'.format(service_call.service)
for entity in component.async_extract_from_service(service_call):
tasks.append(getattr(entity, method)())
if tasks:
yield from asyncio.wait(tasks, loop=hass.loop)
@asyncio.coroutine
def toggle_service_handler(service_call):
"""Handle automation toggle service calls."""
tasks = []
for entity in component.async_extract_from_service(service_call):
if entity.is_on:
tasks.append(entity.async_turn_off())
else:
tasks.append(entity.async_turn_on())
if tasks:
yield from asyncio.wait(tasks, loop=hass.loop)
@asyncio.coroutine
def reload_service_handler(service_call):
"""Remove all automations and load new ones from config."""
conf = yield from component.async_prepare_reload()
if conf is None:
return
yield from _async_process_config(hass, conf, component)
hass.services.async_register(
DOMAIN, SERVICE_TRIGGER, trigger_service_handler,
descriptions.get(SERVICE_TRIGGER), schema=TRIGGER_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service_handler,
descriptions.get(SERVICE_RELOAD), schema=RELOAD_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_TOGGLE, toggle_service_handler,
descriptions.get(SERVICE_TOGGLE), schema=SERVICE_SCHEMA)
for service in (SERVICE_TURN_ON, SERVICE_TURN_OFF):
hass.services.async_register(
DOMAIN, service, turn_onoff_service_handler,
descriptions.get(service), schema=SERVICE_SCHEMA)
return True
class AutomationEntity(ToggleEntity):
"""Entity to show status of entity."""
def __init__(self, automation_id, name, async_attach_triggers, cond_func,
async_action, hidden, initial_state):
"""Initialize an automation entity."""
self._id = automation_id
self._name = name
self._async_attach_triggers = async_attach_triggers
self._async_detach_triggers = None
self._cond_func = cond_func
self._async_action = async_action
self._last_triggered = None
self._hidden = hidden
self._initial_state = initial_state
@property
def name(self):
"""Name of the automation."""
return self._name
@property
def should_poll(self):
"""No polling needed for automation entities."""
return False
@property
def state_attributes(self):
"""Return the entity state attributes."""
return {
ATTR_LAST_TRIGGERED: self._last_triggered
}
@property
def hidden(self) -> bool:
"""Return True if the automation entity should be hidden from UIs."""
return self._hidden
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._async_detach_triggers is not None
@asyncio.coroutine
def async_added_to_hass(self) -> None:
"""Startup with initial state or previous state."""
if self._initial_state is not None:
enable_automation = self._initial_state
_LOGGER.debug("Automation %s initial state %s from config "
"initial_state", self.entity_id, enable_automation)
else:
state = yield from async_get_last_state(self.hass, self.entity_id)
if state:
enable_automation = state.state == STATE_ON
self._last_triggered = state.attributes.get('last_triggered')
_LOGGER.debug("Automation %s initial state %s from recorder "
"last state %s", self.entity_id,
enable_automation, state)
else:
enable_automation = DEFAULT_INITIAL_STATE
_LOGGER.debug("Automation %s initial state %s from default "
"initial state", self.entity_id,
enable_automation)
if not enable_automation:
return
# HomeAssistant is starting up
elif self.hass.state == CoreState.not_running:
@asyncio.coroutine
def async_enable_automation(event):
"""Start automation on startup."""
yield from self.async_enable()
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, async_enable_automation)
# HomeAssistant is running
else:
yield from self.async_enable()
@asyncio.coroutine
def async_turn_on(self, **kwargs) -> None:
"""Turn the entity on and update the state."""
if self.is_on:
return
yield from self.async_enable()
@asyncio.coroutine
def async_turn_off(self, **kwargs) -> None:
"""Turn the entity off."""
if not self.is_on:
return
self._async_detach_triggers()
self._async_detach_triggers = None
yield from self.async_update_ha_state()
@asyncio.coroutine
def async_trigger(self, variables, skip_condition=False):
"""Trigger automation.
This method is a coroutine.
"""
if skip_condition or self._cond_func(variables):
yield from self._async_action(self.entity_id, variables)
self._last_triggered = utcnow()
yield from self.async_update_ha_state()
@asyncio.coroutine
def async_remove(self):
"""Remove automation from HASS."""
yield from self.async_turn_off()
yield from super().async_remove()
@asyncio.coroutine
def async_enable(self):
"""Enable this automation entity.
This method is a coroutine.
"""
if self.is_on:
return
self._async_detach_triggers = yield from self._async_attach_triggers(
self.async_trigger)
yield from self.async_update_ha_state()
@property
def device_state_attributes(self):
"""Return automation attributes."""
if self._id is None:
return None
return {
CONF_ID: self._id
}
@asyncio.coroutine
def _async_process_config(hass, config, component):
"""Process config and add automations.
This method is a coroutine.
"""
entities = []
for config_key in extract_domain_configs(config, DOMAIN):
conf = config[config_key]
for list_no, config_block in enumerate(conf):
automation_id = config_block.get(CONF_ID)
name = config_block.get(CONF_ALIAS) or "{} {}".format(config_key,
list_no)
hidden = config_block[CONF_HIDE_ENTITY]
initial_state = config_block.get(CONF_INITIAL_STATE)
action = _async_get_action(hass, config_block.get(CONF_ACTION, {}),
name)
if CONF_CONDITION in config_block:
cond_func = _async_process_if(hass, config, config_block)
if cond_func is None:
continue
else:
def cond_func(variables):
"""Condition will always pass."""
return True
async_attach_triggers = partial(
_async_process_trigger, hass, config,
config_block.get(CONF_TRIGGER, []), name
)
entity = AutomationEntity(
automation_id, name, async_attach_triggers, cond_func, action,
hidden, initial_state)
entities.append(entity)
if entities:
yield from component.async_add_entities(entities)
def _async_get_action(hass, config, name):
"""Return an action based on a configuration."""
script_obj = script.Script(hass, config, name)
@asyncio.coroutine
def action(entity_id, variables):
"""Execute an action."""
_LOGGER.info('Executing %s', name)
logbook.async_log_entry(
hass, name, 'has been triggered', DOMAIN, entity_id)
yield from script_obj.async_run(variables)
return action
def _async_process_if(hass, config, p_config):
"""Process if checks."""
if_configs = p_config.get(CONF_CONDITION)
checks = []
for if_config in if_configs:
try:
checks.append(condition.async_from_config(if_config, False))
except HomeAssistantError as ex:
_LOGGER.warning('Invalid condition: %s', ex)
return None
def if_action(variables=None):
"""AND all conditions."""
return all(check(hass, variables) for check in checks)
return if_action
@asyncio.coroutine
def _async_process_trigger(hass, config, trigger_configs, name, action):
"""Set up the triggers.
This method is a coroutine.
"""
removes = []
for conf in trigger_configs:
platform = yield from async_prepare_setup_platform(
hass, config, DOMAIN, conf.get(CONF_PLATFORM))
if platform is None:
return None
remove = yield from platform.async_trigger(hass, conf, action)
if not remove:
_LOGGER.error("Error setting up trigger %s", name)
continue
_LOGGER.info("Initialized trigger %s", name)
removes.append(remove)
if not removes:
return None
def remove_triggers():
"""Remove attached triggers."""
for remove in removes:
remove()
return remove_triggers
|
|
# -*- coding: utf-8 -*-
"""
bbofuser: apps.v1api.views
FILE: ogets
Created: 9/27/15 7:04 PM
"""
__author__ = 'Mark Scrimshire:@ekivemark'
import requests
from xml.dom import minidom
from oauth2_provider.decorators import protected_resource
from oauth2_provider.models import AbstractApplication, AccessToken
from oauth2_provider.views.generic import ProtectedResourceView, View
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.generic import ListView
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from apps.v1api.views.patient import get_patient, get_eob
from apps.v1api.views.crosswalk import lookup_xwalk
from apps.v1api.utils import (build_params)
class Hello(View):
def get(self, request, *args, **kwargs):
return HttpResponse('Hello, OAuth2! %s' % kwargs)
class Patients(ProtectedResourceView):
#class Patients(ListView):
scopes = ['read', 'write']
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if settings.DEBUG:
print("in Patients Class - dispatch" )
return super(Patients, self).dispatch(*args, **kwargs)
@method_decorator(login_required)
def get(self, request, patient_id, *args, **kwargs):
# This is a patient profile GET
#
# use request.user to lookup a crosswalk
# get the FHIR Patient ID
# Call the FHIR Patient Profile
# Return the result
print("in the get!")
if settings.DEBUG:
print("in Patients.get with", patient_id)
xwalk_id = lookup_xwalk(request, )
if settings.DEBUG:
print("crosswalk:", xwalk_id)
        if xwalk_id is None:
return HttpResponseRedirect(reverse_lazy('api:v1:home'))
if settings.DEBUG:
print("now we need to evaluate the parameters and arguments"
" to work with ", xwalk_id, "and ", request.user)
print("GET Parameters:", request.GET, ":")
if patient_id == xwalk_id:
key = patient_id
else:
key = xwalk_id.strip()
in_fmt = "json"
Txn = {'name': "Patient",
'display': 'Patient',
'mask': True,
'server': settings.FHIR_SERVER,
'locn': "/baseDstu2/Patient/",
'template': 'v1api/patient.html',
'in_fmt': in_fmt,
}
skip_parm = ['_id',
'access_token', 'client_id', 'response_type', 'state']
# access_token can be passed in as a part of OAuth protected request.
# as can: state=random_state_string&response_type=code&client_id=ABCDEF
# Remove it before passing url through to FHIR Server
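        # For example, a query string like
        #   ?_format=json&access_token=abc&state=random_state_string&response_type=code&client_id=ABCDEF
        # should be reduced to just the _format parameter before being passed on
        # (the token, state and client values here are placeholders).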
pass_params = build_params(request.GET, skip_parm)
pass_to = Txn['server'] + Txn['locn'] + key + "/"
print("Here is the URL to send, %s now get parameters" % pass_to)
if pass_params != "":
pass_to = pass_to + pass_params
try:
r = requests.get(pass_to)
except requests.ConnectionError:
if settings.DEBUG:
print("Problem connecting to FHIR Server")
messages.error(request, "FHIR Server is unreachable." )
return HttpResponseRedirect(reverse_lazy('api:v1:home'))
text_out = ""
if '_format=xml' in pass_to:
            text_out = minidom.parseString(r.text).toprettyxml()
else:
text_out = r.json()
if settings.DEBUG:
print("What we got back was:", text_out)
return HttpResponse('This is the Patient Pass Thru %s using %s '
'and with response of %s ' % (xwalk_id,
pass_to,
text_out ))
# """
# Class-based view for Patient Resource
#
# GET needs to mask patient elements.
# GET must use the user info to lookup in a CrossWalk
#
# """
# @cache_page(60 * 15)
# @csrf_protect
# def post(self, request,patient_id, *args, **kwargs):
#
# messages.info(request, "POST not implemented")
# if settings.DEBUG:
# print("We are in the apps.v1api.views.oget.Patients.post")
# return HttpResponseRedirect(reverse_lazy('ap:v1:home'))
def post(self,request, patient_id, *args, **kwargs):
# This is a patient profile POST
#
# use request.user to lookup a crosswalk
# get the FHIR Patient ID
# Call the FHIR Patient Profile
# Return the result
print("in Patients.post with", patient_id)
if settings.DEBUG:
print("in Patients.post with", patient_id)
xwalk_id = lookup_xwalk(request, )
if settings.DEBUG:
print("crosswalk:", xwalk_id)
template_name = 'v1api/patient.html'
form = self.form_class(request.POST)
if form.is_valid():
# <process form cleaned data>
return HttpResponseRedirect('/success/')
        return render(request, template_name, {'form': form})
#@protected_resource()
@login_required
def o_patient(request, *args, **kwargs):
if settings.DEBUG:
print("in apps.v1api.views.ogets.Patient")
print("request:", request)
result = get_patient(request, *args, **kwargs)
# if settings.DEBUG:
# print("Results:", result)
return result
#@protected_resource()
@login_required
def o_explanationofbenefit(request, *args, **kwargs):
if settings.DEBUG:
print("in apps.v1api.views.ogets.o_explanationofbenefit")
print("request:", request)
result = get_eob(request, *args, **kwargs)
# if settings.DEBUG:
# print("Results:", result)
return result
@login_required
def open_patient(request, patient_id, *args, **kwargs):
if settings.DEBUG:
print("in apps.v1api.views.ogets.open_patient")
print("request :", request)
print("Patient_id:", patient_id)
kwargs['patient_id'] = patient_id
result = get_patient(request, Access_Mode="OPEN", *args, **kwargs)
# if settings.DEBUG:
# print("Results:", result)
return result
@login_required
def open_explanationofbenefit(request, eob_id, *args, **kwargs):
if settings.DEBUG:
print("in apps.v1api.views.ogets.open_explanationofbenefit")
print("request:", request)
p_id = request.GET.get('patient', '')
if p_id != '':
patient_id = p_id.split('/')[1]
else:
patient_id=""
kwargs['patient_id'] = patient_id
print("Patient_id:", patient_id)
print("p_id:", p_id)
result = get_eob(request, eob_id, Access_Mode="OPEN", *args, **kwargs)
# if settings.DEBUG:
# print("Results:", result)
return result
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Output writers for MapReduce."""
from __future__ import with_statement
__all__ = [
"BlobstoreOutputWriter",
"BlobstoreOutputWriterBase",
"BlobstoreRecordsOutputWriter",
"FileOutputWriter",
"FileOutputWriterBase",
"FileRecordsOutputWriter",
"KeyValueBlobstoreOutputWriter",
"KeyValueFileOutputWriter",
"COUNTER_IO_WRITE_BYTES",
"COUNTER_IO_WRITE_MSEC",
"OutputWriter",
"RecordsPool",
]
import gc
import itertools
import logging
import time
from google.appengine.api import files
from google.appengine.api.files import file_service_pb
from google.appengine.api.files import records
from google.appengine.ext.mapreduce import errors
from google.appengine.ext.mapreduce import model
from google.appengine.ext.mapreduce import operation
COUNTER_IO_WRITE_BYTES = "io-write-bytes"
COUNTER_IO_WRITE_MSEC = "io-write-msec"
class OutputWriter(model.JsonMixin):
"""Abstract base class for output writers.
  Output writers process all mapper handler output that is not
  an operation.
OutputWriter's lifecycle is the following:
0) validate called to validate mapper specification.
1) init_job is called to initialize any job-level state.
2) create() is called, which should create a new instance of output
writer for a given shard
3) from_json()/to_json() are used to persist writer's state across
multiple slices.
4) write() method is called to write data.
5) finalize() is called when shard processing is done.
    6) finalize_job() is called when job is completed.
"""
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper specification.
Output writer parameters are expected to be passed as "output_writer"
    subdictionary of mapper_spec.params. To be compatible with the previous
    API, an output writer is advised to check mapper_spec.params and issue
    a warning if the "output_writer" subdictionary is not present.
_get_params helper method can be used to simplify implementation.
Args:
mapper_spec: an instance of model.MapperSpec to validate.
"""
raise NotImplementedError("validate() not implemented in %s" % cls)
@classmethod
def init_job(cls, mapreduce_state):
"""Initialize job-level writer state.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job. State can be modified during initialization.
"""
raise NotImplementedError("init_job() not implemented in %s" % cls)
@classmethod
def finalize_job(cls, mapreduce_state):
"""Finalize job-level writer state.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job. State can be modified during finalization.
"""
raise NotImplementedError("finalize_job() not implemented in %s" % cls)
@classmethod
def from_json(cls, state):
"""Creates an instance of the OutputWriter for the given json state.
Args:
state: The OutputWriter state as a dict-like object.
Returns:
An instance of the OutputWriter configured using the values of json.
"""
raise NotImplementedError("from_json() not implemented in %s" % cls)
def to_json(self):
"""Returns writer state to serialize in json.
Returns:
A json-izable version of the OutputWriter state.
"""
raise NotImplementedError("to_json() not implemented in %s" %
self.__class__)
@classmethod
def create(cls, mapreduce_state, shard_number):
"""Create new writer for a shard.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job. State can be modified.
shard_number: shard number as integer.
"""
raise NotImplementedError("create() not implemented in %s" % cls)
def write(self, data, ctx):
"""Write data.
Args:
data: actual data yielded from handler. Type is writer-specific.
ctx: an instance of context.Context.
"""
raise NotImplementedError("write() not implemented in %s" %
self.__class__)
def finalize(self, ctx, shard_number):
"""Finalize writer shard-level state.
Args:
ctx: an instance of context.Context.
shard_number: shard number as integer.
"""
raise NotImplementedError("finalize() not implemented in %s" %
self.__class__)
@classmethod
def get_filenames(cls, mapreduce_state):
"""Obtain output filenames from mapreduce state.
Args:
mapreduce_state: an instance of model.MapreduceState
Returns:
list of filenames this writer writes to or None if writer
doesn't write to a file.
"""
raise NotImplementedError("get_filenames() not implemented in %s" % cls)
_FILES_API_FLUSH_SIZE = 128*1024
_FILES_API_MAX_SIZE = 1000*1024
def _get_params(mapper_spec, allowed_keys=None):
"""Obtain output writer parameters.
Utility function for output writer implementation. Fetches parameters
from mapreduce specification giving appropriate usage warnings.
Args:
mapper_spec: The MapperSpec for the job
allowed_keys: set of all allowed keys in parameters as strings. If it is not
None, then parameters are expected to be in a separate "output_writer"
subdictionary of mapper_spec parameters.
Returns:
mapper parameters as dict
Raises:
BadWriterParamsError: if parameters are invalid/missing or not allowed.
"""
if "output_writer" not in mapper_spec.params:
message = (
"Output writer's parameters should be specified in "
"output_writer subdictionary.")
if allowed_keys:
raise errors.BadWriterParamsError(message)
else:
logging.warning(message)
params = mapper_spec.params
params = dict((str(n), v) for n, v in params.iteritems())
else:
if not isinstance(mapper_spec.params.get("output_writer"), dict):
      raise errors.BadWriterParamsError(
"Output writer parameters should be a dictionary")
params = mapper_spec.params.get("output_writer")
params = dict((str(n), v) for n, v in params.iteritems())
if allowed_keys:
params_diff = set(params.keys()) - allowed_keys
if params_diff:
raise errors.BadWriterParamsError(
"Invalid output_writer parameters: %s" % ",".join(params_diff))
return params
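# Illustrative mapper_spec.params layout that _get_params expects when the
# "output_writer" subdictionary form is used (the bucket name is hypothetical):
#   {"output_writer": {"filesystem": "gs",
#                      "gs_bucket_name": "my-bucket"}}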
class _FilePool(object):
"""Pool of file append operations."""
def __init__(self, flush_size_chars=_FILES_API_FLUSH_SIZE, ctx=None):
"""Constructor.
Args:
flush_size_chars: buffer flush size in bytes as int. Internal buffer
will be flushed once this size is reached.
ctx: mapreduce context as context.Context. Can be null.
"""
self._flush_size = flush_size_chars
self._append_buffer = {}
self._size = 0
self._ctx = ctx
def __append(self, filename, data):
"""Append data to the filename's buffer without checks and flushes."""
self._append_buffer[filename] = (
self._append_buffer.get(filename, "") + data)
self._size += len(data)
def append(self, filename, data):
"""Append data to a file.
Args:
filename: the name of the file as string.
data: data as string.
"""
if self._size + len(data) > self._flush_size:
self.flush()
if len(data) > _FILES_API_MAX_SIZE:
raise errors.Error(
"Can't write more than %s bytes in one request: "
"risk of writes interleaving." % _FILES_API_MAX_SIZE)
else:
self.__append(filename, data)
if self._size > self._flush_size:
self.flush()
def flush(self):
"""Flush pool contents."""
start_time = time.time()
for filename, data in self._append_buffer.iteritems():
with files.open(filename, "a") as f:
if len(data) > _FILES_API_MAX_SIZE:
raise errors.Error("Bad data of length: %s" % len(data))
if self._ctx:
operation.counters.Increment(
COUNTER_IO_WRITE_BYTES, len(data))(self._ctx)
f.write(data)
if self._ctx:
operation.counters.Increment(
COUNTER_IO_WRITE_MSEC,
int((time.time() - start_time) * 1000))(self._ctx)
self._append_buffer = {}
self._size = 0
class _StringWriter(object):
"""Simple writer for records api that writes to a string buffer."""
def __init__(self):
self._buffer = ""
def to_string(self):
"""Convert writer buffer to string."""
return self._buffer
def write(self, data):
"""Write data.
Args:
data: data to append to the buffer as string.
"""
self._buffer += data
class RecordsPool(object):
"""Pool of append operations for records files."""
_RECORD_OVERHEAD_BYTES = 10
def __init__(self, filename,
flush_size_chars=_FILES_API_FLUSH_SIZE,
ctx=None,
exclusive=False):
"""Constructor.
Args:
filename: file name to write data to as string.
flush_size_chars: buffer flush threshold as int.
ctx: mapreduce context as context.Context.
exclusive: a boolean flag indicating if the pool has an exclusive
access to the file. If it is True, then it's possible to write
bigger chunks of data.
"""
self._flush_size = flush_size_chars
self._buffer = []
self._size = 0
self._filename = filename
self._ctx = ctx
self._exclusive = exclusive
def append(self, data):
"""Append data to a file."""
data_length = len(data)
if self._size + data_length > self._flush_size:
self.flush()
if not self._exclusive and data_length > _FILES_API_MAX_SIZE:
raise errors.Error(
"Too big input %s (%s)." % (data_length, _FILES_API_MAX_SIZE))
else:
self._buffer.append(data)
self._size += data_length
if self._size > self._flush_size:
self.flush()
def flush(self):
"""Flush pool contents."""
try:
buf = _StringWriter()
with records.RecordsWriter(buf) as w:
for record in self._buffer:
w.write(record)
str_buf = buf.to_string()
if not self._exclusive and len(str_buf) > _FILES_API_MAX_SIZE:
raise errors.Error(
"Buffer too big. Can't write more than %s bytes in one request: "
"risk of writes interleaving. Got: %s" %
(_FILES_API_MAX_SIZE, len(str_buf)))
start_time = time.time()
with files.open(self._filename, "a", exclusive_lock=self._exclusive) as f:
f.write(str_buf)
if self._ctx:
operation.counters.Increment(
COUNTER_IO_WRITE_BYTES, len(str_buf))(self._ctx)
if self._ctx:
operation.counters.Increment(
COUNTER_IO_WRITE_MSEC,
int((time.time() - start_time) * 1000))(self._ctx)
self._buffer = []
self._size = 0
gc.collect()
except (files.UnknownError), e:
logging.warning("UnknownError: %s", e)
raise errors.RetrySliceError()
except (files.ExistenceError), e:
logging.warning("ExistenceError: %s", e)
raise errors.FailJobError("Existence error: %s" % (e))
def __enter__(self):
return self
def __exit__(self, atype, value, traceback):
self.flush()
class FileOutputWriterBase(OutputWriter):
"""Base class for all file output writers."""
OUTPUT_SHARDING_PARAM = "output_sharding"
OUTPUT_SHARDING_NONE = "none"
OUTPUT_SHARDING_INPUT_SHARDS = "input"
OUTPUT_FILESYSTEM_PARAM = "filesystem"
GS_BUCKET_NAME_PARAM = "gs_bucket_name"
GS_ACL_PARAM = "gs_acl"
class _State(object):
"""Writer state. Stored in MapreduceState.
State list all files which were created for the job.
"""
def __init__(self, filenames, request_filenames):
"""State initializer.
Args:
filenames: writable or finalized filenames as returned by the files api.
request_filenames: filenames as given to the files create api.
"""
self.filenames = filenames
self.request_filenames = request_filenames
def to_json(self):
return {
"filenames": self.filenames,
"request_filenames": self.request_filenames
}
@classmethod
def from_json(cls, json):
return cls(json["filenames"], json["request_filenames"])
def __init__(self, filename):
self._filename = filename
@classmethod
def _get_output_sharding(cls, mapreduce_state=None, mapper_spec=None):
"""Get output sharding parameter value from mapreduce state or mapper spec.
At least one of the parameters should not be None.
Args:
mapreduce_state: mapreduce state as model.MapreduceState.
mapper_spec: mapper specification as model.MapperSpec
"""
if mapper_spec:
return _get_params(mapper_spec).get(
FileOutputWriterBase.OUTPUT_SHARDING_PARAM,
FileOutputWriterBase.OUTPUT_SHARDING_NONE).lower()
if mapreduce_state:
mapper_spec = mapreduce_state.mapreduce_spec.mapper
return cls._get_output_sharding(mapper_spec=mapper_spec)
raise errors.Error("Neither mapreduce_state nor mapper_spec specified.")
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec to validate.
"""
if mapper_spec.output_writer_class() != cls:
raise errors.BadWriterParamsError("Output writer class mismatch")
output_sharding = cls._get_output_sharding(mapper_spec=mapper_spec)
if (output_sharding != cls.OUTPUT_SHARDING_NONE and
output_sharding != cls.OUTPUT_SHARDING_INPUT_SHARDS):
raise errors.BadWriterParamsError(
"Invalid output_sharding value: %s" % output_sharding)
params = _get_params(mapper_spec)
filesystem = cls._get_filesystem(mapper_spec)
if filesystem not in files.FILESYSTEMS:
raise errors.BadWriterParamsError(
"Filesystem '%s' is not supported. Should be one of %s" %
(filesystem, files.FILESYSTEMS))
if filesystem == files.GS_FILESYSTEM:
if not cls.GS_BUCKET_NAME_PARAM in params:
raise errors.BadWriterParamsError(
"%s is required for Google store filesystem" %
cls.GS_BUCKET_NAME_PARAM)
else:
if params.get(cls.GS_BUCKET_NAME_PARAM) is not None:
raise errors.BadWriterParamsError(
"%s can only be provided for Google store filesystem" %
cls.GS_BUCKET_NAME_PARAM)
@classmethod
def init_job(cls, mapreduce_state):
"""Initialize job-level writer state.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job.
"""
output_sharding = cls._get_output_sharding(mapreduce_state=mapreduce_state)
mapper_spec = mapreduce_state.mapreduce_spec.mapper
params = _get_params(mapper_spec)
mime_type = params.get("mime_type", "application/octet-stream")
filesystem = cls._get_filesystem(mapper_spec=mapper_spec)
bucket = params.get(cls.GS_BUCKET_NAME_PARAM)
acl = params.get(cls.GS_ACL_PARAM)
if output_sharding == cls.OUTPUT_SHARDING_INPUT_SHARDS:
number_of_files = mapreduce_state.mapreduce_spec.mapper.shard_count
else:
number_of_files = 1
filenames = []
request_filenames = []
for i in range(number_of_files):
filename = (mapreduce_state.mapreduce_spec.name + "-" +
mapreduce_state.mapreduce_spec.mapreduce_id + "-output")
if number_of_files > 1:
filename += "-" + str(i)
if bucket is not None:
filename = "%s/%s" % (bucket, filename)
request_filenames.append(filename)
filenames.append(cls._create_file(filesystem, filename, mime_type,
acl=acl))
mapreduce_state.writer_state = cls._State(
filenames, request_filenames).to_json()
@classmethod
def _get_filesystem(cls, mapper_spec):
return _get_params(mapper_spec).get(cls.OUTPUT_FILESYSTEM_PARAM, "").lower()
@classmethod
def _create_file(cls, filesystem, filename, mime_type, **kwargs):
"""Creates a file and returns its created filename."""
if filesystem == files.BLOBSTORE_FILESYSTEM:
return files.blobstore.create(mime_type, filename)
elif filesystem == files.GS_FILESYSTEM:
return files.gs.create("/gs/%s" % filename, mime_type, **kwargs)
else:
raise errors.BadWriterParamsError(
"Filesystem '%s' is not supported" % filesystem)
@classmethod
def _get_finalized_filename(cls, fs, create_filename, request_filename):
"""Returns the finalized filename for the created filename."""
if fs == "blobstore":
return files.blobstore.get_file_name(
files.blobstore.get_blob_key(create_filename))
elif fs == "gs":
return "/gs/" + request_filename
else:
raise errors.BadWriterParamsError(
"Filesystem '%s' is not supported" % fs)
@classmethod
def finalize_job(cls, mapreduce_state):
"""Finalize job-level writer state.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job.
"""
state = cls._State.from_json(mapreduce_state.writer_state)
output_sharding = cls._get_output_sharding(mapreduce_state=mapreduce_state)
filesystem = cls._get_filesystem(mapreduce_state.mapreduce_spec.mapper)
finalized_filenames = []
for create_filename, request_filename in itertools.izip(
state.filenames, state.request_filenames):
if output_sharding != cls.OUTPUT_SHARDING_INPUT_SHARDS:
files.finalize(create_filename)
finalized_filenames.append(cls._get_finalized_filename(filesystem,
create_filename,
request_filename))
state.filenames = finalized_filenames
state.request_filenames = []
mapreduce_state.writer_state = state.to_json()
@classmethod
def from_json(cls, state):
"""Creates an instance of the OutputWriter for the given json state.
Args:
state: The OutputWriter state as a json object (dict like).
Returns:
An instance of the OutputWriter configured using the values of json.
"""
return cls(state["filename"])
def to_json(self):
"""Returns writer state to serialize in json.
Returns:
A json-izable version of the OutputWriter state.
"""
return {"filename": self._filename}
@classmethod
def create(cls, mapreduce_state, shard_number):
"""Create new writer for a shard.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job.
shard_number: shard number as integer.
"""
file_index = 0
output_sharding = cls._get_output_sharding(mapreduce_state=mapreduce_state)
if output_sharding == cls.OUTPUT_SHARDING_INPUT_SHARDS:
file_index = shard_number
state = cls._State.from_json(mapreduce_state.writer_state)
return cls(state.filenames[file_index])
def finalize(self, ctx, shard_number):
"""Finalize writer shard-level state.
Args:
ctx: an instance of context.Context.
shard_number: shard number as integer.
"""
mapreduce_spec = ctx.mapreduce_spec
output_sharding = self.__class__._get_output_sharding(
mapper_spec=mapreduce_spec.mapper)
if output_sharding == self.OUTPUT_SHARDING_INPUT_SHARDS:
files.finalize(self._filename)
@classmethod
def get_filenames(cls, mapreduce_state):
"""Obtain output filenames from mapreduce state.
Args:
mapreduce_state: an instance of model.MapreduceState
Returns:
list of filenames this writer writes to.
"""
state = cls._State.from_json(mapreduce_state.writer_state)
return state.filenames
class FileOutputWriter(FileOutputWriterBase):
"""An implementation of OutputWriter which outputs data into file."""
def write(self, data, ctx):
"""Write data.
Args:
data: actual data yielded from handler. Type is writer-specific.
ctx: an instance of context.Context.
"""
if ctx.get_pool("file_pool") is None:
ctx.register_pool("file_pool", _FilePool(ctx=ctx))
ctx.get_pool("file_pool").append(self._filename, str(data))
class FileRecordsOutputWriter(FileOutputWriterBase):
"""A File OutputWriter which outputs data using leveldb log format."""
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec to validate.
"""
if cls.OUTPUT_SHARDING_PARAM in _get_params(mapper_spec):
raise errors.BadWriterParamsError(
"output_sharding should not be specified for %s" % cls.__name__)
super(FileRecordsOutputWriter, cls).validate(mapper_spec)
@classmethod
def _get_output_sharding(cls, mapreduce_state=None, mapper_spec=None):
return cls.OUTPUT_SHARDING_INPUT_SHARDS
def write(self, data, ctx):
"""Write data.
Args:
data: actual data yielded from handler. Type is writer-specific.
ctx: an instance of context.Context.
"""
if ctx.get_pool("records_pool") is None:
ctx.register_pool("records_pool",
RecordsPool(self._filename, ctx=ctx, exclusive=True))
ctx.get_pool("records_pool").append(str(data))
class KeyValueFileOutputWriter(FileRecordsOutputWriter):
"""A file output writer for KeyValue records."""
def write(self, data, ctx):
if len(data) != 2:
logging.error("Got bad tuple of length %d (2-tuple expected): %s",
len(data), data)
try:
key = str(data[0])
value = str(data[1])
except TypeError:
logging.error("Expecting a tuple, but got %s: %s",
data.__class__.__name__, data)
proto = file_service_pb.KeyValue()
proto.set_key(key)
proto.set_value(value)
FileRecordsOutputWriter.write(self, proto.Encode(), ctx)
class BlobstoreOutputWriterBase(FileOutputWriterBase):
"""A base class of OutputWriter which outputs data into blobstore."""
@classmethod
def _get_filesystem(cls, mapper_spec):
return "blobstore"
class BlobstoreOutputWriter(FileOutputWriter, BlobstoreOutputWriterBase):
"""An implementation of OutputWriter which outputs data into blobstore."""
class BlobstoreRecordsOutputWriter(FileRecordsOutputWriter,
BlobstoreOutputWriterBase):
"""An OutputWriter which outputs data into records format."""
class KeyValueBlobstoreOutputWriter(KeyValueFileOutputWriter,
BlobstoreOutputWriterBase):
"""Output writer for KeyValue records files in blobstore."""
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts checkpoint variables into Const ops in a standalone GraphDef file.
This script is designed to take a GraphDef proto, a SaverDef proto, and a set of
variable values stored in a checkpoint file, and output a GraphDef with all of
the variable ops converted into const ops containing the values of the
variables.
It's useful to do this when we need to load a single file in C++, especially in
environments like mobile or embedded where we may not have access to the
RestoreTensor ops and file loading calls that they rely on.
An example of command-line usage is:
bazel build tensorflow/python/tools:freeze_graph && \
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=some_graph_def.pb \
--input_checkpoint=model.ckpt-8361242 \
--output_graph=/tmp/frozen_graph.pb --output_node_names=softmax
You can also look at freeze_graph_test.py for an example of how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import saved_model_utils
from tensorflow.python.training import saver as saver_lib
FLAGS = None
def freeze_graph_with_def_protos(input_graph_def,
input_saver_def,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph,
clear_devices,
initializer_nodes,
variable_names_blacklist="",
input_meta_graph_def=None,
input_saved_model_dir=None,
saved_model_tags=None):
"""Converts all variables in a graph and checkpoint into constants."""
del restore_op_name, filename_tensor_name # Unused by updated loading code.
# 'input_checkpoint' may be a prefix if we're using Saver V2 format
if (not input_saved_model_dir and
not saver_lib.checkpoint_exists(input_checkpoint)):
print("Input checkpoint '" + input_checkpoint + "' doesn't exist!")
return -1
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
if clear_devices:
if input_meta_graph_def:
for node in input_meta_graph_def.graph_def.node:
node.device = ""
elif input_graph_def:
for node in input_graph_def.node:
node.device = ""
if input_graph_def:
_ = importer.import_graph_def(input_graph_def, name="")
with session.Session() as sess:
if input_saver_def:
saver = saver_lib.Saver(saver_def=input_saver_def)
saver.restore(sess, input_checkpoint)
elif input_meta_graph_def:
restorer = saver_lib.import_meta_graph(
input_meta_graph_def, clear_devices=True)
restorer.restore(sess, input_checkpoint)
if initializer_nodes:
sess.run(initializer_nodes.split(","))
elif input_saved_model_dir:
if saved_model_tags is None:
saved_model_tags = []
loader.load(sess, saved_model_tags, input_saved_model_dir)
else:
var_list = {}
reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
try:
tensor = sess.graph.get_tensor_by_name(key + ":0")
except KeyError:
# This tensor doesn't exist in the graph (for example it's
# 'global_step' or a similar housekeeping element) so skip it.
continue
var_list[key] = tensor
saver = saver_lib.Saver(var_list=var_list)
saver.restore(sess, input_checkpoint)
if initializer_nodes:
sess.run(initializer_nodes.split(","))
variable_names_blacklist = (variable_names_blacklist.split(",")
if variable_names_blacklist else None)
if input_meta_graph_def:
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_meta_graph_def.graph_def,
output_node_names.split(","),
variable_names_blacklist=variable_names_blacklist)
else:
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_graph_def,
output_node_names.split(","),
variable_names_blacklist=variable_names_blacklist)
# Write GraphDef to file if output path has been given.
if output_graph:
with gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
return output_graph_def
def _parse_input_graph_proto(input_graph, input_binary):
"""Parser input tensorflow graph into GraphDef proto."""
if not gfile.Exists(input_graph):
print("Input graph file '" + input_graph + "' does not exist!")
return -1
input_graph_def = graph_pb2.GraphDef()
mode = "rb" if input_binary else "r"
with gfile.FastGFile(input_graph, mode) as f:
if input_binary:
input_graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), input_graph_def)
return input_graph_def
def _parse_input_meta_graph_proto(input_graph, input_binary):
"""Parser input tensorflow graph into MetaGraphDef proto."""
if not gfile.Exists(input_graph):
print("Input meta graph file '" + input_graph + "' does not exist!")
return -1
input_meta_graph_def = MetaGraphDef()
mode = "rb" if input_binary else "r"
with gfile.FastGFile(input_graph, mode) as f:
if input_binary:
input_meta_graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), input_meta_graph_def)
print("Loaded meta graph file '" + input_graph)
return input_meta_graph_def
def _parse_input_saver_proto(input_saver, input_binary):
"""Parser input tensorflow Saver into SaverDef proto."""
if not gfile.Exists(input_saver):
print("Input saver file '" + input_saver + "' does not exist!")
return -1
mode = "rb" if input_binary else "r"
with gfile.FastGFile(input_saver, mode) as f:
saver_def = saver_pb2.SaverDef()
if input_binary:
saver_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), saver_def)
return saver_def
def freeze_graph(input_graph,
input_saver,
input_binary,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph,
clear_devices,
initializer_nodes,
variable_names_blacklist="",
input_meta_graph=None,
input_saved_model_dir=None,
saved_model_tags=tag_constants.SERVING):
"""Converts all variables in a graph and checkpoint into constants."""
input_graph_def = None
if input_saved_model_dir:
input_graph_def = saved_model_utils.get_meta_graph_def(
input_saved_model_dir, saved_model_tags).graph_def
elif input_graph:
input_graph_def = _parse_input_graph_proto(input_graph, input_binary)
input_meta_graph_def = None
if input_meta_graph:
input_meta_graph_def = _parse_input_meta_graph_proto(
input_meta_graph, input_binary)
input_saver_def = None
if input_saver:
input_saver_def = _parse_input_saver_proto(input_saver, input_binary)
freeze_graph_with_def_protos(
input_graph_def, input_saver_def, input_checkpoint, output_node_names,
restore_op_name, filename_tensor_name, output_graph, clear_devices,
initializer_nodes, variable_names_blacklist, input_meta_graph_def,
input_saved_model_dir, saved_model_tags.split(","))
def main(unused_args):
freeze_graph(FLAGS.input_graph, FLAGS.input_saver, FLAGS.input_binary,
FLAGS.input_checkpoint, FLAGS.output_node_names,
FLAGS.restore_op_name, FLAGS.filename_tensor_name,
FLAGS.output_graph, FLAGS.clear_devices, FLAGS.initializer_nodes,
FLAGS.variable_names_blacklist, FLAGS.input_meta_graph,
FLAGS.input_saved_model_dir, FLAGS.saved_model_tags)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--input_graph",
type=str,
default="",
help="TensorFlow \'GraphDef\' file to load.")
parser.add_argument(
"--input_saver",
type=str,
default="",
help="TensorFlow saver file to load.")
parser.add_argument(
"--input_checkpoint",
type=str,
default="",
help="TensorFlow variables file to load.")
parser.add_argument(
"--output_graph",
type=str,
default="",
help="Output \'GraphDef\' file name.")
parser.add_argument(
"--input_binary",
nargs="?",
const=True,
type="bool",
default=False,
help="Whether the input files are in binary format.")
parser.add_argument(
"--output_node_names",
type=str,
default="",
help="The name of the output nodes, comma separated.")
parser.add_argument(
"--restore_op_name",
type=str,
default="save/restore_all",
help="The name of the master restore operator.")
parser.add_argument(
"--filename_tensor_name",
type=str,
default="save/Const:0",
help="The name of the tensor holding the save path.")
parser.add_argument(
"--clear_devices",
nargs="?",
const=True,
type="bool",
default=True,
help="Whether to remove device specifications.")
parser.add_argument(
"--initializer_nodes",
type=str,
default="",
help="comma separated list of initializer nodes to run before freezing.")
parser.add_argument(
"--variable_names_blacklist",
type=str,
default="",
help="""\
comma separated list of variables to skip converting to constants\
""")
parser.add_argument(
"--input_meta_graph",
type=str,
default="",
help="TensorFlow \'MetaGraphDef\' file to load.")
parser.add_argument(
"--input_saved_model_dir",
type=str,
default="",
help="Path to the dir with TensorFlow \'SavedModel\' file and variables.")
parser.add_argument(
"--saved_model_tags",
type=str,
default="serve",
help="""\
Group of tag(s) of the MetaGraphDef to load, in string format,\
separated by \',\'. For tag-set contains multiple tags, all tags \
must be passed in.\
""")
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
|
#################################################################
# seGeometry.py
# Originally from DirectGeometry.py
# Altered by Yi-Hong Lin, [email protected], 2004
#
# We didn't change anything essential.
# Because we customized seSession from DirectSession,
# the related files need to follow that change.
# However, we don't want to change anything inside the original directtools,
# so that they can still work with our scene editor.
# (If we changed the original directtools, users would be forced to install
# the latest version of OUR Panda.)
#
#################################################################
from pandac.PandaModules import *
from direct.directtools.DirectGlobals import *
from direct.directtools.DirectUtil import *
import math
class LineNodePath(NodePath):
def __init__(self, parent = None, name = None,
thickness = 1.0, colorVec = VBase4(1)):
# Initialize the superclass
NodePath.__init__(self)
if parent is None:
parent = hidden
# Attach a geomNode to the parent and set self to be
# the resulting node path
self.lineNode = GeomNode("lineNode")
self.assign(parent.attachNewNode( self.lineNode ))
if name:
self.setName(name)
# Create a lineSegs object to hold the line
ls = self.lineSegs = LineSegs()
# Initialize the lineSegs parameters
ls.setThickness(thickness)
ls.setColor(colorVec)
def moveTo( self, *_args ):
apply( self.lineSegs.moveTo, _args )
def drawTo( self, *_args ):
apply( self.lineSegs.drawTo, _args )
def create( self, frameAccurate = 0 ):
self.lineSegs.create( self.lineNode, frameAccurate )
def reset( self ):
self.lineSegs.reset()
self.lineNode.removeAllGeoms()
def isEmpty( self ):
return self.lineSegs.isEmpty()
def setThickness( self, thickness ):
self.lineSegs.setThickness( thickness )
def setColor( self, *_args ):
apply( self.lineSegs.setColor, _args )
def setVertex( self, *_args):
apply( self.lineSegs.setVertex, _args )
def setVertexColor( self, vertex, *_args ):
apply( self.lineSegs.setVertexColor, (vertex,) + _args )
def getCurrentPosition( self ):
return self.lineSegs.getCurrentPosition()
def getNumVertices( self ):
return self.lineSegs.getNumVertices()
def getVertex( self, index ):
return self.lineSegs.getVertex(index)
def getVertexColor( self ):
return self.lineSegs.getVertexColor()
def drawArrow(self, sv, ev, arrowAngle, arrowLength):
"""
Do the work of moving the cursor around to draw an arrow from
sv to ev. Hack: the arrows take the z value of the end point
"""
self.moveTo(sv)
self.drawTo(ev)
v = sv - ev
# Find the angle of the line
angle = math.atan2(v[1], v[0])
# Get the arrow angles
a1 = angle + deg2Rad(arrowAngle)
a2 = angle - deg2Rad(arrowAngle)
# Get the arrow points
a1x = arrowLength * math.cos(a1)
a1y = arrowLength * math.sin(a1)
a2x = arrowLength * math.cos(a2)
a2y = arrowLength * math.sin(a2)
z = ev[2]
self.moveTo(ev)
self.drawTo(Point3(ev + Point3(a1x, a1y, z)))
self.moveTo(ev)
self.drawTo(Point3(ev + Point3(a2x, a2y, z)))
def drawArrow2d(self, sv, ev, arrowAngle, arrowLength):
"""
Do the work of moving the cursor around to draw an arrow from
sv to ev. Hack: the arrows take the z value of the end point
"""
self.moveTo(sv)
self.drawTo(ev)
v = sv - ev
# Find the angle of the line
angle = math.atan2(v[2], v[0])
# Get the arrow angles
a1 = angle + deg2Rad(arrowAngle)
a2 = angle - deg2Rad(arrowAngle)
# Get the arrow points
a1x = arrowLength * math.cos(a1)
a1y = arrowLength * math.sin(a1)
a2x = arrowLength * math.cos(a2)
a2y = arrowLength * math.sin(a2)
self.moveTo(ev)
self.drawTo(Point3(ev + Point3(a1x, 0.0, a1y)))
self.moveTo(ev)
self.drawTo(Point3(ev + Point3(a2x, 0.0, a2y)))
def drawLines(self, lineList):
"""
Given a list of lists of points, draw a separate line for each list
"""
for pointList in lineList:
apply(self.moveTo, pointList[0])
for point in pointList[1:]:
apply(self.drawTo, point)
##
## Given a point in space, and a direction, find the point of intersection
## of that ray with a plane at the specified origin, with the specified normal
def planeIntersect (lineOrigin, lineDir, planeOrigin, normal):
t = 0
offset = planeOrigin - lineOrigin
t = offset.dot(normal) / lineDir.dot(normal)
hitPt = lineDir * t
return hitPt + lineOrigin
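# Quick illustrative check of planeIntersect: a ray starting at the origin and
# pointing along +Y meets the plane through (0, 5, 0) with normal (0, 1, 0)
# where t = offset.dot(normal) / lineDir.dot(normal) = 5 / 1 = 5, i.e. at
# (0, 5, 0):
#   planeIntersect(Point3(0, 0, 0), Vec3(0, 1, 0), Point3(0, 5, 0), Vec3(0, 1, 0))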
def getNearProjectionPoint(nodePath):
# Find the position of the projection of the specified node path
# on the near plane
origin = nodePath.getPos(SEditor.camera)
# project this onto near plane
if origin[1] != 0.0:
return origin * (SEditor.dr.near / origin[1])
else:
        # Object is coplanar with the camera, just return something reasonable
return Point3(0, SEditor.dr.near, 0)
def getScreenXY(nodePath):
# Where does the node path's projection fall on the near plane
nearVec = getNearProjectionPoint(nodePath)
# Clamp these coordinates to visible screen
nearX = CLAMP(nearVec[0], SEditor.dr.left, SEditor.dr.right)
nearY = CLAMP(nearVec[2], SEditor.dr.bottom, SEditor.dr.top)
# What percentage of the distance across the screen is this?
percentX = (nearX - SEditor.dr.left)/SEditor.dr.nearWidth
percentY = (nearY - SEditor.dr.bottom)/SEditor.dr.nearHeight
# Map this percentage to the same -1 to 1 space as the mouse
screenXY = Vec3((2 * percentX) - 1.0,nearVec[1],(2 * percentY) - 1.0)
# Return the resulting value
return screenXY
def getCrankAngle(center):
# Used to compute current angle of mouse (relative to the coa's
# origin) in screen space
x = SEditor.dr.mouseX - center[0]
y = SEditor.dr.mouseY - center[2]
return (180 + rad2Deg(math.atan2(y,x)))
def relHpr(nodePath, base, h, p, r):
# Compute nodePath2newNodePath relative to base coordinate system
# nodePath2base
mNodePath2Base = nodePath.getMat(base)
# delta scale, orientation, and position matrix
mBase2NewBase = Mat4()
composeMatrix(mBase2NewBase, UNIT_VEC, VBase3(h,p,r), ZERO_VEC,
CSDefault)
# base2nodePath
mBase2NodePath = base.getMat(nodePath)
# nodePath2 Parent
mNodePath2Parent = nodePath.getMat()
# Compose the result
resultMat = mNodePath2Base * mBase2NewBase
resultMat = resultMat * mBase2NodePath
resultMat = resultMat * mNodePath2Parent
# Extract and apply the hpr
hpr = Vec3(0)
decomposeMatrix(resultMat, VBase3(), hpr, VBase3(),
CSDefault)
nodePath.setHpr(hpr)
# Quaternion interpolation
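# The function below implements the standard slerp formula
#   q(t) = (sin((1 - t) * omega) / sin(omega)) * q_start
#        + (sin(t * omega) / sin(omega)) * q_end,
# where cos(omega) is the dot product of the two unit quaternions; the
# special-case branches below guard against dividing by a near-zero
# sin(omega), and the sign flip keeps interpolation on the shorter arc.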
def qSlerp(startQuat, endQuat, t):
startQ = Quat(startQuat)
destQuat = Quat.identQuat()
# Calc dot product
cosOmega = (startQ.getI() * endQuat.getI() +
startQ.getJ() * endQuat.getJ() +
startQ.getK() * endQuat.getK() +
startQ.getR() * endQuat.getR())
# If the above dot product is negative, it would be better to
# go between the negative of the initial and the final, so that
# we take the shorter path.
if ( cosOmega < 0.0 ):
cosOmega *= -1
startQ.setI(-1 * startQ.getI())
startQ.setJ(-1 * startQ.getJ())
startQ.setK(-1 * startQ.getK())
startQ.setR(-1 * startQ.getR())
if ((1.0 + cosOmega) > Q_EPSILON):
# usual case
if ((1.0 - cosOmega) > Q_EPSILON):
# usual case
omega = math.acos(cosOmega)
sinOmega = math.sin(omega)
startScale = math.sin((1.0 - t) * omega)/sinOmega
endScale = math.sin(t * omega)/sinOmega
else:
# ends very close
startScale = 1.0 - t
endScale = t
destQuat.setI(startScale * startQ.getI() +
endScale * endQuat.getI())
destQuat.setJ(startScale * startQ.getJ() +
endScale * endQuat.getJ())
destQuat.setK(startScale * startQ.getK() +
endScale * endQuat.getK())
destQuat.setR(startScale * startQ.getR() +
endScale * endQuat.getR())
else:
# ends nearly opposite
destQuat.setI(-startQ.getJ())
destQuat.setJ(startQ.getI())
destQuat.setK(-startQ.getR())
destQuat.setR(startQ.getK())
startScale = math.sin((0.5 - t) * math.pi)
endScale = math.sin(t * math.pi)
destQuat.setI(startScale * startQ.getI() +
endScale * endQuat.getI())
destQuat.setJ(startScale * startQ.getJ() +
endScale * endQuat.getJ())
destQuat.setK(startScale * startQ.getK() +
endScale * endQuat.getK())
return destQuat
|
|
"""
Grid-search routines for machine learning.
"""
from sklearn import cross_validation, grid_search, linear_model, svm, metrics
import numpy as np
import pandas as pd
from operator import itemgetter
import jutil
import j3x.jpyx
from jsklearn import binary_model
def gs_Lasso( xM, yV, alphas_log = (-1, 1, 9), n_folds=5, n_jobs = -1):
print(xM.shape, yV.shape)
clf = linear_model.Lasso()
#parmas = {'alpha': np.logspace(1, -1, 9)}
parmas = {'alpha': np.logspace( *alphas_log)}
kf5 = cross_validation.KFold( xM.shape[0], n_folds = n_folds, shuffle=True)
gs = grid_search.GridSearchCV( clf, parmas, scoring = 'r2', cv = kf5, n_jobs = n_jobs)
gs.fit( xM, yV)
return gs
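# Illustrative usage sketch (the data matrices and the alpha range below are
# hypothetical):
#   gs = gs_Lasso(xM, yV, alphas_log=(-3, 1, 9), n_folds=5)
#   print(gs.best_params_['alpha'], gs.best_score_)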
def gs_Lasso_norm( xM, yV, alphas_log = (-1, 1, 9)):
print(xM.shape, yV.shape)
clf = linear_model.Lasso( normalize = True)
#parmas = {'alpha': np.logspace(1, -1, 9)}
parmas = {'alpha': np.logspace( *alphas_log)}
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
gs = grid_search.GridSearchCV( clf, parmas, scoring = 'r2', cv = kf5, n_jobs = -1)
gs.fit( xM, yV)
return gs
def gs_Lasso_kf( xM, yV, alphas_log_l):
kf5_ext = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Lasso Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log_l[0])
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second Lasso Stage')
gs2 = gs_Lasso( xM_in_nz, yV_in, alphas_log_l[1])
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = gs2.score( xM_out_nz, yV_out)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
return score_l
def gs_Lasso_kf_ext( xM, yV, alphas_log_l):
kf5_ext = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Lasso Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log_l[0])
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second Lasso Stage')
gs2 = gs_Lasso( xM_in_nz, yV_in, alphas_log_l[1])
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
# Obtain prediction model by whole data including internal validation data
alpha = gs2.best_params_['alpha']
clf = linear_model.Lasso( alpha = alpha)
clf.fit( xM_in_nz, yV_in)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = clf.score( xM_out_nz, yV_out)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
return score_l
def _gs_Ridge_r0( xM, yV, alphas_log = (1, -1, 9)):
print(xM.shape, yV.shape)
clf = linear_model.Ridge()
#parmas = {'alpha': np.logspace(1, -1, 9)}
parmas = {'alpha': np.logspace( *alphas_log)}
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
gs = grid_search.GridSearchCV( clf, parmas, scoring = 'r2', cv = kf5, n_jobs = 1)
gs.fit( xM, yV)
return gs
def gs_Ridge_Asupervising_2fp( xM1, xM2, yV, s_l, alpha_l):
"""
	This 2fp case uses two fingerprints at the same time in order to
	combine their separately preprocessed versions.
"""
r2_l2 = list()
for alpha in alpha_l:
print(alpha)
r2_l = cv_Ridge_Asupervising_2fp( xM1, xM2, yV, s_l, alpha)
r2_l2.append( r2_l)
return r2_l2
def _cv_LinearRegression_r0( xM, yV):
print(xM.shape, yV.shape)
clf = linear_model.Ridge()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
cv_scores = cross_validation.cross_val_score( clf, xM, yV, scoring = 'r2', cv = kf5, n_jobs = -1)
return cv_scores
def _cv_LinearRegression_r1( xM, yV):
print(xM.shape, yV.shape)
clf = linear_model.LinearRegression()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
cv_scores = cross_validation.cross_val_score( clf, xM, yV, scoring = 'r2', cv = kf5, n_jobs = -1)
print('R^2 mean, std -->', np.mean( cv_scores), np.std( cv_scores))
return cv_scores
def _cv_LinearRegression_r2( xM, yV, scoring = 'r2'):
print(xM.shape, yV.shape)
clf = linear_model.LinearRegression()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
cv_scores = cross_validation.cross_val_score( clf, xM, yV, scoring = scoring, cv = kf5, n_jobs = -1)
print('{}: mean, std -->'.format( scoring), np.mean( cv_scores), np.std( cv_scores))
return cv_scores
def cv_LinearRegression( xM, yV, n_folds = 5, scoring = 'median_absolute_error', disp = False):
"""
metrics.explained_variance_score(y_true, y_pred) Explained variance regression score function
metrics.mean_absolute_error(y_true, y_pred) Mean absolute error regression loss
metrics.mean_squared_error(y_true, y_pred[, ...]) Mean squared error regression loss
metrics.median_absolute_error(y_true, y_pred) Median absolute error regression loss
metrics.r2_score(y_true, y_pred[, ...]) R^2 (coefficient of determination) regression score function.
"""
if disp:
print(xM.shape, yV.shape)
clf = linear_model.LinearRegression()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=n_folds, shuffle=True)
cv_score_l = list()
for train, test in kf5:
# clf.fit( xM[train,:], yV[train,:])
# yV is a vector, not a matrix, here; hence it should be indexed as a vector
clf.fit( xM[train,:], yV[train])
yVp_test = clf.predict( xM[test,:])
if scoring == 'median_absolute_error':
cv_score_l.append( metrics.median_absolute_error(yV[test], yVp_test))
else:
raise ValueError( "{} scoring is not supported.".format( scoring))
if disp: # Only when this flag is on is the output displayed.
print('{}: mean, std -->'.format( scoring), np.mean( cv_score_l), np.std( cv_score_l))
return cv_score_l
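# Hedged usage sketch (not part of the original module): xM is assumed to be an
# (n_samples, n_features) design matrix and yV the matching target vector, as in
# the surrounding functions; the helper below only wraps the call defined above.
def _example_cv_LinearRegression(xM, yV):
    """Illustrative only: report the spread of the per-fold median absolute errors."""
    scores = cv_LinearRegression(xM, yV, n_folds=5,
                                 scoring='median_absolute_error', disp=True)
    return np.mean(scores), np.std(scores)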
def cv_LinearRegression_ci( xM, yV, n_folds = 5, scoring = 'median_absolute_error', disp = False):
"""
metrics.explained_variance_score(y_true, y_pred) Explained variance regression score function
metrics.mean_absolute_error(y_true, y_pred) Mean absolute error regression loss
metrics.mean_squared_error(y_true, y_pred[, ...]) Mean squared error regression loss
metrics.median_absolute_error(y_true, y_pred) Median absolute error regression loss
metrics.r2_score(y_true, y_pred[, ...]) R^2 (coefficient of determination) regression score function.
"""
if disp:
print(xM.shape, yV.shape)
clf = linear_model.LinearRegression()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=n_folds, shuffle=True)
cv_score_l = list()
ci_l = list()
for train, test in kf5:
# clf.fit( xM[train,:], yV[train,:])
# yV is a vector, not a matrix, here; hence it should be indexed as a vector
clf.fit( xM[train,:], yV[train])
yVp_test = clf.predict( xM[test,:])
# Additionally, coef_ and intercept_ are stored.
ci_l.append( (clf.coef_, clf.intercept_))
if scoring == 'median_absolute_error':
cv_score_l.append( metrics.median_absolute_error(yV[test], yVp_test))
else:
raise ValueError( "{} scoring is not supported.".format( scoring))
if disp: # Only when this flag is on is the output displayed.
print('{}: mean, std -->'.format( scoring), np.mean( cv_score_l), np.std( cv_score_l))
return cv_score_l, ci_l
def cv_LinearRegression_ci_pred( xM, yV, n_folds = 5, scoring = 'median_absolute_error', disp = False):
"""
metrics.explained_variance_score(y_true, y_pred) Explained variance regression score function
metrics.mean_absolute_error(y_true, y_pred) Mean absolute error regression loss
metrics.mean_squared_error(y_true, y_pred[, ...]) Mean squared error regression loss
metrics.median_absolute_error(y_true, y_pred) Median absolute error regression loss
metrics.r2_score(y_true, y_pred[, ...]) R^2 (coefficient of determination) regression score function.
"""
if disp:
print(xM.shape, yV.shape)
clf = linear_model.LinearRegression()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=n_folds, shuffle=True)
cv_score_l = list()
ci_l = list()
yVp = yV.copy()
for train, test in kf5:
# clf.fit( xM[train,:], yV[train,:])
# yV is a vector, not a matrix, here; hence it should be indexed as a vector
clf.fit( xM[train,:], yV[train])
yVp_test = clf.predict( xM[test,:])
yVp[test] = yVp_test
# Additionally, coef_ and intercept_ are stored.
coef = np.array(clf.coef_).tolist()
intercept = np.array(clf.intercept_).tolist()
ci_l.append( (clf.coef_, clf.intercept_))
if scoring == 'median_absolute_error':
cv_score_l.append( metrics.median_absolute_error(yV[test], yVp_test))
else:
raise ValueError( "{} scoring is not supported.".format( scoring))
if disp: # Only when this flag is on is the output displayed.
print('{}: mean, std -->'.format( scoring), np.mean( cv_score_l), np.std( cv_score_l))
return cv_score_l, ci_l, yVp.A1.tolist()
def cv_LinearRegression_ci_pred_full_Ridge( xM, yV, alpha, n_folds = 5, shuffle=True, disp = False):
"""
Note: scoring is not used here. It may be used later; for now it is kept for compatibility.
metrics.explained_variance_score(y_true, y_pred) Explained variance regression score function
metrics.mean_absolute_error(y_true, y_pred) Mean absolute error regression loss
metrics.mean_squared_error(y_true, y_pred[, ...]) Mean squared error regression loss
metrics.median_absolute_error(y_true, y_pred) Median absolute error regression loss
metrics.r2_score(y_true, y_pred[, ...]) R^2 (coefficient of determination) regression score function.
"""
if disp:
print(xM.shape, yV.shape)
# print( 'alpha of Ridge is', alpha)
clf = linear_model.Ridge( alpha)
kf5 = cross_validation.KFold( xM.shape[0], n_folds=n_folds, shuffle=shuffle)
cv_score_l = list()
ci_l = list()
yVp = yV.copy()
for train, test in kf5:
# clf.fit( xM[train,:], yV[train,:])
# yV is a vector, not a matrix, here; hence it should be indexed as a vector
clf.fit( xM[train,:], yV[train])
yVp_test = clf.predict( xM[test,:])
yVp[test] = yVp_test
# Additionally, coef_ and intercept_ are stored.
ci_l.append( (clf.coef_, clf.intercept_))
y_a = np.array( yV[test])[:,0]
yp_a = np.array( yVp_test)[:,0]
cv_score_l.extend( np.abs(y_a - yp_a).tolist())
return cv_score_l, ci_l, yVp.A1.tolist()
def cv_LinearRegression_ci_pred_full( xM, yV, n_folds = 5, shuffle=True, disp = False):
"""
Note: scoring is not used here. It may be used later; for now it is kept for compatibility.
metrics.explained_variance_score(y_true, y_pred) Explained variance regression score function
metrics.mean_absolute_error(y_true, y_pred) Mean absolute error regression loss
metrics.mean_squared_error(y_true, y_pred[, ...]) Mean squared error regression loss
metrics.median_absolute_error(y_true, y_pred) Median absolute error regression loss
metrics.r2_score(y_true, y_pred[, ...]) R^2 (coefficient of determination) regression score function.
"""
if disp:
print(xM.shape, yV.shape)
clf = linear_model.LinearRegression()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=n_folds, shuffle=shuffle)
cv_score_l = list()
ci_l = list()
yVp = yV.copy()
for train, test in kf5:
# clf.fit( xM[train,:], yV[train,:])
# yV is a vector, not a matrix, here; hence it should be indexed as a vector
clf.fit( xM[train,:], yV[train])
yVp_test = clf.predict( xM[test,:])
yVp[test] = yVp_test
# Additionally, coef_ and intercept_ are stored.
ci_l.append( (clf.coef_, clf.intercept_))
y_a = np.array( yV[test])[:,0]
yp_a = np.array( yVp_test)[:,0]
cv_score_l.extend( np.abs(y_a - yp_a).tolist())
return cv_score_l, ci_l, yVp.A1.tolist()
def cv_LinearRegression_It( xM, yV, n_folds = 5, scoring = 'median_absolute_error', N_it = 10, disp = False, ldisp = False):
"""
Cross-validation is repeated N_it times in order to obtain a further averaging effect.
With the default 'disp' setting the individual iterations are not shown.
"""
cv_score_le = list()
for ni in range( N_it):
cv_score_l = cv_LinearRegression( xM, yV, n_folds = n_folds, scoring = scoring, disp = disp)
cv_score_le.extend( cv_score_l)
o_d = {'mean': np.mean( cv_score_le),
'std': np.std( cv_score_le),
'list': cv_score_le}
if disp or ldisp:
print('{0}: mean(+/-std) --> {1}(+/-{2})'.format( scoring, o_d['mean'], o_d['std']))
return o_d
def cv_LinearRegression_ci_It( xM, yV, n_folds = 5, scoring = 'median_absolute_error', N_it = 10, disp = False, ldisp = False):
"""
Cross-validation is repeated N_it times in order to obtain a further averaging effect.
With the default 'disp' setting the individual iterations are not shown.
"""
cv_score_le = list()
ci_le = list()
for ni in range( N_it):
cv_score_l, ci_l = cv_LinearRegression_ci( xM, yV, n_folds = n_folds, scoring = scoring, disp = disp)
cv_score_le.extend( cv_score_l)
ci_le.extend( ci_l)
o_d = {'mean': np.mean( cv_score_le),
'std': np.std( cv_score_le),
'list': cv_score_le,
'ci': ci_le}
if disp or ldisp:
print('{0}: mean(+/-std) --> {1}(+/-{2})'.format( scoring, o_d['mean'], o_d['std']))
return o_d
def cv_LinearRegression_ci_pred_It( xM, yV, n_folds = 5, scoring = 'median_absolute_error', N_it = 10, disp = False, ldisp = False):
"""
Cross-validation is repeated N_it times in order to obtain a further averaging effect.
With the default 'disp' setting the individual iterations are not shown.
"""
cv_score_le = list()
ci_le = list()
yVp_ltype_l = list() # yVp_ltype is the list form of yVp, not the matrix form
for ni in range( N_it):
cv_score_l, ci_l, yVp_ltype = cv_LinearRegression_ci_pred( xM, yV, n_folds = n_folds, scoring = scoring, disp = disp)
cv_score_le.extend( cv_score_l)
ci_le.extend( ci_l)
yVp_ltype_l.append( yVp_ltype)
o_d = {'mean': np.mean( cv_score_le),
'std': np.std( cv_score_le),
'list': cv_score_le,
'ci': ci_le,
'yVp': yVp_ltype_l}
if disp or ldisp:
print('{0}: mean(+/-std) --> {1}(+/-{2})'.format( scoring, o_d['mean'], o_d['std']))
return o_d
def cv_LOO( xM, yV, disp = False, ldisp = False):
"""
This is a specialized function for LOO (leave-one-out) cross-validation.
"""
n_folds = xM.shape[0] # for LOO CV
return cv_LinearRegression_ci_pred_full_It( xM, yV, n_folds = n_folds, N_it = 1,
shuffle = False, disp = disp, ldisp = ldisp)
def cv_LOO_Ridge( xM, yV, alpha, disp = False, ldisp = False):
"""
This is a specialized function for LOO (leave-one-out) cross-validation.
"""
n_folds = xM.shape[0] # for LOO CV
return cv_LinearRegression_ci_pred_full_It_Ridge( xM, yV, alpha, n_folds = n_folds, N_it = 1,
shuffle = False, disp = disp, ldisp = ldisp)
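# Hedged usage sketch (not part of the original module): the LOO wrappers above
# return the result dictionary produced by the *_full_It helpers defined just below;
# xM and yV are placeholders for the usual design matrix and target vector.
def _example_cv_LOO(xM, yV):
    """Illustrative only."""
    o_d = cv_LOO(xM, yV)
    return o_d['median_abs_err'], o_d['yVp']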
def cv_LinearRegression_ci_pred_full_It_Ridge( xM, yV, alpha, n_folds = 5, N_it = 10,
shuffle = True, disp = False, ldisp = False):
"""
Cross-validation is repeated N_it times in order to obtain a further averaging effect.
With the default 'disp' setting the individual iterations are not shown.
"""
cv_score_le = list()
ci_le = list()
yVp_ltype_l = list() # yVp_ltype is the list form of yVp, not the matrix form
for ni in range( N_it):
cv_score_l, ci_l, yVp_ltype = cv_LinearRegression_ci_pred_full_Ridge( xM, yV, alpha,
n_folds = n_folds, shuffle = shuffle, disp = disp)
cv_score_le.extend( cv_score_l)
ci_le.extend( ci_l)
yVp_ltype_l.append( yVp_ltype)
# List is not used if N_it is one
if N_it == 1:
yVp_ltype_l = yVp_ltype_l[0]
o_d = {'median_abs_err': np.median( cv_score_le),
'mean_abs_err': np.mean( cv_score_le),
'std_abs_err': np.std( cv_score_le),
'list': cv_score_le,
'ci': ci_le,
'yVp': yVp_ltype_l}
return o_d
def cv_LinearRegression_ci_pred_full_It( xM, yV, n_folds = 5, N_it = 10,
shuffle = True, disp = False, ldisp = False):
"""
Cross-validation is repeated N_it times in order to obtain a further averaging effect.
With the default 'disp' setting the individual iterations are not shown.
"""
cv_score_le = list()
ci_le = list()
yVp_ltype_l = list() # yVp_ltype is the list form of yVp, not the matrix form
for ni in range( N_it):
cv_score_l, ci_l, yVp_ltype = cv_LinearRegression_ci_pred_full( xM, yV,
n_folds = n_folds, shuffle = shuffle, disp = disp)
cv_score_le.extend( cv_score_l)
ci_le.extend( ci_l)
yVp_ltype_l.append( yVp_ltype)
# List is not used if N_it is one
if N_it == 1:
yVp_ltype_l = yVp_ltype_l[0]
o_d = {'median_abs_err': np.median( cv_score_le),
'mean_abs_err': np.mean( cv_score_le),
'std_abs_err': np.std( cv_score_le),
'list': cv_score_le,
'ci': ci_le,
'yVp': yVp_ltype_l}
return o_d
def mdae_no_regression( xM, yV, disp = False, ldisp = False):
"""
The median absolute error (MdAE) is calculated without any (linear) regression.
"""
xM_a = np.array( xM)
yV_a = np.array( yV)
ae_l = [ np.abs(x - y) for x, y in zip(xM_a[:,0], yV_a[:, 0])]
return np.median( ae_l)
def cv_LinearRegression_A( xM, yV, s_l):
lr = linear_model.LinearRegression()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
r2_l = list()
for train, test in kf5:
xM_shuffle = np.concatenate( (xM[ train, :], xM[ test, :]), axis = 0)
# print xM_shuffle.shape
A_all = j3x.jpyx.calc_tm_sim_M( xM_shuffle)
A = A_all
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
A_molw = A
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
print(A_molw_train.shape, yV[ train, 0].shape)
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def cv_LinearRegression_Asupervising( xM, yV, s_l):
lr = linear_model.LinearRegression()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
r2_l = list()
for train, test in kf5:
xM_shuffle = np.concatenate( (xM[ train, :], xM[ test, :]), axis = 0)
#print xM_shuffle.shape
A_all = j3x.jpyx.calc_tm_sim_M( xM_shuffle)
A = A_all[ :, :len(train)]
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
A_molw = A
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
print(A_molw_train.shape, yV[ train, 0].shape)
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def cv_LinearRegression_Asupervising_molw( xM, yV, s_l):
lr = linear_model.LinearRegression()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
r2_l = list()
for train, test in kf5:
xM_shuffle = np.concatenate( (xM[ train, :], xM[ test, :]), axis = 0)
# print xM_shuffle.shape
A_all = j3x.jpyx.calc_tm_sim_M( xM_shuffle)
A = A_all[ :, :len(train)]
#print 'A.shape', A.shape
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
A_molw = jchem.add_new_descriptor( A, molw_l)
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
#print A_molw_train.shape, yV[ train, 0].shape
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def cv_Ridge_Asupervising_molw( xM, yV, s_l, alpha):
lr = linear_model.Ridge( alpha = alpha)
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
r2_l = list()
for train, test in kf5:
xM_shuffle = np.concatenate( (xM[ train, :], xM[ test, :]), axis = 0)
# print xM_shuffle.shape
A_all = j3x.jpyx.calc_tm_sim_M( xM_shuffle)
A = A_all[ :, :len(train)]
#print 'A.shape', A.shape
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
A_molw = jchem.add_new_descriptor( A, molw_l)
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
#print A_molw_train.shape, yV[ train, 0].shape
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def cv_Ridge_Asupervising_2fp( xM1, xM2, yV, s_l, alpha):
lr = linear_model.Ridge( alpha = alpha)
kf5 = cross_validation.KFold( len(s_l), n_folds=5, shuffle=True)
r2_l = list()
for train, test in kf5:
xM1_shuffle = np.concatenate( (xM1[ train, :], xM1[ test, :]), axis = 0)
xM2_shuffle = np.concatenate( (xM2[ train, :], xM2[ test, :]), axis = 0)
# print xM_shuffle.shape
A1_redundant = j3x.jpyx.calc_tm_sim_M( xM1_shuffle)
A1 = A1_redundant[ :, :len(train)]
A2_redundant = j3x.jpyx.calc_tm_sim_M( xM2_shuffle)
A2 = A2_redundant[ :, :len(train)]
#print 'A.shape', A.shape
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
molwV = np.mat( molw_l).T
#A_molw = jchem.add_new_descriptor( A, molw_l)
print(A1.shape, A2.shape, molwV.shape)
# A_molw = np.concatenate( (A1, A2, molwV), axis = 1)
A_molw = np.concatenate( (A1, A2), axis = 1)
print(A_molw.shape)
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
#print A_molw_train.shape, yV[ train, 0].shape
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def cv_Ridge_Asupervising_2fp_molw( xM1, xM2, yV, s_l, alpha):
lr = linear_model.Ridge( alpha = alpha)
kf5 = cross_validation.KFold( len(s_l), n_folds=5, shuffle=True)
r2_l = list()
for train, test in kf5:
xM1_shuffle = np.concatenate( (xM1[ train, :], xM1[ test, :]), axis = 0)
xM2_shuffle = np.concatenate( (xM2[ train, :], xM2[ test, :]), axis = 0)
# print xM_shuffle.shape
A1_redundant = j3x.jpyx.calc_tm_sim_M( xM1_shuffle)
A1 = A1_redundant[ :, :len(train)]
A2_redundant = j3x.jpyx.calc_tm_sim_M( xM2_shuffle)
A2 = A2_redundant[ :, :len(train)]
#print 'A.shape', A.shape
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
molwV = np.mat( molw_l).T
#A_molw = jchem.add_new_descriptor( A, molw_l)
print(A1.shape, A2.shape, molwV.shape)
A_molw = np.concatenate( (A1, A2, molwV), axis = 1)
print(A_molw.shape)
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
#print A_molw_train.shape, yV[ train, 0].shape
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def gs_Ridge_Asupervising_2fp_molw( xM1, xM2, yV, s_l, alpha_l):
"""
This 2fp case uses two fingerprints at the same time in order to
combine their separately preprocessed versions.
"""
r2_l2 = list()
for alpha in alpha_l:
print(alpha)
r2_l = cv_Ridge_Asupervising_2fp_molw( xM1, xM2, yV, s_l, alpha)
r2_l2.append( r2_l)
return r2_l2
def gs_Ridge_Asupervising_molw( xM, yV, s_l, alpha_l):
r2_l2 = list()
for alpha in alpha_l:
print(alpha)
r2_l = cv_Ridge_Asupervising_molw( xM, yV, s_l, alpha)
r2_l2.append( r2_l)
return r2_l2
def gs_Ridge_Asupervising( xM, yV, s_l, alpha_l):
r2_l2 = list()
for alpha in alpha_l:
print(alpha)
r2_l = cv_Ridge_Asupervising( xM, yV, s_l, alpha)
r2_l2.append( r2_l)
return r2_l2
def cv_Ridge_Asupervising( xM, yV, s_l, alpha):
lr = linear_model.Ridge( alpha = alpha)
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
r2_l = list()
for train, test in kf5:
xM_shuffle = np.concatenate( (xM[ train, :], xM[ test, :]), axis = 0)
# print xM_shuffle.shape
A_all = j3x.jpyx.calc_tm_sim_M( xM_shuffle)
A = A_all[ :, :len(train)]
#print 'A.shape', A.shape
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
A_molw = A
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
#print A_molw_train.shape, yV[ train, 0].shape
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def gs_RidgeByLasso_kf_ext( xM, yV, alphas_log_l):
kf5_ext = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Lasso Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log_l[0])
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second Ridge Stage')
gs2 = gs_Ridge( xM_in_nz, yV_in, alphas_log_l[1])
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
# Obtain the prediction model from the whole training data, including the internal validation data
alpha = gs2.best_params_['alpha']
clf = linear_model.Ridge( alpha = alpha)
clf.fit( xM_in_nz, yV_in)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = clf.score( xM_out_nz, yV_out)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
return score_l
def _gs_SVR_r0( xM, yV, svr_params):
print(xM.shape, yV.shape)
clf = svm.SVR()
#parmas = {'alpha': np.logspace(1, -1, 9)}
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
gs = grid_search.GridSearchCV( clf, svr_params, scoring = 'r2', cv = kf5, n_jobs = -1)
gs.fit( xM, yV.A1)
return gs
def _gs_SVR_r1( xM, yV, svr_params, n_folds = 5):
print(xM.shape, yV.shape)
clf = svm.SVR()
#parmas = {'alpha': np.logspace(1, -1, 9)}
kf5 = cross_validation.KFold( xM.shape[0], n_folds=n_folds, shuffle=True)
gs = grid_search.GridSearchCV( clf, svr_params, scoring = 'r2', cv = kf5, n_jobs = -1)
gs.fit( xM, yV.A1)
return gs
def gs_SVR( xM, yV, svr_params, n_folds = 5, n_jobs = -1):
print(xM.shape, yV.shape)
clf = svm.SVR()
#parmas = {'alpha': np.logspace(1, -1, 9)}
kf5 = cross_validation.KFold( xM.shape[0], n_folds=n_folds, shuffle=True)
gs = grid_search.GridSearchCV( clf, svr_params, scoring = 'r2', cv = kf5, n_jobs = n_jobs)
gs.fit( xM, yV.A1)
return gs
def _gs_SVC_r0( xM, yVc, params):
"""
Since classification is considered, we use yVc, which contains discrete (integer)
class labels, whereas yV can contain floating-point values.
"""
print(xM.shape, yVc.shape)
clf = svm.SVC()
#parmas = {'alpha': np.logspace(1, -1, 9)}
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
gs = grid_search.GridSearchCV( clf, params, cv = kf5, n_jobs = -1)
gs.fit( xM, yVc)
return gs
def gs_SVC( xM, yVc, params, n_folds = 5):
"""
Since classification is considered, we use yVc, which contains discrete (integer)
class labels, whereas yV can contain floating-point values.
"""
print(xM.shape, yVc.shape)
clf = svm.SVC()
#parmas = {'alpha': np.logspace(1, -1, 9)}
kf5 = cross_validation.KFold( xM.shape[0], n_folds=n_folds, shuffle=True)
gs = grid_search.GridSearchCV( clf, params, cv = kf5, n_jobs = -1)
gs.fit( xM, yVc)
return gs
def gs_SVRByLasso_kf_ext( xM, yV, alphas_log, svr_params):
kf5_ext = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Lasso Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log)
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second SVR Stage')
gs2 = gs_SVR( xM_in_nz, yV_in, svr_params)
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
# Obtain the prediction model from the whole training data, including the internal validation data
C = gs2.best_params_['C']
gamma = gs2.best_params_['gamma']
epsilon = gs2.best_params_['epsilon']
clf = svm.SVR( C = C, gamma = gamma, epsilon = epsilon)
clf.fit( xM_in_nz, yV_in.A1)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = clf.score( xM_out_nz, yV_out.A1)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
return score_l
def gs_SVRByLasso( xM, yV, alphas_log, svr_params):
kf5_ext = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
score1_l = []
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Lasso Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log)
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
score1_l.append( gs1.best_score_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second SVR Stage')
gs2 = gs_SVR( xM_in_nz, yV_in, svr_params)
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
# Obtain the prediction model from the whole training data, including the internal validation data
C = gs2.best_params_['C']
gamma = gs2.best_params_['gamma']
epsilon = gs2.best_params_['epsilon']
clf = svm.SVR( C = C, gamma = gamma, epsilon = epsilon)
clf.fit( xM_in_nz, yV_in.A1)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = clf.score( xM_out_nz, yV_out.A1)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
print('First stage scores', score1_l)
print('Average first stage scores', np.mean( score1_l))
return score_l, score1_l
def gs_ElasticNet( xM, yV, en_params):
print(xM.shape, yV.shape)
clf = linear_model.ElasticNet()
kf5 = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
gs = grid_search.GridSearchCV( clf, en_params, scoring = 'r2', cv = kf5, n_jobs = -1)
gs.fit( xM, yV)
return gs
def gs_SVRByElasticNet( xM, yV, en_params, svr_params):
kf5_ext = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
score1_l = []
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First ElasticNet Stage')
gs1 = gs_ElasticNet( xM_in, yV_in, en_params)
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
score1_l.append( gs1.best_score_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second SVR Stage')
gs2 = gs_SVR( xM_in_nz, yV_in, svr_params)
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
# Obtain the prediction model from the whole training data, including the internal validation data
C = gs2.best_params_['C']
gamma = gs2.best_params_['gamma']
epsilon = gs2.best_params_['epsilon']
clf = svm.SVR( C = C, gamma = gamma, epsilon = epsilon)
clf.fit( xM_in_nz, yV_in.A1)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = clf.score( xM_out_nz, yV_out.A1)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
print('First stage scores', score1_l)
print('Average first stage scores', np.mean( score1_l))
return score_l, score1_l
def gs_GPByLasso( xM, yV, alphas_log):
kf5_ext = cross_validation.KFold( xM.shape[0], n_folds=5, shuffle=True)
score1_l = []
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Lasso Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log)
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
score1_l.append( gs1.best_score_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second GP Stage')
Xa_in_nz = np.array( xM_in_nz)
ya_in = np.array( yV_in)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
Xa_out_nz = np.array( xM_out_nz)
ya_out = np.array( yV_out)
#jgp = gp.GaussianProcess( Xa_in_nz, ya_in, Xa_out_nz, ya_out)
# the y array should be passed in [:,0] form so that it is handled as a vector
jgp = gp.GaussianProcess( Xa_in_nz, ya_in[:,0], Xa_out_nz, ya_out[:,0])
jgp.optimize_noise_and_amp()
jgp.run_gp()
#ya_out_pred = np.mat(jgp.predicted_targets)
ya_out_pred = jgp.predicted_targets
#print ya_out[:,0].shape, jgp.predicted_targets.shape
r2, rmse = regress_show( ya_out[:,0], ya_out_pred)
score = r2
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
print('First stage scores', score1_l)
print('Average first stage scores', np.mean( score1_l))
return score_l, score1_l
def show_gs_alpha( grid_scores):
alphas = np.array([ x[0]['alpha'] for x in grid_scores])
r2_mean = np.array([ x[1] for x in grid_scores])
r2_std = np.array([ np.std(x[2]) for x in grid_scores])
r2_mean_pos = r2_mean + r2_std
r2_mean_neg = r2_mean - r2_std
plt.semilogx( alphas, r2_mean, 'x-', label = 'E[$r^2$]')
plt.semilogx( alphas, r2_mean_pos, ':k', label = 'E[$r^2$]+$\sigma$')
plt.semilogx( alphas, r2_mean_neg, ':k', label = 'E[$r^2$]-$\sigma$')
plt.grid()
plt.legend( loc = 2)
plt.show()
best_idx = np.argmax( r2_mean)
best_r2_mean = r2_mean[ best_idx]
best_r2_std = r2_std[ best_idx]
best_alpha = alphas[ best_idx]
print("Best: r2(alpha = {0}) -> mean:{1}, std:{2}".format( best_alpha, best_r2_mean, best_r2_std))
"""
Specialized code for extract results
"""
def gs_Ridge( xM, yV, alphas_log = (1, -1, 9), n_folds = 5, n_jobs = -1):
print(xM.shape, yV.shape)
clf = linear_model.Ridge()
#params = {'alpha': np.logspace(1, -1, 9)}
params = {'alpha': np.logspace( *alphas_log)}
kf_n = cross_validation.KFold( xM.shape[0], n_folds=n_folds, shuffle=True)
gs = grid_search.GridSearchCV( clf, params, scoring = 'r2', cv = kf_n, n_jobs = n_jobs)
gs.fit( xM, yV)
return gs
def gs_Ridge_BIKE( A_list, yV, XX = None, alphas_log = (1, -1, 9), n_folds = 5, n_jobs = -1):
"""
A_list is a list of A matrices, where each A is a similarity matrix.
XX is a concatenation of linear descriptors.
If no linear descriptors are used, XX can be left empty (None).
"""
clf = binary_model.BIKE_Ridge( A_list, XX)
params = {'alpha': np.logspace( *alphas_log)}
ln = A_list[0].shape[0] # ln is the number of molecules.
kf_n = cross_validation.KFold( ln, n_folds=n_folds, shuffle=True)
gs = grid_search.GridSearchCV( clf, params, scoring = 'r2', cv = kf_n, n_jobs = n_jobs)
AX_idx = np.array([list(range( ln))]).T
gs.fit( AX_idx, yV)
return gs
def gs_BIKE_Ridge( A_list, yV, alphas_log = (1, -1, 9), X_concat = None, n_folds = 5, n_jobs = -1):
"""
A_list is a list of A matrices, where each A is a similarity matrix.
X_concat is a concatenation of linear descriptors.
If no linear descriptors are used, X_concat can be left empty (None).
"""
clf = binary_model.BIKE_Ridge( A_list, X_concat)
params = {'alpha': np.logspace( *alphas_log)}
ln = A_list[0].shape[0] # ln is the number of molecules.
kf_n = cross_validation.KFold( ln, n_folds=n_folds, shuffle=True)
gs = grid_search.GridSearchCV( clf, params, scoring = 'r2', cv = kf_n, n_jobs = n_jobs)
AX_idx = np.array([list(range( ln))]).T
gs.fit( AX_idx, yV)
return gs
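# Hedged usage sketch (not part of the original module): A_list is assumed to be a
# list of precomputed similarity matrices; X_concat stays None when no extra linear
# descriptors are concatenated.
def _example_gs_BIKE_Ridge(A_list, yV):
    """Illustrative only."""
    gs_b = gs_BIKE_Ridge(A_list, yV, alphas_log=(1, -1, 9), X_concat=None)
    return gs_b.best_params_, gs_b.best_score_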
def cv( method, xM, yV, alpha, n_folds = 5, n_jobs = -1, grid_std = None, graph = True):
"""
method can be 'Ridge', 'Lasso'
cross validation is performed so as to generate prediction output for all input molecules
"""
print(xM.shape, yV.shape)
clf = getattr( linear_model, method)( alpha = alpha)
kf_n = cross_validation.KFold( xM.shape[0], n_folds=n_folds, shuffle=True)
yV_pred = cross_validation.cross_val_predict( clf, xM, yV, cv = kf_n, n_jobs = n_jobs)
if graph:
print('The prediction output using cross-validation is given by:')
jutil.cv_show( yV, yV_pred, grid_std = grid_std)
return yV_pred
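# Hedged usage sketch (not part of the original module): obtain cross-validated
# predictions for every molecule with a Ridge model at a fixed alpha.
def _example_cv_Ridge_predictions(xM, yV):
    """Illustrative only; alpha=0.5 is an arbitrary choice."""
    return cv('Ridge', xM, yV, alpha=0.5, n_folds=5, graph=False)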
def cv_Ridge_BIKE( A_list, yV, XX = None, alpha = 0.5, n_folds = 5, n_jobs = -1, grid_std = None):
clf = binary_model.BIKE_Ridge( A_list, XX, alpha = alpha)
ln = A_list[0].shape[0] # ln is the number of molecules.
kf_n = cross_validation.KFold( ln, n_folds=n_folds, shuffle=True)
AX_idx = np.array([list(range( ln))]).T
yV_pred = cross_validation.cross_val_predict( clf, AX_idx, yV, cv = kf_n, n_jobs = n_jobs)
print('The prediction output using cross-validation is given by:')
jutil.cv_show( yV, yV_pred, grid_std = grid_std)
return yV_pred
def cv_BIKE_Ridge( A_list, yV, alpha = 0.5, XX = None, n_folds = 5, n_jobs = -1, grid_std = None):
clf = binary_model.BIKE_Ridge( A_list, XX, alpha = alpha)
ln = A_list[0].shape[0] # ln is the number of molecules.
kf_n = cross_validation.KFold( ln, n_folds=n_folds, shuffle=True)
AX_idx = np.array([list(range( ln))]).T
yV_pred = cross_validation.cross_val_predict( clf, AX_idx, yV, cv = kf_n, n_jobs = n_jobs)
print('The prediction output using cross-validation is given by:')
jutil.cv_show( yV, yV_pred, grid_std = grid_std)
return yV_pred
def topscores( gs):
"""
Return only the top scores for Ridge and Lasso with the best parameters.
"""
top_score = sorted(gs.grid_scores_, key=itemgetter(1), reverse=True)[0]
print(top_score.parameters)
print(top_score.cv_validation_scores)
return top_score.parameters, top_score.cv_validation_scores
def pd_dataframe( param, scores, descriptor = "Morgan(r=6,nB=4096)", graph = True):
pdw_score = pd.DataFrame()
k_KF = len( scores)
pdw_score["descriptor"] = [ descriptor] * k_KF
pdw_score["regularization"] = ["Ridge"] * k_KF
pdw_score["alpha"] = [param['alpha']] * k_KF
pdw_score["KFold"] = list(range( 1, k_KF + 1))
pdw_score["r2"] = scores
if graph:
pdw_score['r2'].plot( kind = 'box')
return pdw_score
def ridge( xM, yV, alphas_log, descriptor = "Morgan(r=6,nB=4096)", n_folds = 5, n_jobs = -1):
gs = gs_Ridge( xM, yV, alphas_log = alphas_log, n_folds = n_folds, n_jobs = n_jobs)
param, scores = topscores( gs)
pdw_score = pd_dataframe( param, scores, descriptor = descriptor)
print('Ridge(alpha={0}) = {1}'.format( gs.best_params_['alpha'], gs.best_score_))
print(pdw_score)
return pdw_score
def gs( method, xM, yV, alphas_log):
"""
It shortens the function name to a two-letter form
by adding one more argument, the name of the method.
"""
if method == "Lasso":
return gs_Lasso( xM, yV, alphas_log)
elif method == "Ridge":
return gs_Ridge( xM, yV, alphas_log)
else:
raise NameError("The method of {} is not supported".format( method))
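# Hedged usage sketch (not part of the original module): the dispatcher simply
# forwards to gs_Ridge or gs_Lasso, so the log-spaced alpha ranges below are
# illustrative placeholders.
def _example_gs_dispatch(xM, yV):
    """Illustrative only."""
    gs_r = gs("Ridge", xM, yV, (1, -1, 9))
    gs_l = gs("Lasso", xM, yV, (1, -3, 9))
    return gs_r.best_params_, gs_l.best_params_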
|
|
"""
The `fluent_contents` package defines two models, for storing the content data:
* :class:`Placeholder`
* :class:`ContentItem`
Secondly, there are a few possible fields to add to parent models:
* :class:`PlaceholderField`
* :class:`PlaceholderRelation`
* :class:`ContentItemRelation`
Finally, to exchange template data, a :class:`PlaceholderData` object is available
which mirrors the relevant fields of the :class:`Placeholder` model.
"""
import django
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.forms import Media
from django.utils.html import conditional_escape
from django.utils.safestring import SafeData, mark_safe
from future.builtins import str
from future.utils import python_2_unicode_compatible
from fluent_contents.models.db import ContentItem, Placeholder
from fluent_contents.models.fields import (
ContentItemRelation,
PlaceholderField,
PlaceholderRelation,
)
from fluent_contents.models.managers import (
ContentItemManager,
PlaceholderManager,
get_parent_language_code,
get_parent_lookup_kwargs,
)
__all__ = (
"Placeholder",
"ContentItem",
"PlaceholderData",
"ContentItemOutput",
"ImmutableMedia",
"PlaceholderManager",
"ContentItemManager",
"get_parent_lookup_kwargs",
"get_parent_language_code",
"PlaceholderField",
"PlaceholderRelation",
"ContentItemRelation",
)
_ALLOWED_ROLES = list(dict(Placeholder.ROLES).keys())
class PlaceholderData(object):
"""
A wrapper with data of a placeholder node.
It shares the :attr:`slot`, :attr:`title` and :attr:`role` fields with the :class:`~fluent_contents.models.Placeholder` class.
"""
ROLE_ALIASES = {
"main": Placeholder.MAIN,
"sidebar": Placeholder.SIDEBAR,
"related": Placeholder.RELATED,
}
def __init__(self, slot, title=None, role=None, fallback_language=None):
"""
Create the placeholder data with a slot, and optional title and role.
"""
if not slot:
raise ValueError("Slot not defined for placeholder!")
self.slot = slot
self.title = title or self.slot
self.role = self.ROLE_ALIASES.get(role, role or Placeholder.MAIN)
self.fallback_language = fallback_language or None
# Ensure upfront value checking
if self.role not in _ALLOWED_ROLES:
raise ValueError(
"Invalid role '{0}' for placeholder '{1}': allowed are: {2}.".format(
self.role,
self.title or self.slot,
", ".join(list(self.ROLE_ALIASES.keys())),
)
)
def as_dict(self):
"""
Return the contents as dictionary, for client-side export.
The dictionary contains the fields:
* ``slot``
* ``title``
* ``role``
* ``fallback_language``
* ``allowed_plugins``
"""
plugins = self.get_allowed_plugins()
return {
"slot": self.slot,
"title": self.title,
"role": self.role,
"fallback_language": self.fallback_language,
"allowed_plugins": [plugin.name for plugin in plugins],
}
def get_allowed_plugins(self):
from fluent_contents import extensions
return extensions.plugin_pool.get_allowed_plugins(self.slot)
def __repr__(self):
return "<{0}: slot={1} role={2} title={3}>".format(
self.__class__.__name__, self.slot, self.role, self.title
)
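# Hedged usage sketch (not part of this module): building a PlaceholderData for a
# slot and reading back its fields; the slot and title names are placeholders.
def _example_placeholder_data():
    """Illustrative only; as_dict() additionally needs the plugin registry."""
    data = PlaceholderData(slot="main", title="Main column", role="main")
    return data.slot, data.role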
@python_2_unicode_compatible
class ContentItemOutput(SafeData):
"""
A wrapper which holds the rendered output of a plugin.
This object is returned by the :func:`~fluent_contents.rendering.render_placeholder`
and :func:`ContentPlugin.render() <fluent_contents.extensions.ContentPlugin.render>` method.
Instances can be treated like a string object,
but also allows reading the :attr:`html` and :attr:`media` attributes.
"""
def __init__(self, html, media=None, cacheable=True, cache_timeout=DEFAULT_TIMEOUT):
self.html = conditional_escape(html) # enforce consistency
self.media = media or ImmutableMedia.empty_instance
# Mainly used internally for the _render_items():
# NOTE: this is the only place where 'cachable' is spelled 'cacheable'
self.cacheable = cacheable
self.cache_timeout = cache_timeout or DEFAULT_TIMEOUT
# Pretend to be a string-like object.
# This both makes the object easier for callers to use and keeps compatibility with 0.9 code.
def __str__(self):
return str(self.html)
def __len__(self):
return len(str(self.html))
def __repr__(self):
return "<ContentItemOutput '{0}'>".format(repr(self.html))
def __getattr__(self, item):
return getattr(self.html, item)
def __getitem__(self, item):
return str(self).__getitem__(item)
def __getstate__(self):
if django.VERSION >= (2, 2):
return (str(self.html), self.media._css_lists, self.media._js_lists)
else:
return (str(self.html), self.media._css, self.media._js)
def __setstate__(self, state):
# Handle pickling manually, otherwise invokes __getattr__ in a loop.
# (the first call goes to __setstate__, while self.html isn't set so __getattr__ is invoked again)
html_str, css, js = state
self.html = mark_safe(html_str)
self.cacheable = True # Implied by retrieving from cache.
self.cache_timeout = DEFAULT_TIMEOUT
if not css and not js:
self.media = ImmutableMedia.empty_instance
else:
if django.VERSION >= (2, 2):
if isinstance(css, dict):
# cache from 2.1 version, convert to lists
self.media = Media(css=css, js=js)
else:
self.media = Media()
self.media._css_lists = css
self.media._js_lists = js
else:
# Recover object for Django 2.1 and below
self.media = ImmutableMedia()
self.media._css = css
self.media._js = js
def _insert_media(self, media):
"""
Insert more media files to the output. (internal-private for now).
"""
# Upgrade the performance-optimization of ImmutableMedia to an editable object.
if self.media is ImmutableMedia.empty_instance:
self.media = Media() + media
else:
# Needs to be merged as new copy, can't risk merging the 'media' object
self.media = media + self.media
# Avoid continuous construction of Media objects.
if django.VERSION >= (2, 2):
class ImmutableMedia(Media):
#: The empty object (a shared instance of this class)
empty_instance = None
def __init__(self, **kwargs):
if kwargs:
# This wasn't used internally at all, but check if any third party packages did this.
raise ValueError(
"Providing css/js to ImmutableMedia is no longer supported on Django 2.2+"
)
super(ImmutableMedia, self).__init__()
def __add__(self, other):
# Django 2.2 no longer provides add_js/add_css,
# making the Media object behave as immutable.
# 'other' could also be ImmutableMedia.empty_instance
return other
else:
class ImmutableMedia(Media):
#: The empty object (a shared instance of this class)
empty_instance = None
def __init__(self, **kwargs):
self._css = {}
self._js = []
if kwargs:
Media.add_css(self, kwargs.get("css", None))
Media.add_js(self, kwargs.get("js", None))
def add_css(self, data):
raise RuntimeError("Immutable media object")
def add_js(self, data):
raise RuntimeError("Immutable media object")
def __add__(self, other):
# Performance improvement
if other is ImmutableMedia.empty_instance:
return other
# Fast copy for Django 2.1 and below
combined = Media()
combined._css = other._css.copy()
combined._js = other._js[:]
return combined
ImmutableMedia.empty_instance = ImmutableMedia()
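# Hedged usage sketch (not part of this module): ContentItemOutput behaves like a
# string while still exposing the rendered HTML and its media definition.
def _example_content_item_output():
    """Illustrative only."""
    output = ContentItemOutput(mark_safe("<p>Hello</p>"))
    return str(output), output.media is ImmutableMedia.empty_instance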
|
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from sklearn import preprocessing
from . import utils
from . import timeseries
from . import pos
from . import txn
from .utils import APPROX_BDAYS_PER_MONTH
from functools import wraps
def plotting_context(func):
"""Decorator to set plotting context during function call."""
@wraps(func)
def call_w_context(*args, **kwargs):
set_context = kwargs.pop('set_context', True)
if set_context:
with context():
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return call_w_context
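# Hedged usage sketch (not part of the original module): any helper decorated with
# @plotting_context accepts a set_context keyword that toggles the seaborn styling;
# `returns` is assumed to be a pd.Series of daily returns.
def _example_plotting_context(returns):
    """Illustrative only."""
    @plotting_context
    def _tiny_plot(returns, ax=None):
        ax = ax if ax is not None else plt.gca()
        returns.plot(ax=ax)
        return ax
    ax_styled = _tiny_plot(returns)                     # with the pyfolio context
    ax_plain = _tiny_plot(returns, set_context=False)   # plain matplotlib styling
    return ax_styled, ax_plain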
def context(context='notebook', font_scale=1.5, rc=None):
"""Create pyfolio default plotting style context.
Under the hood, calls and returns seaborn.plotting_context() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
context : str, optional
Name of seaborn context.
font_scale : float, optional
Scale font by factor font_scale.
rc : dict, optional
Config flags.
By default, {'lines.linewidth': 1.5,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97'}
is being used and will be added to any
rc passed in, unless explicitly overridden.
Returns
-------
seaborn plotting context
Example
-------
>>> with pyfolio.plotting.context(font_scale=2):
>>> pyfolio.create_full_tear_sheet()
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {'lines.linewidth': 1.5,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97'}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale,
rc=rc)
def plot_rolling_fama_french(
returns,
factor_returns=None,
rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best',
ax=None, **kwargs):
"""Plots rolling Fama-French single factor betas.
Specifically, plots SMB, HML, and UMD vs. date with a legend.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.DataFrame, optional
data set containing the Fama-French risk factors. See
utils.load_portfolio_risk_factors.
rolling_window : int, optional
The days window over which to compute the beta.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
num_months_str = '%.0f' % (rolling_window / APPROX_BDAYS_PER_MONTH)
ax.set_title(
"Rolling Fama-French Single Factor Betas (" +
num_months_str +
'-month)')
ax.set_ylabel('beta')
rolling_beta = timeseries.rolling_fama_french(
returns,
factor_returns=factor_returns,
rolling_window=rolling_window)
rolling_beta.plot(alpha=0.7, ax=ax, **kwargs)
ax.axhline(0.0, color='black')
ax.legend(['Small-Caps (SMB)',
'High-Growth (HML)',
'Momentum (UMD)'],
loc=legend_loc)
ax.set_ylim((-2.0, 2.0))
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
ax.axhline(0.0, color='black')
ax.set_xlabel('')
return ax
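# Hedged usage sketch (not part of the original module): the plotting helpers in
# this module share the same calling pattern -- pass a daily returns Series and,
# optionally, an existing Axes. `strategy_returns` is a placeholder name.
def _example_plot_rolling_fama_french(strategy_returns):
    """Illustrative only."""
    fig, ax = plt.subplots(figsize=(10, 4))
    plot_rolling_fama_french(strategy_returns, ax=ax)
    return fig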
def plot_monthly_returns_heatmap(returns, ax=None, **kwargs):
"""
Plots a heatmap of returns by month.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
monthly_ret_table = timeseries.aggregate_returns(returns,
'monthly')
monthly_ret_table = monthly_ret_table.unstack()
monthly_ret_table = np.round(monthly_ret_table, 3)
sns.heatmap(
monthly_ret_table.fillna(0) *
100.0,
annot=True,
annot_kws={
"size": 9},
alpha=1.0,
center=0.0,
cbar=False,
cmap=matplotlib.cm.RdYlGn,
ax=ax, **kwargs)
ax.set_ylabel('Year')
ax.set_xlabel('Month')
ax.set_title("Monthly Returns (%)")
return ax
def plot_annual_returns(returns, ax=None, **kwargs):
"""
Plots a bar graph of returns by year.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
x_axis_formatter = FuncFormatter(utils.percentage)
ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))
ax.tick_params(axis='x', which='major', labelsize=10)
ann_ret_df = pd.DataFrame(
timeseries.aggregate_returns(
returns,
'yearly'))
ax.axvline(
100 *
ann_ret_df.values.mean(),
color='steelblue',
linestyle='--',
lw=4,
alpha=0.7)
(100 * ann_ret_df.sort_index(ascending=False)
).plot(ax=ax, kind='barh', alpha=0.70, **kwargs)
ax.axvline(0.0, color='black', linestyle='-', lw=3)
ax.set_ylabel('Year')
ax.set_xlabel('Returns')
ax.set_title("Annual Returns")
ax.legend(['mean'])
return ax
def plot_monthly_returns_dist(returns, ax=None, **kwargs):
"""
Plots a distribution of monthly returns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
x_axis_formatter = FuncFormatter(utils.percentage)
ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))
ax.tick_params(axis='x', which='major', labelsize=10)
monthly_ret_table = timeseries.aggregate_returns(returns, 'monthly')
ax.hist(
100 * monthly_ret_table,
color='orangered',
alpha=0.80,
bins=20,
**kwargs)
ax.axvline(
100 * monthly_ret_table.mean(),
color='gold',
linestyle='--',
lw=4,
alpha=1.0)
ax.axvline(0.0, color='black', linestyle='-', lw=3, alpha=0.75)
ax.legend(['mean'])
ax.set_ylabel('Number of months')
ax.set_xlabel('Returns')
ax.set_title("Distribution of Monthly Returns")
return ax
def plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):
"""Plots total amount of stocks with an active position, either short
or long.
Displays daily total, daily average per month, and all-time daily
average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
positions = positions.copy().drop('cash', axis='columns')
df_holdings = positions.apply(lambda x: np.sum(x != 0), axis='columns')
df_holdings_by_month = df_holdings.resample('1M', how='mean')
df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)
df_holdings_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_holdings.values.mean(),
color='steelblue',
ls='--',
lw=3,
alpha=1.0)
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.legend(['Daily holdings',
'Average daily holdings, by month',
'Average daily holdings, net'],
loc=legend_loc)
ax.set_title('Holdings per Day')
ax.set_ylabel('Amount of holdings per day')
ax.set_xlabel('')
return ax
def plot_drawdown_periods(returns, top=10, ax=None, **kwargs):
"""
Plots cumulative returns highlighting top drawdown periods.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
Amount of top drawdowns periods to plot (default 10).
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_cum_rets = timeseries.cum_returns(returns, starting_value=1.0)
df_drawdowns = timeseries.gen_drawdown_table(returns, top=top)
df_cum_rets.plot(ax=ax, **kwargs)
lim = ax.get_ylim()
colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
for i, (peak, recovery) in df_drawdowns[
['peak date', 'recovery date']].iterrows():
if pd.isnull(recovery):
recovery = returns.index[-1]
ax.fill_between((peak, recovery),
lim[0],
lim[1],
alpha=.4,
color=colors[i])
ax.set_title('Top %i Drawdown Periods' % top)
ax.set_ylabel('Cumulative returns')
ax.legend(['Portfolio'], loc='upper left')
ax.set_xlabel('')
return ax
def plot_drawdown_underwater(returns, ax=None, **kwargs):
"""Plots how far underwaterr returns are over time, or plots current
drawdown vs. date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.percentage)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_cum_rets = timeseries.cum_returns(returns, starting_value=1.0)
running_max = np.maximum.accumulate(df_cum_rets)
underwater = -100 * ((running_max - df_cum_rets) / running_max)
(underwater).plot(ax=ax, kind='area', color='coral', alpha=0.7, **kwargs)
ax.set_ylabel('Drawdown')
ax.set_title('Underwater Plot')
ax.set_xlabel('')
return ax
def show_perf_stats(returns, factor_returns, live_start_date=None):
"""Prints some performance metrics of the strategy.
- Shows amount of time the strategy has been run in backtest and
out-of-sample (in live trading).
- Shows Omega ratio, max drawdown, Calmar ratio, annual return,
stability, Sharpe ratio, annual volatility, alpha, and beta.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
live_start_date : datetime, optional
The point in time when the strategy began live trading, after
its backtest period.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
"""
if live_start_date is not None:
live_start_date = utils.get_utc_timestamp(live_start_date)
returns_backtest = returns[returns.index < live_start_date]
returns_live = returns[returns.index > live_start_date]
perf_stats_live = np.round(timeseries.perf_stats(
returns_live, returns_style='arithmetic'), 2)
perf_stats_live_ab = np.round(
timeseries.calc_alpha_beta(returns_live, factor_returns), 2)
perf_stats_live.loc['alpha'] = perf_stats_live_ab[0]
perf_stats_live.loc['beta'] = perf_stats_live_ab[1]
perf_stats_live.columns = ['Out_of_Sample']
perf_stats_all = np.round(timeseries.perf_stats(
returns, returns_style='arithmetic'), 2)
perf_stats_all_ab = np.round(
timeseries.calc_alpha_beta(returns, factor_returns), 2)
perf_stats_all.loc['alpha'] = perf_stats_all_ab[0]
perf_stats_all.loc['beta'] = perf_stats_all_ab[1]
perf_stats_all.columns = ['All_History']
print('Out-of-Sample Months: ' + str(int(len(returns_live) / 21)))
else:
returns_backtest = returns
print('Backtest Months: ' + str(int(len(returns_backtest) / 21)))
perf_stats = np.round(timeseries.perf_stats(
returns_backtest, returns_style='arithmetic'), 2)
perf_stats_ab = np.round(
timeseries.calc_alpha_beta(returns_backtest, factor_returns), 2)
perf_stats.loc['alpha'] = perf_stats_ab[0]
perf_stats.loc['beta'] = perf_stats_ab[1]
perf_stats.columns = ['Backtest']
if live_start_date is not None:
perf_stats = perf_stats.join(perf_stats_live,
how='inner')
perf_stats = perf_stats.join(perf_stats_all,
how='inner')
print(perf_stats)
def plot_rolling_returns(
returns,
factor_returns=None,
live_start_date=None,
cone_std=None,
legend_loc='best',
volatility_match=False,
ax=None, **kwargs):
"""Plots cumulative rolling returns versus some benchmarks'.
Backtest returns are in green, and out-of-sample (live trading)
returns are in red.
Additionally, a linear cone plot may be added to the out-of-sample
returns region.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of a risk factor.
- This is in the same style as returns.
live_start_date : datetime, optional
The point in time when the strategy began live trading, after
its backtest period.
cone_std : float, or tuple, optional
If float, The standard deviation to use for the cone plots.
If tuple, Tuple of standard deviation values to use for the cone plots
- The cone is a normal distribution with this standard deviation
centered around a linear regression.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
volatility_match : bool, optional
Whether to normalize the volatility of the returns to those of the
benchmark returns. This helps compare strategies with different
volatilities. Requires passing of factor_returns.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
def draw_cone(returns, num_stdev, live_start_date, ax):
cone_df = timeseries.cone_rolling(
returns,
num_stdev=num_stdev,
cone_fit_end_date=live_start_date)
cone_in_sample = cone_df[cone_df.index < live_start_date]
cone_out_of_sample = cone_df[cone_df.index > live_start_date]
cone_out_of_sample = cone_out_of_sample[
cone_out_of_sample.index < returns.index[-1]]
ax.fill_between(cone_out_of_sample.index,
cone_out_of_sample.sd_down,
cone_out_of_sample.sd_up,
color='steelblue', alpha=0.25)
return cone_in_sample, cone_out_of_sample
if ax is None:
ax = plt.gca()
if volatility_match and factor_returns is None:
raise ValueError('volatility_match requires passing of '
'factor_returns.')
elif volatility_match and factor_returns is not None:
bmark_vol = factor_returns.loc[returns.index].std()
returns = (returns / returns.std()) * bmark_vol
df_cum_rets = timeseries.cum_returns(returns, 1.0)
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
if factor_returns is not None:
timeseries.cum_returns(factor_returns[df_cum_rets.index], 1.0).plot(
lw=2, color='gray', label='S&P500', alpha=0.60, ax=ax, **kwargs)
if live_start_date is not None:
live_start_date = utils.get_utc_timestamp(live_start_date)
if (live_start_date is None) or (df_cum_rets.index[-1] <=
live_start_date):
df_cum_rets.plot(lw=3, color='forestgreen', alpha=0.6,
label='Backtest', ax=ax, **kwargs)
else:
df_cum_rets[:live_start_date].plot(
lw=3, color='forestgreen', alpha=0.6,
label='Backtest', ax=ax, **kwargs)
df_cum_rets[live_start_date:].plot(
lw=4, color='red', alpha=0.6,
label='Live', ax=ax, **kwargs)
if cone_std is not None:
# check to see if cone_std was passed as a single value and,
# if so, just convert to list automatically
if isinstance(cone_std, float):
cone_std = [cone_std]
for cone_i in cone_std:
cone_in_sample, cone_out_of_sample = draw_cone(
returns,
cone_i,
live_start_date,
ax)
cone_in_sample['line'].plot(
ax=ax,
ls='--',
label='Backtest trend',
lw=2,
color='forestgreen',
alpha=0.7,
**kwargs)
cone_out_of_sample['line'].plot(
ax=ax,
ls='--',
label='Predicted trend',
lw=2,
color='red',
alpha=0.7,
**kwargs)
ax.axhline(1.0, linestyle='--', color='black', lw=2)
ax.set_ylabel('Cumulative returns')
ax.set_title('Cumulative Returns')
ax.legend(loc=legend_loc)
ax.set_xlabel('')
return ax
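# Hedged usage sketch for ``plot_rolling_returns`` (synthetic data, not part
# of pyfolio): the date range, returns and live start date below are
# illustrative; only the call signature mirrors the function defined above.
def _example_plot_rolling_returns():
    idx = pd.date_range('2014-01-02', periods=252, freq='B')
    rets = pd.Series(np.random.normal(0.0005, 0.01, len(idx)), index=idx)
    # Treat the last ~50 business days as live trading and draw a 1-std cone.
    return plot_rolling_returns(rets, live_start_date=idx[200], cone_std=1.0)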
def plot_rolling_beta(returns, factor_returns, legend_loc='best',
ax=None, **kwargs):
"""
Plots the rolling 6-month and 12-month beta versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
ax.set_title("Rolling Portfolio Beta to S&P 500")
ax.set_ylabel('Beta')
rb_1 = timeseries.rolling_beta(
returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6)
rb_1.plot(color='steelblue', lw=3, alpha=0.6, ax=ax, **kwargs)
rb_2 = timeseries.rolling_beta(
returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 12)
rb_2.plot(color='grey', lw=3, alpha=0.4, ax=ax, **kwargs)
ax.set_ylim((-2.5, 2.5))
ax.axhline(rb_1.mean(), color='steelblue', linestyle='--', lw=3)
ax.axhline(0.0, color='black', linestyle='-', lw=2)
ax.set_xlabel('')
ax.legend(['6-mo',
'12-mo'],
loc=legend_loc)
return ax
def plot_rolling_sharpe(returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best', ax=None, **kwargs):
"""
Plots the rolling Sharpe ratio versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
rolling_window : int, optional
        The size in days of the rolling window over which to compute the
        Sharpe ratio.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
rolling_sharpe_ts = timeseries.rolling_sharpe(
returns, rolling_window)
rolling_sharpe_ts.plot(alpha=.7, lw=3, color='orangered', ax=ax,
**kwargs)
ax.set_title('Rolling Sharpe ratio (6-month)')
ax.axhline(
rolling_sharpe_ts.mean(),
color='steelblue',
linestyle='--',
lw=3)
ax.axhline(0.0, color='black', linestyle='-', lw=3)
ax.set_ylim((-3.0, 6.0))
ax.set_ylabel('Sharpe ratio')
ax.set_xlabel('')
ax.legend(['Sharpe', 'Average'],
loc=legend_loc)
return ax
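# The series plotted above comes from ``timeseries.rolling_sharpe``. A hedged
# sketch of the conventional calculation that helper is assumed to perform
# (trailing mean over trailing standard deviation, annualized by ~252 trading
# days); illustrative only, the real implementation may differ.
def _example_rolling_sharpe(returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6):
    return (returns.rolling(window=rolling_window).mean() /
            returns.rolling(window=rolling_window).std()) * np.sqrt(252)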
def plot_gross_leverage(returns, gross_lev, ax=None, **kwargs):
"""Plots gross leverage versus date.
Gross leverage is the sum of long and short exposure per share
divided by net asset value.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
gross_lev : pd.Series, optional
The leverage of a strategy.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
gross_lev.plot(alpha=0.8, lw=0.5, color='g', legend=False, ax=ax,
**kwargs)
ax.axhline(gross_lev.mean(), color='g', linestyle='--', lw=3,
alpha=1.0)
ax.set_title('Gross Leverage')
ax.set_ylabel('Gross Leverage')
ax.set_xlabel('')
return ax
def plot_exposures(returns, positions_alloc, ax=None, **kwargs):
"""Plots a cake chart of the long and short exposure.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions_alloc : pd.DataFrame
Portfolio allocation of positions. See
pos.get_percent_alloc.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
df_long_short = pos.get_long_short_pos(positions_alloc)
df_long_short.plot(
kind='area', color=['lightblue', 'green'], alpha=1.0,
ax=ax, **kwargs)
df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
ax.set_xlim((df_cum_rets.index[0], df_cum_rets.index[-1]))
ax.set_title("Long/Short Exposure")
ax.set_ylabel('Exposure')
ax.set_xlabel('')
return ax
def show_and_plot_top_positions(returns, positions_alloc,
show_and_plot=2, hide_positions=False,
legend_loc='real_best', ax=None,
**kwargs):
"""Prints and/or plots the exposures of the top 10 held positions of
all time.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions_alloc : pd.DataFrame
Portfolio allocation of positions. See pos.get_percent_alloc.
show_and_plot : int, optional
By default, this is 2, and both prints and plots.
If this is 0, it will only plot; if 1, it will only print.
hide_positions : bool, optional
If True, will not output any symbol names.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
By default, the legend will display below the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes, conditional
The axes that were plotted on.
"""
df_top_long, df_top_short, df_top_abs = pos.get_top_long_short_abs(
positions_alloc)
if show_and_plot == 1 or show_and_plot == 2:
print("\n")
print('Top 10 long positions of all time (and max%)')
print(pd.DataFrame(df_top_long).index.values)
print(np.round(pd.DataFrame(df_top_long)[0].values, 3))
print("\n")
print('Top 10 short positions of all time (and max%)')
print(pd.DataFrame(df_top_short).index.values)
print(np.round(pd.DataFrame(df_top_short)[0].values, 3))
print("\n")
print('Top 10 positions of all time (and max%)')
print(pd.DataFrame(df_top_abs).index.values)
print(np.round(pd.DataFrame(df_top_abs)[0].values, 3))
print("\n")
_, _, df_top_abs_all = pos.get_top_long_short_abs(
positions_alloc, top=9999)
print('All positions ever held')
print(pd.DataFrame(df_top_abs_all).index.values)
print(np.round(pd.DataFrame(df_top_abs_all)[0].values, 3))
print("\n")
if show_and_plot == 0 or show_and_plot == 2:
if ax is None:
ax = plt.gca()
positions_alloc[df_top_abs.index].plot(
title='Portfolio Allocation Over Time, Only Top 10 Holdings',
alpha=0.4, ax=ax, **kwargs)
# Place legend below plot, shrink plot by 20%
if legend_loc == 'real_best':
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(
loc='upper center', frameon=True, bbox_to_anchor=(
0.5, -0.14), ncol=5)
else:
ax.legend(loc=legend_loc)
df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
ax.set_xlim((df_cum_rets.index[0], df_cum_rets.index[-1]))
ax.set_ylabel('Exposure by stock')
if hide_positions:
ax.legend_.remove()
return ax
def plot_sector_allocations(returns, sector_alloc, ax=None, **kwargs):
"""Plots the sector exposures of the portfolio over time.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
sector_alloc : pd.DataFrame
Portfolio allocation of positions. See pos.get_sector_alloc.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
        ax = plt.gca()
sector_alloc.plot(title='Sector Allocation Over Time',
alpha=0.4, ax=ax, **kwargs)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(
loc='upper center', frameon=True, bbox_to_anchor=(
0.5, -0.14), ncol=5)
ax.set_xlim((sector_alloc.index[0], sector_alloc.index[-1]))
ax.set_ylabel('Exposure by sector')
return ax
def plot_return_quantiles(returns, df_weekly, df_monthly, ax=None, **kwargs):
"""Creates a box plot of daily, weekly, and monthly return
distributions.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
df_weekly : pd.Series
Weekly returns of the strategy, noncumulative.
- See timeseries.aggregate_returns.
df_monthly : pd.Series
Monthly returns of the strategy, noncumulative.
- See timeseries.aggregate_returns.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
sns.boxplot(data=[returns, df_weekly, df_monthly],
ax=ax, **kwargs)
ax.set_xticklabels(['daily', 'weekly', 'monthly'])
ax.set_title('Return quantiles')
return ax
def show_return_range(returns, df_weekly):
"""
Print monthly return and weekly return standard deviations.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
df_weekly : pd.Series
Weekly returns of the strategy, noncumulative.
- See timeseries.aggregate_returns.
"""
two_sigma_daily = returns.mean() - 2 * returns.std()
two_sigma_weekly = df_weekly.mean() - 2 * df_weekly.std()
var_sigma = pd.Series([two_sigma_daily, two_sigma_weekly],
index=['2-sigma returns daily',
'2-sigma returns weekly'])
print(np.round(var_sigma, 3))
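# Worked example of the figures printed above (illustrative numbers only):
# with a daily mean return of 0.05% and a daily standard deviation of 1%,
# the reported 2-sigma daily value is 0.0005 - 2 * 0.01 = -0.0195, i.e. a
# move of roughly -1.95%, two standard deviations below the mean.
def _example_two_sigma_daily():
    daily = pd.Series([0.012, -0.008, 0.003, 0.015, -0.011, 0.006])
    return daily.mean() - 2 * daily.std()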
def plot_turnover(returns, transactions, positions,
legend_loc='best', ax=None, **kwargs):
"""Plots turnover vs. date.
Turnover is the number of shares traded for a period as a fraction
of total shares.
Displays daily total, daily average per month, and all-time daily
average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
        Daily transaction volume and dollar amount.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_turnover = txn.get_turnover(transactions, positions)
    df_turnover_by_month = df_turnover.resample("M").mean()
df_turnover.plot(color='steelblue', alpha=1.0, lw=0.5, ax=ax, **kwargs)
df_turnover_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_turnover.mean(), color='steelblue', linestyle='--', lw=3, alpha=1.0)
ax.legend(['Daily turnover',
'Average daily turnover, by month',
'Average daily turnover, net'],
loc=legend_loc)
ax.set_title('Daily Turnover')
df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
ax.set_xlim((df_cum_rets.index[0], df_cum_rets.index[-1]))
ax.set_ylim((0, 1))
ax.set_ylabel('Turnover')
ax.set_xlabel('')
return ax
def plot_slippage_sweep(returns, transactions, positions,
slippage_params=(3, 8, 10, 12, 15, 20, 50),
ax=None, **kwargs):
"""Plots a equity curves at different per-dollar slippage assumptions.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
transactions : pd.DataFrame
        Daily transaction volume and dollar amount.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
slippage_params: tuple
        Slippage parameters to apply to the return time series (in
basis points).
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(transactions, positions,
period=None, average=False)
slippage_sweep = pd.DataFrame()
for bps in slippage_params:
adj_returns = txn.adjust_returns_for_slippage(returns, turnover, bps)
label = str(bps) + " bps"
slippage_sweep[label] = timeseries.cum_returns(adj_returns, 1)
slippage_sweep.plot(alpha=1.0, lw=0.5, ax=ax)
ax.set_title('Cumulative Returns Given Additional Per-Dollar Slippage')
ax.set_ylabel('')
ax.legend(loc='center left')
return ax
def plot_slippage_sensitivity(returns, transactions, positions,
ax=None, **kwargs):
"""Plots curve relating per-dollar slippage to average annual returns.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
transactions : pd.DataFrame
        Daily transaction volume and dollar amount.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(transactions, positions,
period=None, average=False)
avg_returns_given_slippage = pd.Series()
for bps in range(1, 100):
adj_returns = txn.adjust_returns_for_slippage(returns, turnover, bps)
avg_returns = timeseries.annual_return(
adj_returns, style='calendar')
avg_returns_given_slippage.loc[bps] = avg_returns
avg_returns_given_slippage.plot(alpha=1.0, lw=2, ax=ax)
ax.set(title='Average Annual Returns Given Additional Per-Dollar Slippage',
xticks=np.arange(0, 100, 10),
ylabel='Average Annual Return',
xlabel='Per-Dollar Slippage (bps)')
return ax
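# The slippage curves above depend on ``txn.adjust_returns_for_slippage``. A
# hedged sketch of the adjustment it is assumed to apply: each day's return is
# reduced by that day's turnover times the slippage expressed in basis points.
# Illustrative only; the real helper may differ.
def _example_adjust_returns_for_slippage(returns, turnover, bps):
    slippage_pct = bps / 10000.0  # basis points to a decimal fraction
    return returns - turnover * slippage_pct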
def plot_daily_turnover_hist(transactions, positions,
ax=None, **kwargs):
"""Plots a histogram of daily turnover rates.
Parameters
----------
transactions : pd.DataFrame
        Daily transaction volume and dollar amount.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(transactions, positions, period=None)
sns.distplot(turnover, ax=ax, **kwargs)
ax.set_title('Distribution of Daily Turnover Rates')
ax.set_xlabel('Turnover Rate')
return ax
def plot_daily_volume(returns, transactions, ax=None, **kwargs):
"""Plots trading volume per day vs. date.
Also displays all-time daily average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
        Daily transaction volume and dollar amount.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
transactions.txn_shares.plot(alpha=1.0, lw=0.5, ax=ax, **kwargs)
ax.axhline(transactions.txn_shares.mean(), color='steelblue',
linestyle='--', lw=3, alpha=1.0)
ax.set_title('Daily Trading Volume')
df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
ax.set_xlim((df_cum_rets.index[0], df_cum_rets.index[-1]))
ax.set_ylabel('Amount of shares traded')
ax.set_xlabel('')
return ax
def plot_daily_returns_similarity(returns_backtest, returns_live,
title='', scale_kws=None, ax=None,
**kwargs):
"""Plots overlapping distributions of in-sample (backtest) returns
and out-of-sample (live trading) returns.
Parameters
----------
returns_backtest : pd.Series
Daily returns of the strategy's backtest, noncumulative.
returns_live : pd.Series
Daily returns of the strategy's live trading, noncumulative.
title : str, optional
The title to use for the plot.
scale_kws : dict, optional
Additional arguments passed to preprocessing.scale.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
if scale_kws is None:
scale_kws = {}
sns.kdeplot(preprocessing.scale(returns_backtest, **scale_kws),
bw='scott', shade=True, label='backtest',
color='forestgreen', ax=ax, **kwargs)
sns.kdeplot(preprocessing.scale(returns_live, **scale_kws),
bw='scott', shade=True, label='out-of-sample',
color='red', ax=ax, **kwargs)
ax.set_title(title)
return ax
def show_worst_drawdown_periods(returns, top=5):
"""Prints information about the worst drawdown periods.
Prints peak dates, valley dates, recovery dates, and net
drawdowns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
        Number of top drawdown periods to print (default 5).
"""
print('\nWorst Drawdown Periods')
drawdown_df = timeseries.gen_drawdown_table(returns, top=top)
drawdown_df['net drawdown in %'] = list(
map(utils.round_two_dec_places, drawdown_df['net drawdown in %']))
    print(drawdown_df.sort_values('net drawdown in %', ascending=False))
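# Hedged usage sketch for ``show_worst_drawdown_periods`` (synthetic data,
# not part of pyfolio): prints the three deepest drawdowns of a random
# daily-returns series.
def _example_show_worst_drawdown_periods():
    idx = pd.date_range('2014-01-02', periods=252, freq='B')
    rets = pd.Series(np.random.normal(0.0, 0.01, len(idx)), index=idx)
    show_worst_drawdown_periods(rets, top=3)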
|
|
from __future__ import absolute_import, unicode_literals
######################
# CARTRIDGE SETTINGS #
######################
# The following settings are already defined in cartridge.shop.defaults
# with default values, but are common enough to be put here, commented
# out, for convenient overriding.
# Sequence of available credit card types for payment.
# SHOP_CARD_TYPES = ("Mastercard", "Visa", "Diners", "Amex")
# Setting to turn on featured images for shop categories. Defaults to False.
# SHOP_CATEGORY_USE_FEATURED_IMAGE = True
# Set an alternative OrderForm class for the checkout process.
# SHOP_CHECKOUT_FORM_CLASS = 'cartridge.shop.forms.OrderForm'
# If True, the checkout process is split into separate
# billing/shipping and payment steps.
# SHOP_CHECKOUT_STEPS_SPLIT = True
# If True, the checkout process has a final confirmation step before
# completion.
# SHOP_CHECKOUT_STEPS_CONFIRMATION = True
# Controls the formatting of monetary values according to the locale
# module in the Python standard library. If an empty string is
# used, it will fall back to the system's locale.
# SHOP_CURRENCY_LOCALE = 'en_IN.UTF-8'
# Dotted package path and name of the function that
# is called on submit of the billing/shipping checkout step. This
# is where shipping calculation can be performed and set using the
# function ``cartridge.shop.utils.set_shipping``.
# SHOP_HANDLER_BILLING_SHIPPING = \
# "cartridge.shop.checkout.default_billship_handler"
# Dotted package path and name of the function that
# is called once an order is successful and all of the order
# object's data has been created. This is where any custom order
# processing should be implemented.
# SHOP_HANDLER_ORDER = "cartridge.shop.checkout.default_order_handler"
# Dotted package path and name of the function that
# is called on submit of the payment checkout step. This is where
# integration with a payment gateway should be implemented.
# SHOP_HANDLER_PAYMENT = "cartridge.shop.checkout.default_payment_handler"
# Sequence of value/name pairs for order statuses.
# SHOP_ORDER_STATUS_CHOICES = (
# (1, "Unprocessed"),
# (2, "Processed"),
# )
# Sequence of value/name pairs for types of product options,
# eg Size, Colour.
# SHOP_OPTION_TYPE_CHOICES = (
# (1, "Size"),
# (2, "Colour"),
# )
# Sequence of indexes from the SHOP_OPTION_TYPE_CHOICES setting that
# control how the options should be ordered in the admin,
# eg for "Colour" then "Size" given the above:
# SHOP_OPTION_ADMIN_ORDER = (2, 1)
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", ("Media Library", "fb_browse"),)),
# ("Shop", ("shop.Product", "shop.ProductOption", "shop.DiscountCode",
# "shop.Sale", "shop.Order")),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, "Top navigation bar", "pages/menus/dropdown.html"),
# (2, "Left-hand tree", "pages/menus/tree.html"),
# (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.db.models.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# ("Image",),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# ("Another name",),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
USE_SOUTH = True
SECRET_KEY = '3%_u63qmfgme2k1g4yj$huv8!asn$i7obk9kqaj860mkch#(%&'
########################
# MAIN DJANGO SETTINGS #
########################
# People who get code error notifications.
# In the format (('Full Name', '[email protected]'),
# ('Full Name', '[email protected]'))
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['52.10.5.91']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Kolkata'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
_ = lambda s: s
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ("127.0.0.1",)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": 'django.db.backends.mysql',
# DB name or path to database file if using sqlite3.
"NAME": 'mart_db',
# Not used with sqlite3.
"USER": 'root',
# Not used with sqlite3.
"PASSWORD": 'admin',
# Set to empty string for localhost. Not used with sqlite3.
"HOST": '127.0.0.1',
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
#########
# PATHS #
#########
import os
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
#TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, "moderna/templates"),
os.path.join(PROJECT_ROOT, "templates"),
)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"moderna",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"cartridge.shop",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.pages",
"mezzanine.galleries",
"mezzanine.twitter",
#"mezzanine.accounts",
#"mezzanine.mobile",
"mart",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page", #Added by me as shown in CMD while starting local server
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.redirects.middleware.RedirectFallbackMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"cartridge.shop.middleware.ShopMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False}
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username
# "SSH_PASS": "", # SSH password (consider key-based authentication)
# "SSH_KEY_PATH": "", # Local path to SSH key file, for key-based auth
# "HOSTS": [], # List of hosts to deploy to
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "", # Path to pip requirements, relative to project
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "LIVE_HOSTNAME": "www.example.com", # Host for public site.
# "REPO_URL": "", # Git or Mercurial remote repo URL for the project
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# "SECRET_KEY": SECRET_KEY,
# "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py, which should be
# ignored by your version control system, allowing settings to be
# defined per machine.
try:
from local_settings import *
except ImportError:
pass
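# A hedged sketch of what a per-machine ``local_settings.py`` might contain
# (illustrative values only; keep the file out of version control):
#
# DEBUG = True
# ALLOWED_HOSTS = ["127.0.0.1", "localhost"]
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.sqlite3",
#         "NAME": "dev.db",
#     }
# }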
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import types as python_types
import numpy as np
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.engine import Layer
from tensorflow.contrib.keras.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.contrib.keras.python.keras.utils.generic_utils import func_dump
from tensorflow.contrib.keras.python.keras.utils.generic_utils import func_load
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as tf_core_layers
from tensorflow.python.util import tf_inspect
class Masking(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
  to be fed to an LSTM layer.
  You want to mask timesteps #3 and #5 because you lack data for
  these timesteps. You can:
- set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
model.add(LSTM(32))
```
"""
def __init__(self, mask_value=0., **kwargs):
super(Masking, self).__init__(**kwargs)
self.supports_masking = True
self.mask_value = mask_value
def compute_mask(self, inputs, mask=None):
return K.any(K.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = K.any(
K.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
return inputs * K.cast(boolean_mask, K.floatx())
def get_config(self):
config = {'mask_value': self.mask_value}
base_config = super(Masking, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
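# Hedged sketch (illustrative, not part of Keras) of the mask produced by the
# layer above: timesteps whose features all equal ``mask_value`` come out
# False and are skipped by downstream layers that support masking.
def _example_masking_compute_mask():
    x = K.constant(np.array([[[1., 2.], [0., 0.], [3., 4.]]]))  # (1, 3, 2)
    layer = Masking(mask_value=0.)
    return layer.compute_mask(x)  # boolean tensor, roughly [[True, False, True]]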
class Dropout(tf_core_layers.Dropout, Layer):
"""Applies Dropout to the input.
Dropout consists in randomly setting
a fraction `rate` of input units to 0 at each update during training time,
which helps prevent overfitting.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
self.supports_masking = True
# Inheritance call order:
# 1) tf.layers.Dropout, 2) keras.layers.Layer, 3) tf.layers.Layer
    super(Dropout, self).__init__(rate=rate,
                                  noise_shape=noise_shape,
                                  seed=seed,
                                  **kwargs)
def call(self, inputs, training=None):
if training is None:
training = K.learning_phase()
output = super(Dropout, self).call(inputs, training=training)
if training is K.learning_phase():
output._uses_learning_phase = True # pylint: disable=protected-access
return output
def get_config(self):
config = {'rate': self.rate}
base_config = super(Dropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SpatialDropout1D(Dropout):
"""Spatial 1D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 1D feature maps instead of individual elements. If adjacent frames
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout1D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
Input shape:
3D tensor with shape:
`(samples, timesteps, channels)`
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, **kwargs):
super(SpatialDropout1D, self).__init__(rate, **kwargs)
self.input_spec = InputSpec(ndim=3)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
noise_shape = (input_shape[0], 1, input_shape[2])
return noise_shape
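# Illustrative note on the noise shape returned above (an assumption about the
# broadcasting semantics, not part of Keras): with shape (batch, 1, channels),
# a single keep/drop decision is drawn per channel and broadcast over every
# timestep, so an entire 1D feature map is kept or zeroed as a unit.
def _example_spatial_dropout1d_noise_shape():
    layer = SpatialDropout1D(rate=0.5)
    inputs = K.constant(np.ones((2, 10, 8)))  # (batch, timesteps, channels)
    return layer._get_noise_shape(inputs)     # components evaluate to (2, 1, 8)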
class SpatialDropout2D(Dropout):
"""Spatial 2D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 2D feature maps instead of individual elements. If adjacent pixels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout2D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension
(the depth) is at index 1,
          in 'channels_last' mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout2D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=4)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
noise_shape = (input_shape[0], input_shape[1], 1, 1)
elif self.data_format == 'channels_last':
noise_shape = (input_shape[0], 1, 1, input_shape[3])
else:
raise ValueError('Invalid data_format:', self.data_format)
return noise_shape
class SpatialDropout3D(Dropout):
"""Spatial 3D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 3D feature maps instead of individual elements. If adjacent voxels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout3D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension (the depth)
        is at index 1, in 'channels_last' mode it is at index 4.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
`(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
or 5D tensor with shape:
`(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout3D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=5)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
noise_shape = (input_shape[0], input_shape[1], 1, 1, 1)
elif self.data_format == 'channels_last':
noise_shape = (input_shape[0], 1, 1, 1, input_shape[4])
else:
raise ValueError('Invalid data_format:', self.data_format)
return noise_shape
class Activation(Layer):
"""Applies an activation function to an output.
Arguments:
activation: name of activation function to use
or alternatively, a Theano or TensorFlow operation.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, activation, **kwargs):
super(Activation, self).__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def get_config(self):
config = {'activation': activations.serialize(self.activation)}
base_config = super(Activation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Reshape(Layer):
"""Reshapes an output to a certain shape.
Arguments:
target_shape: target shape. Tuple of integers,
does not include the samples dimension (batch size).
Input shape:
    Arbitrary, although all dimensions in the input shape must be fixed.
Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
`(batch_size,) + target_shape`
Example:
```python
# as first layer in a Sequential model
model = Sequential()
model.add(Reshape((3, 4), input_shape=(12,)))
# now: model.output_shape == (None, 3, 4)
# note: `None` is the batch dimension
# as intermediate layer in a Sequential model
model.add(Reshape((6, 2)))
# now: model.output_shape == (None, 6, 2)
# also supports shape inference using `-1` as dimension
model.add(Reshape((-1, 2, 2)))
# now: model.output_shape == (None, 3, 2, 2)
```
"""
def __init__(self, target_shape, **kwargs):
super(Reshape, self).__init__(**kwargs)
self.target_shape = tuple(target_shape)
def _fix_unknown_dimension(self, input_shape, output_shape):
"""Find and replace a missing dimension in an output shape.
This is a near direct port of the internal Numpy function
`_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`
Arguments:
input_shape: shape of array being reshaped
output_shape: desired shape of the array with at most
a single -1 which indicates a dimension that should be
derived from the input shape.
Returns:
The new output shape with a -1 replaced with its computed value.
      Raises a ValueError if the total array size of the output_shape is
      different from the input_shape, or if more than one unknown dimension
is specified.
Raises:
ValueError: in case of invalid values
        for `input_shape` or `output_shape`.
"""
output_shape = list(output_shape)
msg = 'total size of new array must be unchanged'
known, unknown = 1, None
for index, dim in enumerate(output_shape):
if dim < 0:
if unknown is None:
unknown = index
else:
raise ValueError('Can only specify one unknown dimension.')
else:
known *= dim
original = np.prod(input_shape, dtype=int)
if unknown is not None:
if known == 0 or original % known != 0:
raise ValueError(msg)
output_shape[unknown] = original // known
elif original != known:
raise ValueError(msg)
return output_shape
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = [input_shape[0]]
output_shape += self._fix_unknown_dimension(input_shape[1:],
self.target_shape)
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
# In case the target shape is not fully defined,
# we need access to the shape of x.
target_shape = self.target_shape
if -1 in target_shape:
# target shape not fully defined
target_shape = self._compute_output_shape(inputs.get_shape())
target_shape = target_shape.as_list()[1:]
return K.reshape(inputs, (-1,) + tuple(target_shape))
def get_config(self):
config = {'target_shape': self.target_shape}
base_config = super(Reshape, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
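# Worked example of the -1 inference performed by ``_fix_unknown_dimension``
# above (illustrative, not part of Keras): an input of total size 12 reshaped
# to (-1, 2, 2) gives 12 // (2 * 2) = 3 for the unknown axis.
def _example_reshape_unknown_dimension():
    layer = Reshape((-1, 2, 2))
    return layer._fix_unknown_dimension([12], (-1, 2, 2))  # -> [3, 2, 2]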
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
Useful for e.g. connecting RNNs and convnets together.
Example:
```python
model = Sequential()
model.add(Permute((2, 1), input_shape=(10, 64)))
# now: model.output_shape == (None, 64, 10)
# note: `None` is the batch dimension
```
Arguments:
dims: Tuple of integers. Permutation pattern, does not include the
samples dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimension
of the input.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
"""
def __init__(self, dims, **kwargs):
super(Permute, self).__init__(**kwargs)
self.dims = tuple(dims)
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = copy.copy(input_shape)
for i, dim in enumerate(self.dims):
target_dim = input_shape[dim]
output_shape[i + 1] = target_dim
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
return K.permute_dimensions(inputs, (0,) + self.dims)
def get_config(self):
config = {'dims': self.dims}
base_config = super(Permute, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
Example:
```python
model = Sequential()
model.add(Convolution2D(64, 3, 3,
border_mode='same',
input_shape=(3, 32, 32)))
# now: model.output_shape == (None, 64, 32, 32)
model.add(Flatten())
# now: model.output_shape == (None, 65536)
```
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__(**kwargs)
self.input_spec = InputSpec(min_ndim=3)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if not all(input_shape[1:]):
raise ValueError('The shape of the input to "Flatten" '
'is not fully defined '
'(got ' + str(input_shape[1:]) + '. '
'Make sure to pass a complete "input_shape" '
'or "batch_input_shape" argument to the first '
'layer in your model.')
return tensor_shape.TensorShape([input_shape[0], np.prod(input_shape[1:])])
def call(self, inputs):
outputs = K.batch_flatten(inputs)
outputs.set_shape(self._compute_output_shape(inputs.get_shape()))
return outputs
class RepeatVector(Layer):
"""Repeats the input n times.
Example:
```python
model = Sequential()
model.add(Dense(32, input_dim=32))
# now: model.output_shape == (None, 32)
# note: `None` is the batch dimension
model.add(RepeatVector(3))
# now: model.output_shape == (None, 3, 32)
```
Arguments:
n: integer, repetition factor.
Input shape:
2D tensor of shape `(num_samples, features)`.
Output shape:
3D tensor of shape `(num_samples, n, features)`.
"""
def __init__(self, n, **kwargs):
super(RepeatVector, self).__init__(**kwargs)
self.n = n
self.input_spec = InputSpec(ndim=2)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]])
def call(self, inputs):
return K.repeat(inputs, self.n)
def get_config(self):
config = {'n': self.n}
base_config = super(RepeatVector, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Lambda(Layer):
"""Wraps arbitrary expression as a `Layer` object.
Examples:
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
```python
# add a layer that returns the concatenation
# of the positive part of the input and
# the opposite of the negative part
def antirectifier(x):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
model.add(Lambda(antirectifier))
```
Arguments:
function: The function to be evaluated.
Takes input tensor as first argument.
arguments: optional dictionary of keyword arguments to be passed
to the function.
Input shape:
Arbitrary. Use the keyword argument input_shape
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Specified by `output_shape` argument
(or auto-inferred when using TensorFlow).
"""
def __init__(self, function, mask=None, arguments=None, **kwargs):
super(Lambda, self).__init__(**kwargs)
self.function = function
self.arguments = arguments if arguments else {}
if mask is not None:
self.supports_masking = True
self.mask = mask
def call(self, inputs, mask=None):
arguments = self.arguments
arg_spec = tf_inspect.getargspec(self.function)
if 'mask' in arg_spec.args:
arguments['mask'] = mask
return self.function(inputs, **arguments)
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
if isinstance(self.function, python_types.LambdaType):
function = func_dump(self.function)
function_type = 'lambda'
else:
function = self.function.__name__
function_type = 'function'
config = {
'function': function,
'function_type': function_type,
'arguments': self.arguments
}
base_config = super(Lambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
globs = globals()
if custom_objects:
globs = dict(list(globs.items()) + list(custom_objects.items()))
function_type = config.pop('function_type')
if function_type == 'function':
# Simple lookup in custom objects
function = deserialize_keras_object(
config['function'],
custom_objects=custom_objects,
printable_module_name='function in Lambda layer')
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = func_load(config['function'], globs=globs)
else:
raise TypeError('Unknown function type:', function_type)
config['function'] = function
return cls(**config)
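# Hedged usage sketch (not part of Keras) for the ``arguments`` mapping
# described in the docstring above: the extra keyword arguments are forwarded
# to the wrapped function on every call.
def _example_lambda_with_arguments():
    def scale(x, factor=1.0):
        return x * factor
    # Every call receives factor=0.5 alongside the input tensor.
    return Lambda(scale, arguments={'factor': 0.5})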
class Dense(tf_core_layers.Dense, Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
Example:
```python
# as first layer in a sequential model:
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(Dense(32))
```
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
# Inheritance call order:
# 1) tf.layers.Dense, 2) keras.layers.Layer, 3) tf.layers.Layer
super(Dense, self).__init__(
units,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
# TODO(fchollet): move weight constraint support to core layers.
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.supports_masking = True
def build(self, input_shape):
super(Dense, self).build(input_shape)
# TODO(fchollet): move weight constraint support to core layers.
if self.kernel_constraint:
self.constraints[self.kernel] = self.kernel_constraint
if self.use_bias and self.bias_constraint:
self.constraints[self.bias] = self.bias_constraint
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Dense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Arguments:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, l1=0., l2=0., **kwargs):
super(ActivityRegularization, self).__init__(**kwargs)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2)
def get_config(self):
config = {'l1': self.l1, 'l2': self.l2}
base_config = super(ActivityRegularization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
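# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hedged example of the two layers defined above. It assumes a
# standalone TensorFlow installation and goes through the public `tf.keras`
# API rather than the private symbols in this file.
if __name__ == '__main__':
  import tensorflow as tf

  demo = tf.keras.Sequential([
      tf.keras.layers.Dense(32, activation='relu', input_shape=(16,)),
      # Adds an L1/L2 penalty on the previous layer's activations to the
      # model loss; the output shape is unchanged.
      tf.keras.layers.ActivityRegularization(l1=0.01, l2=0.01),
      tf.keras.layers.Dense(1),
  ])
  demo.compile(optimizer='sgd', loss='mse')
  demo.summary()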
|
|
import unittest
from mock import patch, Mock
import SocketServer
import serial
import socket
from pymodbus3.device import ModbusDeviceIdentification
from pymodbus3.server.sync import ModbusBaseRequestHandler
from pymodbus3.server.sync import ModbusSingleRequestHandler
from pymodbus3.server.sync import ModbusConnectedRequestHandler
from pymodbus3.server.sync import ModbusDisconnectedRequestHandler
from pymodbus3.server.sync import ModbusTcpServer, ModbusUdpServer, ModbusSerialServer
from pymodbus3.server.sync import StartTcpServer, StartUdpServer, StartSerialServer
from pymodbus3.bit_read_message import ReadCoilsRequest, ReadCoilsResponse
#---------------------------------------------------------------------------#
# Mock Classes
#---------------------------------------------------------------------------#
class MockServer(object):
def __init__(self):
self.framer = lambda _: "framer"
self.decoder = "decoder"
self.threads = []
self.context = {}
#---------------------------------------------------------------------------#
# Fixture
#---------------------------------------------------------------------------#
class SynchronousServerTest(unittest.TestCase):
"""
This is the unittest for the pymodbus3.server.sync module
"""
#-----------------------------------------------------------------------#
# Test Base Request Handler
#-----------------------------------------------------------------------#
def test_base_handler_undefined_methods(self):
""" Test the base handler undefined methods"""
handler = SocketServer.BaseRequestHandler(None, None, None)
handler.__class__ = ModbusBaseRequestHandler
self.assertRaises(NotImplementedError, lambda: handler.send(None))
self.assertRaises(NotImplementedError, lambda: handler.handle())
def test_base_handler_methods(self):
""" Test the base class for all the clients """
request = ReadCoilsRequest(1, 1)
address = ('server', 12345)
server = MockServer()
with patch.object(ModbusBaseRequestHandler, 'handle') as mock_handle:
with patch.object(ModbusBaseRequestHandler, 'send') as mock_send:
mock_handle.return_value = True
mock_send.return_value = True
handler = ModbusBaseRequestHandler(request, address, server)
self.assertEqual(handler.running, True)
self.assertEqual(handler.framer, 'framer')
handler.execute(request)
self.assertEqual(mock_send.call_count, 1)
server.context[0x00] = object()
handler.execute(request)
self.assertEqual(mock_send.call_count, 2)
#-----------------------------------------------------------------------#
# Test Single Request Handler
#-----------------------------------------------------------------------#
def test_modbus_single_request_handler_send(self):
handler = SocketServer.BaseRequestHandler(None, None, None)
handler.__class__ = ModbusSingleRequestHandler
handler.framer = Mock()
handler.framer.buildPacket.return_value = "message"
handler.request = Mock()
request = ReadCoilsResponse([1])
handler.send(request)
self.assertEqual(handler.request.send.call_count, 1)
request.should_respond = False
handler.send(request)
self.assertEqual(handler.request.send.call_count, 1)
def test_modbus_single_request_handler_handle(self):
handler = SocketServer.BaseRequestHandler(None, None, None)
handler.__class__ = ModbusSingleRequestHandler
handler.framer = Mock()
handler.framer.buildPacket.return_value = "message"
handler.request = Mock()
handler.request.recv.return_value = "\x12\x34"
# exit if we are not running
handler.running = False
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 0)
# run forever if we are running
def _callback1(a, b):
handler.running = False # stop infinite loop
handler.framer.processIncomingPacket.side_effect = _callback1
handler.running = True
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 1)
# exceptions are simply ignored
def _callback2(a, b):
if handler.framer.processIncomingPacket.call_count == 2:
raise Exception("example exception")
else:
handler.running = False # stop infinite loop
handler.framer.processIncomingPacket.side_effect = _callback2
handler.running = True
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 3)
#-----------------------------------------------------------------------#
# Test Connected Request Handler
#-----------------------------------------------------------------------#
def test_modbus_connected_request_handler_send(self):
handler = SocketServer.BaseRequestHandler(None, None, None)
handler.__class__ = ModbusConnectedRequestHandler
handler.framer = Mock()
handler.framer.buildPacket.return_value = "message"
handler.request = Mock()
request = ReadCoilsResponse([1])
handler.send(request)
self.assertEqual(handler.request.send.call_count, 1)
request.should_respond = False
handler.send(request)
self.assertEqual(handler.request.send.call_count, 1)
def test_modbus_connected_request_handler_handle(self):
handler = SocketServer.BaseRequestHandler(None, None, None)
handler.__class__ = ModbusConnectedRequestHandler
handler.framer = Mock()
handler.framer.buildPacket.return_value = "message"
handler.request = Mock()
handler.request.recv.return_value = "\x12\x34"
# exit if we are not running
handler.running = False
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 0)
# run forever if we are running
def _callback(a, b):
handler.running = False # stop infinite loop
handler.framer.processIncomingPacket.side_effect = _callback
handler.running = True
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 1)
# socket errors cause the client to disconnect
handler.framer.processIncomingPacket.side_effect = socket.error()
handler.running = True
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 2)
# every other exception causes the client to disconnect
handler.framer.processIncomingPacket.side_effect = Exception()
handler.running = True
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 3)
# receiving no data causes the client to disconnect
handler.request.recv.return_value = None
handler.running = True
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 3)
#-----------------------------------------------------------------------#
# Test Disconnected Request Handler
#-----------------------------------------------------------------------#
def test_modbus_disconnected_request_handler_send(self):
handler = SocketServer.BaseRequestHandler(None, None, None)
handler.__class__ = ModbusDisconnectedRequestHandler
handler.framer = Mock()
handler.framer.buildPacket.return_value = "message"
handler.request = Mock()
request = ReadCoilsResponse([1])
handler.send(request)
self.assertEqual(handler.request.sendto.call_count, 1)
request.should_respond = False
handler.send(request)
self.assertEqual(handler.request.sendto.call_count, 1)
def test_modbus_disconnected_request_handler_handle(self):
handler = SocketServer.BaseRequestHandler(None, None, None)
handler.__class__ = ModbusDisconnectedRequestHandler
handler.framer = Mock()
handler.framer.buildPacket.return_value = "message"
handler.request = ("\x12\x34", handler.request)
# exit if we are not running
handler.running = False
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 0)
# run forever if we are running
def _callback(a, b):
handler.running = False # stop infinite loop
handler.framer.processIncomingPacket.side_effect = _callback
handler.running = True
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 1)
# socket errors cause the client to disconnect
handler.request = ("\x12\x34", handler.request)
handler.framer.processIncomingPacket.side_effect = socket.error()
handler.running = True
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 2)
# every other exception causes the client to disconnect
handler.request = ("\x12\x34", handler.request)
handler.framer.processIncomingPacket.side_effect = Exception()
handler.running = True
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 3)
# receiving no data causes the client to disconnect
handler.request = (None, handler.request)
handler.running = True
handler.handle()
self.assertEqual(handler.framer.processIncomingPacket.call_count, 3)
#-----------------------------------------------------------------------#
# Test TCP Server
#-----------------------------------------------------------------------#
def test_tcp_server_close(self):
""" test that the synchronous TCP server closes correctly """
with patch.object(socket.socket, 'bind') as mock_socket:
identity = ModbusDeviceIdentification(info={0x00: 'VendorName'})
server = ModbusTcpServer(context=None, identity=identity)
server.threads.append(Mock(**{'running': True}))
server.server_close()
self.assertEqual(server.control.Identity.VendorName, 'VendorName')
self.assertFalse(server.threads[0].running)
def test_tcp_server_process(self):
""" test that the synchronous TCP server processes requests """
with patch('SocketServer.ThreadingTCPServer') as mock_server:
server = ModbusTcpServer(None)
server.process_request('request', 'client')
self.assertTrue(mock_server.process_request.called)
#-----------------------------------------------------------------------#
# Test UDP Server
#-----------------------------------------------------------------------#
def test_udp_server_close(self):
""" test that the synchronous UDP server closes correctly """
with patch.object(socket.socket, 'bind') as mock_socket:
identity = ModbusDeviceIdentification(info={0x00: 'VendorName'})
server = ModbusUdpServer(context=None, identity=identity)
server.threads.append(Mock(**{'running': True}))
server.server_close()
self.assertEqual(server.control.Identity.VendorName, 'VendorName')
self.assertFalse(server.threads[0].running)
def test_udp_server_process(self):
""" test that the synchronous UDP server processes requests """
with patch('SocketServer.ThreadingUDPServer') as mock_server:
server = ModbusUdpServer(None)
request = ('data', 'socket')
server.process_request(request, 'client')
self.assertTrue(mock_server.process_request.called)
#-----------------------------------------------------------------------#
# Test Serial Server
#-----------------------------------------------------------------------#
def test_serial_server_connect(self):
with patch.object(serial, 'Serial') as mock_serial:
mock_serial.return_value = "socket"
identity = ModbusDeviceIdentification(info={0x00: 'VendorName'})
server = ModbusSerialServer(context=None, identity=identity)
self.assertEqual(server.socket, "socket")
self.assertEqual(server.control.Identity.VendorName, 'VendorName')
server._connect()
self.assertEqual(server.socket, "socket")
with patch.object(serial, 'Serial') as mock_serial:
mock_serial.side_effect = serial.SerialException()
server = ModbusSerialServer(None)
self.assertEqual(server.socket, None)
def test_serial_server_serve_forever(self):
""" test that the synchronous serial server closes correctly """
with patch.object(serial, 'Serial') as mock_serial:
with patch('pymodbus3.server.sync.ModbusSingleRequestHandler') as mock_handler:
server = ModbusSerialServer(None)
instance = mock_handler.return_value
instance.handle.side_effect = server.server_close
server.serve_forever()
instance.handle.assert_any_call()
def test_serial_server_close(self):
""" test that the synchronous serial server closes correctly """
with patch.object(serial, 'Serial') as mock_serial:
instance = mock_serial.return_value
server = ModbusSerialServer(None)
server.server_close()
instance.close.assert_any_call()
#-----------------------------------------------------------------------#
# Test Synchronous Factories
#-----------------------------------------------------------------------#
def test_start_tcp_server(self):
""" Test the tcp server starting factory """
with patch.object(ModbusTcpServer, 'serve_forever') as mock_server:
with patch.object(SocketServer.TCPServer, 'server_bind') as mock_binder:
StartTcpServer()
def test_start_udp_server(self):
""" Test the udp server starting factory """
with patch.object(ModbusUdpServer, 'serve_forever') as mock_server:
with patch.object(SocketServer.UDPServer, 'server_bind') as mock_binder:
StartUdpServer()
def test_start_serial_server(self):
""" Test the serial server starting factory """
with patch.object(ModbusSerialServer, 'serve_forever') as mock_server:
StartSerialServer()
#---------------------------------------------------------------------------#
# Main
#---------------------------------------------------------------------------#
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import os
import sys
import urlparse
import httplib2
from six import moves
from tempest import clients
from tempest import config
CONF = config.CONF
RAW_HTTP = httplib2.Http()
CONF_PARSER = None
def _get_config_file():
default_config_dir = os.path.join(os.path.abspath(
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), "etc")
default_config_file = "tempest.conf"
conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
conf_file = os.environ.get('TEMPEST_CONFIG', default_config_file)
path = os.path.join(conf_dir, conf_file)
    # The config file is only read here; updated output goes to a separate file.
    fd = open(path, 'r')
return fd
def change_option(option, group, value):
if not CONF_PARSER.has_section(group):
CONF_PARSER.add_section(group)
CONF_PARSER.set(group, option, str(value))
def print_and_or_update(option, group, value, update):
print('Config option %s in group %s should be changed to: %s'
% (option, group, value))
if update:
change_option(option, group, value)
def verify_glance_api_versions(os, update):
# Check glance api versions
__, versions = os.image_client.get_versions()
if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
versions):
print_and_or_update('api_v1', 'image_feature_enabled',
not CONF.image_feature_enabled.api_v1, update)
if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
print_and_or_update('api_v2', 'image_feature_enabled',
not CONF.image_feature_enabled.api_v2, update)
def _get_unversioned_endpoint(base_url):
endpoint_parts = urlparse.urlparse(base_url)
endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
return endpoint
def _get_api_versions(os, service):
client_dict = {
'nova': os.servers_client,
'keystone': os.identity_client,
'cinder': os.volumes_client,
}
client_dict[service].skip_path()
endpoint = _get_unversioned_endpoint(client_dict[service].base_url)
__, body = RAW_HTTP.request(endpoint, 'GET')
client_dict[service].reset_path()
body = json.loads(body)
if service == 'keystone':
versions = map(lambda x: x['id'], body['versions']['values'])
else:
versions = map(lambda x: x['id'], body['versions'])
return versions
def verify_keystone_api_versions(os, update):
# Check keystone api versions
versions = _get_api_versions(os, 'keystone')
if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
print_and_or_update('api_v2', 'identity_feature_enabled',
not CONF.identity_feature_enabled.api_v2, update)
if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
print_and_or_update('api_v3', 'identity_feature_enabled',
not CONF.identity_feature_enabled.api_v3, update)
def verify_nova_api_versions(os, update):
versions = _get_api_versions(os, 'nova')
if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
print_and_or_update('api_v3', 'compute_feature_enabled',
not CONF.compute_feature_enabled.api_v3, update)
def verify_cinder_api_versions(os, update):
# Check cinder api versions
versions = _get_api_versions(os, 'cinder')
if CONF.volume_feature_enabled.api_v1 != ('v1.0' in versions):
print_and_or_update('api_v1', 'volume_feature_enabled',
not CONF.volume_feature_enabled.api_v1, update)
if CONF.volume_feature_enabled.api_v2 != ('v2.0' in versions):
print_and_or_update('api_v2', 'volume_feature_enabled',
not CONF.volume_feature_enabled.api_v2, update)
def get_extension_client(os, service):
extensions_client = {
'nova': os.extensions_client,
'nova_v3': os.extensions_v3_client,
'cinder': os.volumes_extension_client,
'neutron': os.network_client,
'swift': os.account_client,
}
if service not in extensions_client:
print('No tempest extensions client for %s' % service)
exit(1)
return extensions_client[service]
def get_enabled_extensions(service):
extensions_options = {
'nova': CONF.compute_feature_enabled.api_extensions,
'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
'cinder': CONF.volume_feature_enabled.api_extensions,
'neutron': CONF.network_feature_enabled.api_extensions,
'swift': CONF.object_storage_feature_enabled.discoverable_apis,
}
if service not in extensions_options:
print('No supported extensions list option for %s' % service)
exit(1)
return extensions_options[service]
def verify_extensions(os, service, results):
extensions_client = get_extension_client(os, service)
__, resp = extensions_client.list_extensions()
if isinstance(resp, dict):
# For both Nova and Neutron we use the alias name rather than the
# 'name' field because the alias is considered to be the canonical
# name.
if service in ['nova', 'nova_v3', 'neutron']:
extensions = map(lambda x: x['alias'], resp['extensions'])
elif service == 'swift':
# Remove Swift general information from extensions list
resp.pop('swift')
extensions = resp.keys()
else:
extensions = map(lambda x: x['name'], resp['extensions'])
else:
extensions = map(lambda x: x['name'], resp)
if not results.get(service):
results[service] = {}
extensions_opt = get_enabled_extensions(service)
if extensions_opt[0] == 'all':
results[service]['extensions'] = extensions
return results
# Verify that all configured extensions are actually enabled
for extension in extensions_opt:
results[service][extension] = extension in extensions
# Verify that there aren't additional extensions enabled that aren't
# specified in the config list
for extension in extensions:
if extension not in extensions_opt:
results[service][extension] = False
return results
def display_results(results, update, replace):
update_dict = {
'swift': 'object-storage-feature-enabled',
'nova': 'compute-feature-enabled',
'nova_v3': 'compute-feature-enabled',
'cinder': 'volume-feature-enabled',
'neutron': 'network-feature-enabled',
}
for service in results:
# If all extensions are specified as being enabled there is no way to
# verify this so we just assume this to be true
if results[service].get('extensions'):
if replace:
output_list = results[service].get('extensions')
else:
output_list = ['all']
else:
extension_list = get_enabled_extensions(service)
output_list = []
for extension in results[service]:
if not results[service][extension]:
if extension in extension_list:
print("%s extension: %s should not be included in the "
"list of enabled extensions" % (service,
extension))
else:
print("%s extension: %s should be included in the list"
" of enabled extensions" % (service, extension))
output_list.append(extension)
else:
output_list.append(extension)
if update:
# Sort List
output_list.sort()
# Convert list to a string
output_string = ', '.join(output_list)
if service == 'swift':
change_option('discoverable_apis', update_dict[service],
output_string)
elif service == 'nova_v3':
change_option('api_v3_extensions', update_dict[service],
output_string)
else:
change_option('api_extensions', update_dict[service],
output_string)
def check_service_availability(os, update):
services = []
avail_services = []
codename_match = {
'volume': 'cinder',
'network': 'neutron',
'image': 'glance',
'object_storage': 'swift',
'compute': 'nova',
'orchestration': 'heat',
'metering': 'ceilometer',
'telemetry': 'ceilometer',
'data_processing': 'sahara',
'baremetal': 'ironic',
'identity': 'keystone',
'queuing': 'marconi',
'database': 'trove'
}
# Get catalog list for endpoints to use for validation
__, endpoints = os.endpoints_client.list_endpoints()
for endpoint in endpoints:
__, service = os.service_client.get_service(endpoint['service_id'])
services.append(service['type'])
# Pull all catalog types from config file and compare against endpoint list
for cfgname in dir(CONF._config):
cfg = getattr(CONF, cfgname)
catalog_type = getattr(cfg, 'catalog_type', None)
if not catalog_type:
continue
else:
if cfgname == 'identity':
# Keystone is a required service for tempest
continue
if catalog_type not in services:
if getattr(CONF.service_available, codename_match[cfgname]):
                print('Endpoint type %s not found; either disable service '
                      '%s or fix the catalog_type in the config file' % (
                          catalog_type, codename_match[cfgname]))
if update:
change_option(codename_match[cfgname],
'service_available', False)
else:
if not getattr(CONF.service_available,
codename_match[cfgname]):
print('Endpoint type %s is available, service %s should be'
' set as available in the config file.' % (
catalog_type, codename_match[cfgname]))
if update:
change_option(codename_match[cfgname],
'service_available', True)
# If we are going to enable this we should allow
# extension checks.
avail_services.append(codename_match[cfgname])
else:
avail_services.append(codename_match[cfgname])
return avail_services
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--update', action='store_true',
help='Update the config file with results from api '
'queries. This assumes whatever is set in the '
                             'config file is incorrect. In the case of '
                             'endpoint checks, where either the catalog '
                             'type or the service_available option could '
                             'be wrong, the service_available option is '
                             'assumed to be incorrect and is changed')
parser.add_argument('-o', '--output',
help="Output file to write an updated config file to. "
"This has to be a separate file from the "
"original config file. If one isn't specified "
"with -u the new config file will be printed to "
"STDOUT")
parser.add_argument('-r', '--replace-ext', action='store_true',
help="If specified the all option will be replaced "
"with a full list of extensions")
args = parser.parse_args()
return args
def main():
print('Running config verification...')
opts = parse_args()
update = opts.update
replace = opts.replace_ext
global CONF_PARSER
outfile = sys.stdout
if update:
conf_file = _get_config_file()
if opts.output:
outfile = open(opts.output, 'w+')
CONF_PARSER = moves.configparser.SafeConfigParser()
CONF_PARSER.optionxform = str
CONF_PARSER.readfp(conf_file)
os = clients.ComputeAdminManager(interface='json')
services = check_service_availability(os, update)
results = {}
for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
if service == 'nova_v3' and 'nova' not in services:
continue
elif service not in services:
continue
results = verify_extensions(os, service, results)
verify_keystone_api_versions(os, update)
verify_glance_api_versions(os, update)
verify_nova_api_versions(os, update)
verify_cinder_api_versions(os, update)
display_results(results, update, replace)
if update:
conf_file.close()
CONF_PARSER.write(outfile)
outfile.close()
if __name__ == "__main__":
main()
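# Example invocations (the filename "verify_tempest_config.py" is assumed; the
# script may be installed under a different name):
#   python verify_tempest_config.py                    # only report mismatches
#   python verify_tempest_config.py -u -o new.conf     # also write an updated config
#   python verify_tempest_config.py -u -r -o new.conf  # and expand 'all' extension lists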
|
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/messaging.py
"""
import datetime
import uuid
import mock
from oslo_config import fixture as fixture_config
from oslo_utils import netutils
import testscenarios.testcase
from ceilometer.event.storage import models as event
from ceilometer.publisher import messaging as msg_publisher
from ceilometer import sample
from ceilometer.tests import base as tests_base
class BasePublisherTestCase(tests_base.BaseTestCase):
test_event_data = [
event.Event(message_id=uuid.uuid4(),
event_type='event_%d' % i,
generated=datetime.datetime.utcnow(),
traits=[], raw={})
for i in range(0, 5)
]
test_sample_data = [
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test3',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
]
def setUp(self):
super(BasePublisherTestCase, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.setup_messaging(self.CONF)
class NotifierOnlyPublisherTest(BasePublisherTestCase):
@mock.patch('oslo_messaging.Notifier')
def test_publish_topic_override(self, notifier):
msg_publisher.SampleNotifierPublisher(
netutils.urlsplit('notifier://?topic=custom_topic'))
notifier.assert_called_with(mock.ANY, topics=['custom_topic'],
driver=mock.ANY, retry=mock.ANY,
publisher_id=mock.ANY)
msg_publisher.EventNotifierPublisher(
netutils.urlsplit('notifier://?topic=custom_event_topic'))
notifier.assert_called_with(mock.ANY, topics=['custom_event_topic'],
driver=mock.ANY, retry=mock.ANY,
publisher_id=mock.ANY)
@mock.patch('ceilometer.messaging.get_transport')
def test_publish_other_host(self, cgt):
msg_publisher.SampleNotifierPublisher(
netutils.urlsplit('notifier://foo:[email protected]:1234'))
cgt.assert_called_with(self.CONF, 'rabbit://foo:[email protected]:1234')
msg_publisher.EventNotifierPublisher(
netutils.urlsplit('notifier://foo:[email protected]:1234'))
cgt.assert_called_with(self.CONF, 'rabbit://foo:[email protected]:1234')
@mock.patch('ceilometer.messaging.get_transport')
def test_publish_other_host_vhost_and_query(self, cgt):
msg_publisher.SampleNotifierPublisher(
netutils.urlsplit('notifier://foo:[email protected]:1234/foo'
'?driver=amqp&amqp_auto_delete=true'))
cgt.assert_called_with(self.CONF, 'amqp://foo:[email protected]:1234/foo'
'?amqp_auto_delete=true')
msg_publisher.EventNotifierPublisher(
netutils.urlsplit('notifier://foo:[email protected]:1234/foo'
'?driver=amqp&amqp_auto_delete=true'))
cgt.assert_called_with(self.CONF, 'amqp://foo:[email protected]:1234/foo'
'?amqp_auto_delete=true')
class TestPublisher(testscenarios.testcase.WithScenarios,
BasePublisherTestCase):
scenarios = [
('notifier',
dict(protocol="notifier",
publisher_cls=msg_publisher.SampleNotifierPublisher,
test_data=BasePublisherTestCase.test_sample_data,
pub_func='publish_samples', attr='source')),
('event_notifier',
dict(protocol="notifier",
publisher_cls=msg_publisher.EventNotifierPublisher,
test_data=BasePublisherTestCase.test_event_data,
pub_func='publish_events', attr='event_type')),
]
def setUp(self):
super(TestPublisher, self).setUp()
self.topic = (self.CONF.publisher_notifier.event_topic
if self.pub_func == 'publish_events' else
self.CONF.publisher_notifier.metering_topic)
class TestPublisherPolicy(TestPublisher):
@mock.patch('ceilometer.publisher.messaging.LOG')
def test_published_with_no_policy(self, mylog):
publisher = self.publisher_cls(
netutils.urlsplit('%s://' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
msg_publisher.DeliveryFailure,
getattr(publisher, self.pub_func),
self.test_data)
self.assertTrue(mylog.info.called)
self.assertEqual('default', publisher.policy)
self.assertEqual(0, len(publisher.local_queue))
fake_send.assert_called_once_with(
self.topic, mock.ANY)
@mock.patch('ceilometer.publisher.messaging.LOG')
def test_published_with_policy_block(self, mylog):
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=default' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
msg_publisher.DeliveryFailure,
getattr(publisher, self.pub_func),
self.test_data)
self.assertTrue(mylog.info.called)
self.assertEqual(0, len(publisher.local_queue))
fake_send.assert_called_once_with(
self.topic, mock.ANY)
@mock.patch('ceilometer.publisher.messaging.LOG')
def test_published_with_policy_incorrect(self, mylog):
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=notexist' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
msg_publisher.DeliveryFailure,
getattr(publisher, self.pub_func),
self.test_data)
self.assertTrue(mylog.warning.called)
self.assertEqual('default', publisher.policy)
self.assertEqual(0, len(publisher.local_queue))
fake_send.assert_called_once_with(
self.topic, mock.ANY)
@mock.patch('ceilometer.publisher.messaging.LOG', mock.Mock())
class TestPublisherPolicyReactions(TestPublisher):
def test_published_with_policy_drop_and_rpc_down(self):
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=drop' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(0, len(publisher.local_queue))
fake_send.assert_called_once_with(
self.topic, mock.ANY)
def test_published_with_policy_queue_and_rpc_down(self):
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=queue' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(1, len(publisher.local_queue))
fake_send.assert_called_once_with(
self.topic, mock.ANY)
def test_published_with_policy_queue_and_rpc_down_up(self):
self.rpc_unreachable = True
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=queue' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(1, len(publisher.local_queue))
fake_send.side_effect = mock.MagicMock()
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(0, len(publisher.local_queue))
topic = self.topic
expected = [mock.call(topic, mock.ANY),
mock.call(topic, mock.ANY),
mock.call(topic, mock.ANY)]
self.assertEqual(expected, fake_send.mock_calls)
def test_published_with_policy_sized_queue_and_rpc_down(self):
publisher = self.publisher_cls(netutils.urlsplit(
'%s://?policy=queue&max_queue_length=3' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
for i in range(0, 5):
for s in self.test_data:
setattr(s, self.attr, 'test-%d' % i)
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(3, len(publisher.local_queue))
self.assertEqual(
'test-2',
publisher.local_queue[0][1][0][self.attr]
)
self.assertEqual(
'test-3',
publisher.local_queue[1][1][0][self.attr]
)
self.assertEqual(
'test-4',
publisher.local_queue[2][1][0][self.attr]
)
def test_published_with_policy_default_sized_queue_and_rpc_down(self):
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=queue' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
for i in range(0, 2000):
for s in self.test_data:
setattr(s, self.attr, 'test-%d' % i)
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(1024, len(publisher.local_queue))
self.assertEqual(
'test-976',
publisher.local_queue[0][1][0][self.attr]
)
self.assertEqual(
'test-1999',
publisher.local_queue[1023][1][0][self.attr]
)
|
|
#!/usr/bin/env python
from pynes.instruction import *
from pynes.addressmode import *
def create_instruction(func, mode):
"""creates a dynamic type using "func" and "mode" as base classes"""
return type(func.__name__ + mode.__name__, (func, mode), {})
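# For example, create_instruction(ADC, Immediate) returns a new class named
# "ADCImmediate" whose bases are (ADC, Immediate); each entry in the opcode
# table below maps a 6502 opcode byte to one of these generated classes.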
instruction_map = {
# ADC instructions
0x69: create_instruction(ADC, Immediate),
0x65: create_instruction(ADC, ZeroPage),
0x75: create_instruction(ADC, ZeroPageX),
    0x6D: create_instruction(ADC, Absolute),
    0x7D: create_instruction(ADC, AbsoluteX),
0x79: create_instruction(ADC, AbsoluteY),
0x61: create_instruction(ADC, IndirectX),
0x71: create_instruction(ADC, IndirectY),
# AND instructions
0x29: create_instruction(AND, Immediate),
0x25: create_instruction(AND, ZeroPage),
0x35: create_instruction(AND, ZeroPageX),
0x2D: create_instruction(AND, Absolute),
0x3D: create_instruction(AND, AbsoluteX),
0x39: create_instruction(AND, AbsoluteY),
0x21: create_instruction(AND, IndirectX),
0x31: create_instruction(AND, IndirectY),
# ASL instructions
0x0A: create_instruction(ASL, Accumulator),
0x06: create_instruction(ASL, ZeroPage),
0x16: create_instruction(ASL, ZeroPageX),
0x0E: create_instruction(ASL, Absolute),
0x1E: create_instruction(ASL, AbsoluteX),
# BCC instructions
0x90: create_instruction(BCC, Relative),
# BCS instructions
0xB0: create_instruction(BCS, Relative),
# BEQ instructions
0xF0: create_instruction(BEQ, Relative),
# BIT instructions
0x24: create_instruction(BIT, ZeroPage),
0x2C: create_instruction(BIT, Absolute),
# BMI instructions
0x30: create_instruction(BMI, Relative),
    # BNE instructions
0xD0: create_instruction(BNE, Relative),
# BPL instructions
0x10: create_instruction(BPL, Relative),
# BRK instructions
0x00: create_instruction(BRK, Implied),
    # BVC instructions
0x50: create_instruction(BVC, Relative),
# BVS instructions
0x70: create_instruction(BVS, Relative),
# CLC instructions
0x18: create_instruction(CLC, Implied),
# CLD instructions
0xD8: create_instruction(CLD, Implied),
# CLI instructions
0x58: create_instruction(CLI, Implied),
# CLV instructions
0xB8: create_instruction(CLV, Implied),
# CMP instructions
    0xC9: create_instruction(CMP, Immediate),
    0xC5: create_instruction(CMP, ZeroPage),
    0xD5: create_instruction(CMP, ZeroPageX),
    0xCD: create_instruction(CMP, Absolute),
    0xDD: create_instruction(CMP, AbsoluteX),
    0xD9: create_instruction(CMP, AbsoluteY),
    0xC1: create_instruction(CMP, IndirectX),
    0xD1: create_instruction(CMP, IndirectY),
# CPX instructions
0xE0: create_instruction(CPX, Immediate),
0xE4: create_instruction(CPX, ZeroPage),
0xEC: create_instruction(CPX, Absolute),
# CPY instructions
0xC0: create_instruction(CPY, Immediate),
0xC4: create_instruction(CPY, ZeroPage),
0xCC: create_instruction(CPY, Absolute),
# DEC instructions
0xC6: create_instruction(DEC, ZeroPage),
0xD6: create_instruction(DEC, ZeroPageX),
0xCE: create_instruction(DEC, Absolute),
0xDE: create_instruction(DEC, AbsoluteX),
# DEX instructions
0xCA: create_instruction(DEX, Implied),
# DEY instructions
0x88: create_instruction(DEY, Implied),
# EOR instructions
0x49: create_instruction(EOR, Immediate),
0x45: create_instruction(EOR, ZeroPage),
0x55: create_instruction(EOR, ZeroPageX),
    0x4D: create_instruction(EOR, Absolute),
    0x5D: create_instruction(EOR, AbsoluteX),
0x59: create_instruction(EOR, AbsoluteY),
0x41: create_instruction(EOR, IndirectX),
0x51: create_instruction(EOR, IndirectY),
# INC instructions
0xE6: create_instruction(INC, ZeroPage),
0xF6: create_instruction(INC, ZeroPageX),
0xEE: create_instruction(INC, Absolute),
0xFE: create_instruction(INC, AbsoluteX),
# INX instructions
0xE8: create_instruction(INX, Implied),
# INY instructions
0xC8: create_instruction(INY, Implied),
# JMP instructions
0x4C: create_instruction(JMP, Absolute),
0x6C: create_instruction(JMP, Indirect),
# JSR instructions
0x20: create_instruction(JSR, Absolute),
# LDA instructions
0xA9: create_instruction(LDA, Immediate),
0xA5: create_instruction(LDA, ZeroPage),
0xB5: create_instruction(LDA, ZeroPageX),
0xAD: create_instruction(LDA, Absolute),
0xBD: create_instruction(LDA, AbsoluteX),
0xB9: create_instruction(LDA, AbsoluteY),
0xA1: create_instruction(LDA, IndirectX),
0xB1: create_instruction(LDA, IndirectY),
# LDX instructions
0xA2: create_instruction(LDX, Immediate),
0xA6: create_instruction(LDX, ZeroPage),
0xB6: create_instruction(LDX, ZeroPageY),
0xAE: create_instruction(LDX, Absolute),
0xBE: create_instruction(LDX, AbsoluteY),
# LDY instructions
0xA0: create_instruction(LDY, Immediate),
0xA4: create_instruction(LDY, ZeroPage),
0xB4: create_instruction(LDY, ZeroPageX),
0xAC: create_instruction(LDY, Absolute),
0xBC: create_instruction(LDY, AbsoluteX),
# LSR instructions
0x4A: create_instruction(LSR, Accumulator),
0x46: create_instruction(LSR, ZeroPage),
0x56: create_instruction(LSR, ZeroPageX),
0x4E: create_instruction(LSR, Absolute),
0x5E: create_instruction(LSR, AbsoluteX),
# NOP instructions
0xEA: create_instruction(NOP, Implied),
# ORA instructions
0x09: create_instruction(ORA, Immediate),
0x05: create_instruction(ORA, ZeroPage),
0x15: create_instruction(ORA, ZeroPageX),
0x0D: create_instruction(ORA, Absolute),
0x1D: create_instruction(ORA, AbsoluteX),
0x19: create_instruction(ORA, AbsoluteY),
0x01: create_instruction(ORA, IndirectX),
0x11: create_instruction(ORA, IndirectY),
# PHA instructions
0x48: create_instruction(PHA, Implied),
# PHP instructions
0x08: create_instruction(PHP, Implied),
# PLA instructions
0x68: create_instruction(PLA, Implied),
# PLP instructions
0x28: create_instruction(PLP, Implied),
# ROL instructions
0x2A: create_instruction(ROL, Accumulator),
0x26: create_instruction(ROL, ZeroPage),
0x36: create_instruction(ROL, ZeroPageX),
0x2E: create_instruction(ROL, Absolute),
0x3E: create_instruction(ROL, AbsoluteX),
# ROR instructions
0x6A: create_instruction(ROR, Accumulator),
0x66: create_instruction(ROR, ZeroPage),
0x76: create_instruction(ROR, ZeroPageX),
0x6E: create_instruction(ROR, Absolute),
0x7E: create_instruction(ROR, AbsoluteX),
# RTI instructions
    0x40: create_instruction(RTI, Implied),
# RTS instructions
0x60: create_instruction(RTS, Implied),
# SBC instructions
0xE9: create_instruction(SBC, Immediate),
0xE5: create_instruction(SBC, ZeroPage),
0xF5: create_instruction(SBC, ZeroPageX),
0xED: create_instruction(SBC, Absolute),
0xFD: create_instruction(SBC, AbsoluteX),
0xF9: create_instruction(SBC, AbsoluteY),
0xE1: create_instruction(SBC, IndirectX),
0xF1: create_instruction(SBC, IndirectY),
# SEC instructions
0x38: create_instruction(SEC, Implied),
# SED instructions
0xF8: create_instruction(SED, Implied),
# SEI instructions
0x78: create_instruction(SEI, Implied),
# STA instructions
0x85: create_instruction(STA, ZeroPage),
0x95: create_instruction(STA, ZeroPageX),
    0x8D: create_instruction(STA, Absolute),
    0x9D: create_instruction(STA, AbsoluteX),
0x99: create_instruction(STA, AbsoluteY),
0x81: create_instruction(STA, IndirectX),
0x91: create_instruction(STA, IndirectY),
# STX instructions
0x86: create_instruction(STX, ZeroPage),
0x96: create_instruction(STX, ZeroPageY),
0x8E: create_instruction(STX, Absolute),
# STY instructions
0x84: create_instruction(STY, ZeroPage),
0x94: create_instruction(STY, ZeroPageX),
0x8C: create_instruction(STY, Absolute),
# TAX instructions
0xAA: create_instruction(TAX, Implied),
# TAY instructions
0xA8: create_instruction(TAY, Implied),
# TSX instructions
0xBA: create_instruction(TSX, Implied),
# TXA instructions
0x8A: create_instruction(TXA, Implied),
# TXS instructions
0x9A: create_instruction(TXS, Implied),
# TYA instructions
0x98: create_instruction(TYA, Implied),
}
|
|
'''
Cobra Distributed Event subsystem.
'''
import json
import time
import socket
import struct
import logging
import itertools
import threading
import collections
import envi.threads as e_threads
logger = logging.getLogger(__name__)
class CobraEventCore:
def __init__(self):
self._ce_chanids = itertools.count()
self._ce_fireq = e_threads.ChunkQueue()
self._ce_chans = []
self._ce_handlers = collections.defaultdict(list)
self._ce_upstreams = []
self._ce_mcastport = None
self._ce_mcasthost = None
self._ce_ecastsock = None
self._ce_chanlookup = {}
self._ce_firethr = self._fireFireThread()
def finiEventCore(self):
self._ce_fireq.shutdown()
self._ce_firethr.join()
@e_threads.firethread
def _fireFireThread(self):
for args, kwargs in self._ce_fireq:
try:
self._fireEvent(*args, **kwargs)
except Exception as e:
                logger.warning('fireFireThread _fireEvent error: %s', e)
@e_threads.maintthread(3)
def cullAbandonedChannels(self, abtime):
[self.finiEventChannel(q.chanid) for q in self._ce_chans if q.abandoned(abtime)]
def initEventChannel(self, qmax=0):
'''
Create a new channel id and allocate an
event Queue.
'''
chanid = next(self._ce_chanids)
q = e_threads.ChunkQueue()
q.chanid = chanid # monkey patch the chanid to the q
self._ce_chans.append(q)
self._ce_chanlookup[chanid] = q
return chanid
def finiEventChannel(self, chanid):
'''
Close the specified event channel by adding a
(None,None) event and removing the channel's
Queue object.
'''
q = self._ce_chanlookup.pop(chanid)
q.put((None, None))
self._ce_chans.remove(q)
def finiEventChannels(self):
'''
Close down all event channels by adding a (None,None)
event and removing the event Q from the datastructs.
'''
for upstream, upchan in self._ce_upstreams:
try:
upstream.finiEventChannel(upchan)
except Exception as e:
logger.warning('upstream error: %s %s', str(upstream), str(e))
[self.finiEventChannel(chanid) for chanid in self._ce_chanlookup.keys()]
def getNextEventsForChan(self, chanid, timeout=None):
'''
Get the next event for a previously initialized
event channel. If "timeout" is specified, the
call will return None after the timeout interval.
Each returned event is a tuple of ( eventname, eventinfo ).
When the channel returns (None, None) it has closed.
'''
q = self._ce_chanlookup.get(chanid)
if q is None:
return None
return q.get(timeout=timeout)
def fireEvent(self, *args, **kwargs):
'''
Fire an event into the event distribution system.
( see _fireEvent for arg defs, we proxy all fire events through 1 thread )
        NOTE: an event coming down from an upstream will *not*
        be propagated upward to *any* upstreams.
'''
self._ce_fireq.put((args, kwargs))
def _fireEvent(self, event, einfo, upstream=True, skip=None, chans=None):
etup = (event, einfo)
# Speed hack
if chans is not None:
[q.put(etup) for q in self._ce_chans if q.chanid in chans]
else:
[q.put(etup) for q in self._ce_chans if q.chanid != skip]
if self._ce_ecastsock:
self._ce_ecastsock.sendto(json.dumps(etup), (self._ce_mcasthost, self._ce_mcastport))
for handler in self._ce_handlers[event]:
try:
handler(event, einfo)
except Exception as e:
logger.warning('handler error(%s): %s', str(event), str(e))
if upstream:
for upstream, upchan in self._ce_upstreams:
try:
upstream.fireEvent(event, einfo, skip=upchan)
except Exception as e:
logger.warning('upstream error: %s %s', str(upstream), str(e))
def addEventHandler(self, event, callback):
'''
Add a local handler which will be called on fireEvent() for
the specified event type. The callback uses the following
convention:
callback(event, einfo)
'''
self._ce_handlers[event].append(callback)
def delEventHandler(self, event, callback):
'''
Remove a previously added event handler for the given event
type.
'''
self._ce_handlers[event].remove(callback)
@e_threads.firethread
def addEventUpstream(self, evtcore, qmax=0, timeout=5):
'''
        Add another eventcore object as an "upstream" eventer to this
        one. We will propagate local events upward to it, as well as
        receive all of its events ( minus our own ).
'''
chan = evtcore.initEventChannel(qmax=qmax)
corechan = [evtcore, chan]
self._ce_upstreams.append(corechan)
while True:
try:
                events = evtcore.getNextEventsForChan(chan, timeout=timeout)
except Exception as e:
logger.warning('addEventUpstream Error: %s', e)
time.sleep(1)
# grab a new channel...
chan = evtcore.initEventChannel(qmax=qmax)
corechan[1] = chan
continue
# channel closed..
if events is None:
return
try:
[self.fireEvent(event, einfo, upstream=False) for (event, einfo) in events]
except Exception as e:
logger.warning('addEventUpstream fireEvent Error: %s', e)
time.sleep(1)
def addEventCallback(self, callback, qmax=0, firethread=True):
'''
Create a new event channel and fire a thread which
listens for events and hands them off to the function
"callback"
def mycallback(event, einfo):
dostuff()
evt = CobraEventCore()
evt.addEventCallback( mycallback )
NOTE: This API is *not* cobra proxy call safe.
'''
if firethread:
thr = threading.Thread(target=self.addEventCallback, args=(callback, qmax, False))
thr.setDaemon(True)
thr.start()
return
chanid = self.initEventChannel(qmax=qmax)
q = self._ce_chanlookup.get(chanid)
while True:
for event, einfo in q.get(timeout=5):
if event is None:
break
try:
callback(event, einfo)
except Exception as e:
logger.warning('Event Callback Exception (chan: %d): %s', chanid, e)
def setEventCast(self, mcast='224.56.56.56', port=45654, bind='0.0.0.0'):
'''
Tie this CobraEventCore to any others which share the same multicast
ip and port. This basically creates a ( udp "unreliable" ) "bus" on
which events are serialized using json.
'''
# Setup a UDP casting socket
self._ce_mcastport = port
self._ce_mcasthost = mcast
self._ce_ecastsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._ce_ecastsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._ce_ecastsock.bind((bind, port))
# Join the multicast IP
mreq = struct.pack("4sL", socket.inet_aton(mcast), socket.INADDR_ANY)
self._ce_ecastsock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
thr = threading.Thread(target=self._runSocketListener)
thr.setDaemon(True)
thr.start()
def _runSocketListener(self):
sock = self._ce_ecastsock
while True:
sockdata, sockaddr = sock.recvfrom(4096)
etup = json.loads(sockdata)
[q.put(etup) for q in self._ce_chans]
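# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hedged example of the handler/channel APIs defined above; the
# event name and handler are illustrative only.
if __name__ == '__main__':
    ecore = CobraEventCore()

    def onwoot(event, einfo):
        logger.info('handler got %s: %r', event, einfo)

    ecore.addEventHandler('woot', onwoot)
    chanid = ecore.initEventChannel()
    ecore.fireEvent('woot', {'x': 1})
    # The fired event is delivered both to the handler and to the channel.
    print(ecore.getNextEventsForChan(chanid, timeout=1))
    ecore.finiEventCore()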
|