"""
Parser for PHD files output by PHRED and used by PHRAP and
CONSED.
Works fine with PHRED 0.020425.c
Version 1.1, 03/09/2004
written by Cymon J. Cox ([email protected]) and Frank Kauff ([email protected])
Comments, bugs, problems and suggestions to one of us are welcome!
Uses the Biopython Parser interface for parsing: ParserSupport.py
"""
import os
from types import FileType, InstanceType
from Bio import File
from Bio import Index
from Bio import Seq
from Bio import SeqRecord
from Bio.ParserSupport import *
from Bio.Alphabet import IUPAC
CKEYWORDS=['CHROMAT_FILE','ABI_THUMBPRINT','PHRED_VERSION','CALL_METHOD',\
'QUALITY_LEVELS','TIME','TRACE_ARRAY_MIN_INDEX','TRACE_ARRAY_MAX_INDEX',\
'TRIM','TRACE_PEAK_AREA_RATIO','CHEM','DYE']
class Record:
"""Hold information from a PHD file
"""
def __init__(self):
self.file_name = ''
self.comments={}
for kw in CKEYWORDS:
self.comments[kw.lower()]=None
self.sites = []
self.seq = ''
self.seq_trimmed = ''
class Iterator:
"""Iterates over a file of multiple PHD records
Methods:
next Return the next record from the stream, or None.
"""
def __init__(self, handle, parser=None):
"""__init__(self, handle, parser=None)
Create a new iterator. handle is a file-like object. parser
is an optional Parser object to change the results into another form.
If set to None, then the raw contents of the file will be returned.
"""
if type(handle) is not FileType and type(handle) is not InstanceType:
raise ValueError("I expected a file handle or file-like object")
self._uhandle = File.UndoHandle(handle)
self._parser = parser
def next(self):
"""next(self) -> object
Return the next PHD record from the file. If no more records
return None.
"""
lines = []
while 1:
line = self._uhandle.readline()
if not line:
break
# If a new record, then put the line back and stop.
if lines and line[:14] == 'BEGIN_SEQUENCE':
self._uhandle.saveline(line)
break
lines.append(line)
if not lines:
return None
data = ''.join(lines)
if self._parser is not None:
return self._parser.parse(File.StringHandle(data))
return data
def __iter__(self):
return iter(self.next, None)
class RecordParser(AbstractParser):
"""Parses PHD file data into a Record object
"""
def __init__(self):
self._scanner = _Scanner()
self._consumer = _RecordConsumer()
def parse(self, handle):
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
self._scanner.feed(uhandle, self._consumer)
return self._consumer.data
class _Scanner:
"""Scans a PHD-formatted file
Methods:
feed - Feed one PHD record.
"""
def feed(self, handle, consumer):
"""feed(self, handle, consumer)
Feed in PHD data for scanning. handle is a file-like object
containing PHD data. consumer is a Consumer object that will
receive events as the PHD data is scanned.
"""
assert isinstance(handle, File.UndoHandle), \
"handle must be an UndoHandle"
if handle.peekline():
self._scan_record(handle, consumer)
def _scan_record(self, uhandle, consumer):
self._scan_begin_sequence(uhandle, consumer)
self._scan_comments(uhandle, consumer)
self._scan_dna(uhandle, consumer)
consumer.end_sequence()
def _scan_begin_sequence(self, uhandle, consumer):
read_and_call(uhandle, consumer.begin_sequence, start = 'BEGIN_SEQUENCE')
def _scan_comments(self, uhandle, consumer):
read_and_call_while(uhandle, consumer.noevent, blank=1)
read_and_call(uhandle, consumer.noevent, start = 'BEGIN_COMMENT')
read_and_call_while(uhandle, consumer.noevent, blank=1)
while 1:
for kw in CKEYWORDS:
if attempt_read_and_call(uhandle,getattr(consumer,kw.lower()),start=kw+':'):
break # recognized keyword: end for loop and do another while
else:
break # no keywords found: end while loop
read_and_call_while(uhandle, consumer.noevent, blank=1)
read_and_call(uhandle, consumer.noevent, start = 'END_COMMENT')
def _scan_dna(self, uhandle, consumer):
while 1:
line = uhandle.readline()
if is_blank_line(line) or line == 'BEGIN_DNA\n':
continue
elif line == 'END_DNA\n':
break
consumer.read_dna(line)
class _RecordConsumer(AbstractConsumer):
"""Consumer that converts a PHD record to a Record object
"""
def __init__(self):
self.data = None
def begin_sequence(self, line):
self.data = Record()
self.data.file_name = line[15:].rstrip()
def end_sequence(self):
self.data.seq = Seq.Seq(''.join([n[0] for n in self.data.sites]), IUPAC.IUPACAmbiguousDNA())
first = self.data.comments['trim'][0]
last = self.data.comments['trim'][1]
self.data.seq_trimmed = Seq.Seq(self.data.seq.tostring()[first : last], IUPAC.IUPACAmbiguousDNA())
def chromat_file(self, line):
self.data.comments['chromat_file'] = line[13:-1].strip()
def abi_thumbprint(self, line):
self.data.comments['abi_thumbprint'] = int(line[15:-1].strip())
def phred_version(self, line):
self.data.comments['phred_version'] = line[14:-1].strip()
def call_method(self, line):
self.data.comments['call_method'] = line[12:-1].strip()
def quality_levels(self, line):
self.data.comments['quality_levels'] = int(line[15:-1].strip())
def time(self, line):
self.data.comments['time'] = line[5:-1].strip()
def trace_array_min_index(self, line):
self.data.comments['trace_array_min_index'] = int(line[22:-1].strip())
def trace_array_max_index(self, line):
self.data.comments['trace_array_max_index'] = int(line[22:-1].strip())
def trim(self, line):
first, last, prob = line[5:-1].split()
self.data.comments['trim'] = (int(first), int(last), float(prob))
def trace_peak_area_ratio(self, line):
self.data.comments['trace_peak_area_ratio'] = float(line[22:-1].strip())
def chem(self, line):
self.data.comments['chem'] = line[5:-1].strip()
def dye(self, line):
self.data.comments['dye'] = line[4:-1].strip()
def read_dna(self, line):
base, quality, location = line.split()
self.data.sites.append((base, quality, location))
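# Example usage -- a minimal, hedged sketch rather than part of the module
# proper; the file name 'example.phd.1' is only an assumption, substitute
# any PHRED .phd output file:
#
#     handle = open('example.phd.1')
#     for record in Iterator(handle, RecordParser()):
#         print record.file_name
#         print record.comments['phred_version']
#         print record.seq_trimmed.tostring()
#     handle.close()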
import datetime
import functools
from .base import EventBuilder
from .._misc import utils
from .. import _tl
from ..types import _custom
# TODO Either the properties are poorly named or they should be
# different events, but that would be a breaking change.
#
# TODO There are more "user updates", but bundling them all up
# in a single place will make it annoying to use (since
# the user needs to check for the existence of `None`).
#
# TODO Handle UpdateUserBlocked, UpdateUserName, UpdateUserPhone, UpdateUserPhoto
def _requires_action(function):
@functools.wraps(function)
def wrapped(self):
return None if self.action is None else function(self)
return wrapped
def _requires_status(function):
@functools.wraps(function)
def wrapped(self):
return None if self.status is None else function(self)
return wrapped
class UserUpdate(EventBuilder, _custom.chatgetter.ChatGetter, _custom.sendergetter.SenderGetter):
"""
Occurs whenever a user goes online, starts typing, etc.
Members:
status (:tl:`UserStatus`, optional):
The user status if the update is about going online or offline.
You should check this attribute first before checking any
of the status-related properties below, since they will all be
`None` if the status is not set.
action (:tl:`SendMessageAction`, optional):
The "typing" action if any the user is performing if any.
You should check this attribute first before checking any
of the typing properties, since they will all be `None`
if the action is not set.
Example
.. code-block:: python
from telethon import events
@client.on(events.UserUpdate)
async def handler(event):
# If someone is uploading, say something
if event.uploading:
await client.send_message(event.user_id, 'What are you sending?')
"""
@classmethod
def _build(cls, client, update, entities):
chat_peer = None
status = None
if isinstance(update, _tl.UpdateUserStatus):
peer = _tl.PeerUser(update.user_id)
status = update.status
typing = None
elif isinstance(update, _tl.UpdateChannelUserTyping):
peer = update.from_id
chat_peer = _tl.PeerChannel(update.channel_id)
typing = update.action
elif isinstance(update, _tl.UpdateChatUserTyping):
peer = update.from_id
chat_peer = _tl.PeerChat(update.chat_id)
typing = update.action
elif isinstance(update, _tl.UpdateUserTyping):
peer = update.user_id
typing = update.action
else:
return None
self = cls.__new__(cls)
self._client = client
self._sender = entities.get(peer)
self._chat = entities.get(chat_peer or peer)
self.status = status
self.action = typing
return self
@property
def user(self):
"""Alias for `sender <telethon.tl.custom.sendergetter.SenderGetter.sender>`."""
return self.sender
async def get_user(self):
"""Alias for `get_sender <telethon.tl.custom.sendergetter.SenderGetter.get_sender>`."""
return await self.get_sender()
@property
def input_user(self):
"""Alias for `input_sender <telethon.tl.custom.sendergetter.SenderGetter.input_sender>`."""
return self.input_sender
@property
def user_id(self):
"""Alias for `sender_id <telethon.tl.custom.sendergetter.SenderGetter.sender_id>`."""
return self.sender_id
@property
@_requires_action
def typing(self):
"""
`True` if the action is typing a message.
"""
return isinstance(self.action, _tl.SendMessageTypingAction)
@property
@_requires_action
def uploading(self):
"""
`True` if the action is uploading something.
"""
return isinstance(self.action, (
_tl.SendMessageChooseContactAction,
_tl.SendMessageChooseStickerAction,
_tl.SendMessageUploadAudioAction,
_tl.SendMessageUploadDocumentAction,
_tl.SendMessageUploadPhotoAction,
_tl.SendMessageUploadRoundAction,
_tl.SendMessageUploadVideoAction
))
@property
@_requires_action
def recording(self):
"""
`True` if the action is recording something.
"""
return isinstance(self.action, (
_tl.SendMessageRecordAudioAction,
_tl.SendMessageRecordRoundAction,
_tl.SendMessageRecordVideoAction
))
@property
@_requires_action
def playing(self):
"""
`True` if the action is playing a game.
"""
return isinstance(self.action, _tl.SendMessageGamePlayAction)
@property
@_requires_action
def cancel(self):
"""
`True` if the action was cancelling other actions.
"""
return isinstance(self.action, _tl.SendMessageCancelAction)
@property
@_requires_action
def geo(self):
"""
`True` if what's being uploaded is a geo.
"""
return isinstance(self.action, _tl.SendMessageGeoLocationAction)
@property
@_requires_action
def audio(self):
"""
`True` if what's being recorded/uploaded is an audio.
"""
return isinstance(self.action, (
_tl.SendMessageRecordAudioAction,
_tl.SendMessageUploadAudioAction
))
@property
@_requires_action
def round(self):
"""
`True` if what's being recorded/uploaded is a round video.
"""
return isinstance(self.action, (
_tl.SendMessageRecordRoundAction,
_tl.SendMessageUploadRoundAction
))
@property
@_requires_action
def video(self):
"""
`True` if what's being recorded/uploaded is a video.
"""
return isinstance(self.action, (
_tl.SendMessageRecordVideoAction,
_tl.SendMessageUploadVideoAction
))
@property
@_requires_action
def contact(self):
"""
`True` if what's being uploaded (selected) is a contact.
"""
return isinstance(self.action, _tl.SendMessageChooseContactAction)
@property
@_requires_action
def document(self):
"""
`True` if what's being uploaded is a document.
"""
return isinstance(self.action, _tl.SendMessageUploadDocumentAction)
@property
@_requires_action
def sticker(self):
"""
`True` if what's being uploaded is a sticker.
"""
return isinstance(self.action, _tl.SendMessageChooseStickerAction)
@property
@_requires_action
def photo(self):
"""
`True` if what's being uploaded is a photo.
"""
return isinstance(self.action, _tl.SendMessageUploadPhotoAction)
@property
@_requires_status
def last_seen(self):
"""
Exact `datetime.datetime` when the user was last seen if known.
"""
if isinstance(self.status, _tl.UserStatusOffline):
return self.status.was_online
@property
@_requires_status
def until(self):
"""
The `datetime.datetime` until which the user should appear online.
"""
if isinstance(self.status, _tl.UserStatusOnline):
return self.status.expires
def _last_seen_delta(self):
if isinstance(self.status, _tl.UserStatusOffline):
return datetime.datetime.now(tz=datetime.timezone.utc) - self.status.was_online
elif isinstance(self.status, _tl.UserStatusOnline):
return datetime.timedelta(days=0)
elif isinstance(self.status, _tl.UserStatusRecently):
return datetime.timedelta(days=1)
elif isinstance(self.status, _tl.UserStatusLastWeek):
return datetime.timedelta(days=7)
elif isinstance(self.status, _tl.UserStatusLastMonth):
return datetime.timedelta(days=30)
else:
return datetime.timedelta(days=365)
@property
@_requires_status
def online(self):
"""
`True` if the user is currently online.
"""
return self._last_seen_delta() <= datetime.timedelta(days=0)
@property
@_requires_status
def recently(self):
"""
`True` if the user was seen within a day.
"""
return self._last_seen_delta() <= datetime.timedelta(days=1)
@property
@_requires_status
def within_weeks(self):
"""
`True` if the user was seen within 7 days.
"""
return self._last_seen_delta() <= datetime.timedelta(days=7)
@property
@_requires_status
def within_months(self):
"""
`True` if the user was seen within 30 days.
"""
return self._last_seen_delta() <= datetime.timedelta(days=30)
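# A second, hedged usage sketch showing the status-related properties
# (the `client` name mirrors the example in the class docstring above):
#
#     @client.on(events.UserUpdate)
#     async def handler(event):
#         if event.online:
#             print('User', event.user_id, 'is online now')
#         elif event.last_seen:
#             print('User', event.user_id, 'was last seen at', event.last_seen)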
# -*- coding: utf-8 -*-
'''
Management of firewalld
.. versionadded:: 2015.8.0
The following example applies changes to the public zone, blocks echo-reply
and echo-request packets, does not set the zone to be the default, enables
masquerading, and allows ports 22/tcp and 25/tcp.
.. code-block:: yaml
public:
firewalld.present:
- name: public
- block_icmp:
- echo-reply
- echo-request
- default: False
- masquerade: True
- ports:
- 22/tcp
- 25/tcp
The following example applies changes to the public zone: it enables
masquerading, forwards TCP traffic from port 22 to port 2222, and
forwards TCP traffic from port 80 to port 443 at 192.168.0.1.
.. code-block:: yaml
my_zone:
firewalld.present:
- name: public
- masquerade: True
- port_fwd:
- 22:2222:tcp
- 80:443:tcp:192.168.0.1
'''
# Import Python Libs
from __future__ import absolute_import
import logging
# Import Salt Libs
from salt.exceptions import CommandExecutionError
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
Ensure the firewall-cmd is available
'''
if salt.utils.which('firewall-cmd'):
return True
return False
def present(name,
block_icmp=None,
default=None,
masquerade=False,
ports=None,
port_fwd=None,
services=None):
'''
Ensure a zone has specific attributes.
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
try:
zones = __salt__['firewalld.get_zones']()
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if name not in zones:
if not __opts__['test']:
try:
__salt__['firewalld.new_zone'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({name:
{'old': zones,
'new': name}})
if block_icmp:
new_icmp_types = []
try:
_valid_icmp_types = __salt__['firewalld.get_icmp_types']()
_current_icmp_blocks = __salt__['firewalld.list_icmp_block'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
for icmp_type in set(block_icmp):
if icmp_type in _valid_icmp_types:
if icmp_type not in _current_icmp_blocks:
new_icmp_types.append(icmp_type)
if not __opts__['test']:
try:
__salt__['firewalld.block_icmp'](name, icmp_type)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
else:
log.error('{0} is an invalid ICMP type'.format(icmp_type))
if new_icmp_types:
ret['changes'].update({'icmp_blocks':
{'old': _current_icmp_blocks,
'new': new_icmp_types}})
if default:
try:
default_zone = __salt__['firewalld.default_zone']()
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if name != default_zone:
if not __opts__['test']:
try:
__salt__['firewalld.set_default_zone'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({'default':
{'old': default_zone,
'new': name}})
if masquerade:
try:
masquerade_ret = __salt__['firewalld.get_masquerade'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if not masquerade_ret:
if not __opts__['test']:
try:
__salt__['firewalld.add_masquerade'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({'masquerade':
{'old': '',
'new': 'Masquerading successfully set.'}})
if ports:
new_ports = []
try:
_current_ports = __salt__['firewalld.list_ports'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
for port in ports:
if port not in _current_ports:
new_ports.append(port)
if not __opts__['test']:
try:
__salt__['firewalld.add_port'](name, port)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_ports:
ret['changes'].update({'ports':
{'old': _current_ports,
'new': new_ports}})
if port_fwd:
new_port_fwds = []
try:
_current_port_fwd = __salt__['firewalld.list_port_fwd'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
for port in port_fwd:
dstaddr = ''
rule_exists = False
if len(port.split(':')) > 3:
(src, dest, protocol, dstaddr) = port.split(':')
else:
(src, dest, protocol) = port.split(':')
for item in _current_port_fwd:
if (src == item['Source port'] and dest == item['Destination port'] and
protocol == item['Protocol'] and dstaddr == item['Destination address']):
rule_exists = True
if rule_exists is False:
new_port_fwds.append(port)
if not __opts__['test']:
try:
__salt__['firewalld.add_port_fwd'](name, src, dest, protocol, dstaddr)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_port_fwds:
ret['changes'].update({'port_fwd':
{'old': _current_port_fwd,
'new': new_port_fwds}})
if services:
new_services = []
try:
_current_services = __salt__['firewalld.list_services'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
for service in services:
if service not in _current_services:
new_services.append(service)
if not __opts__['test']:
try:
__salt__['firewalld.add_service'](service, zone=name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_services:
ret['changes'].update({'services':
{'old': _current_services,
'new': new_services}})
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = '\'{0}\' is already in the desired state.'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Configuration for \'{0}\' will change.'.format(name)
return ret
ret['comment'] = '\'{0}\' was configured.'.format(name)
return ret
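# For illustration, a hedged sketch of a state that also manages zone
# services (the service names below are assumptions; any service known to
# firewalld can be listed):
#
#     public:
#       firewalld.present:
#         - name: public
#         - services:
#           - ssh
#           - http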
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import netaddr
from oslo_log import log
import six
from tempest_lib.common.utils import misc as misc_utils
from tempest_lib import exceptions as lib_exc
from tempest.common import fixed_network
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.services.network import resources as net_resources
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
@classmethod
def setup_clients(cls):
super(ScenarioTest, cls).setup_clients()
# Clients (in alphabetical order)
cls.flavors_client = cls.manager.flavors_client
cls.floating_ips_client = cls.manager.floating_ips_client
# Glance image client v1
cls.image_client = cls.manager.image_client
# Compute image client
cls.images_client = cls.manager.images_client
cls.keypairs_client = cls.manager.keypairs_client
# Nova security groups client
cls.security_groups_client = cls.manager.security_groups_client
cls.servers_client = cls.manager.servers_client
cls.volumes_client = cls.manager.volumes_client
cls.snapshots_client = cls.manager.snapshots_client
cls.interface_client = cls.manager.interfaces_client
# Neutron network client
cls.network_client = cls.manager.network_client
# Heat client
cls.orchestration_client = cls.manager.orchestration_client
# ## Methods to handle sync and async deletes
def setUp(self):
super(ScenarioTest, self).setUp()
self.cleanup_waits = []
# NOTE(mtreinish) This is safe to do in setUp instead of setUp class
# because scenario tests in the same test class should not share
# resources. If resources were shared between test cases then it
# should be a single scenario test instead of multiples.
# NOTE(yfried): this list is cleaned at the end of test_methods and
# not at the end of the class
self.addCleanup(self._wait_for_cleanups)
def delete_wrapper(self, delete_thing, *args, **kwargs):
"""Ignores NotFound exceptions for delete operations.
@param delete_thing: delete method of a resource. method will be
executed as delete_thing(*args, **kwargs)
"""
try:
# Tempest clients return dicts, so there is no common delete
# method available. Using a callable instead
delete_thing(*args, **kwargs)
except lib_exc.NotFound:
# If the resource is already missing, mission accomplished.
pass
def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
cleanup_callable, cleanup_args=None,
cleanup_kwargs=None, ignore_error=True):
"""Adds wait for async resource deletion at the end of cleanups
@param waiter_callable: callable to wait for the resource to delete
@param thing_id: the id of the resource to be cleaned-up
@param thing_id_param: the name of the id param in the waiter
@param cleanup_callable: method to pass to self.addCleanup with
the following *cleanup_args, **cleanup_kwargs
(usually a delete method).
"""
if cleanup_args is None:
cleanup_args = []
if cleanup_kwargs is None:
cleanup_kwargs = {}
self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
wait_dict = {
'waiter_callable': waiter_callable,
thing_id_param: thing_id
}
self.cleanup_waits.append(wait_dict)
def _wait_for_cleanups(self):
"""To handle async delete actions, a list of waits is added
which will be iterated over as the last step of clearing the
cleanup queue. That way all the delete calls are made up front
and the tests won't succeed unless the deletes are eventually
successful. This is the same basic approach used in the api tests to
limit cleanup execution time except here it is multi-resource,
because of the nature of the scenario tests.
"""
for wait in self.cleanup_waits:
waiter_callable = wait.pop('waiter_callable')
waiter_callable(**wait)
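# For illustration, a hedged sketch of how a test method registers an
# asynchronous delete (this mirrors the pattern used in create_server()
# below):
#
#     self.addCleanup_with_wait(
#         waiter_callable=self.servers_client.wait_for_server_termination,
#         thing_id=server['id'], thing_id_param='server_id',
#         cleanup_callable=self.delete_wrapper,
#         cleanup_args=[self.servers_client.delete_server, server['id']])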
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
def create_keypair(self, client=None):
if not client:
client = self.keypairs_client
name = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
body = client.create_keypair(name)
self.addCleanup(client.delete_keypair, name)
return body
def create_server(self, name=None, image=None, flavor=None,
wait_on_boot=True, wait_on_delete=True,
create_kwargs=None):
"""Creates VM instance.
@param image: image from which to create the instance
@param wait_on_boot: wait for status ACTIVE before continuing
@param wait_on_delete: force synchronous delete on cleanup
@param create_kwargs: additional details for instance creation
@return: server dict
"""
if name is None:
name = data_utils.rand_name(self.__class__.__name__)
if image is None:
image = CONF.compute.image_ref
if flavor is None:
flavor = CONF.compute.flavor_ref
if create_kwargs is None:
create_kwargs = {}
network = self.get_tenant_network()
create_kwargs = fixed_network.set_networks_kwarg(network,
create_kwargs)
LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
name, image, flavor)
server = self.servers_client.create_server(name, image, flavor,
**create_kwargs)
if wait_on_delete:
self.addCleanup(self.servers_client.wait_for_server_termination,
server['id'])
self.addCleanup_with_wait(
waiter_callable=self.servers_client.wait_for_server_termination,
thing_id=server['id'], thing_id_param='server_id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[self.servers_client.delete_server, server['id']])
if wait_on_boot:
waiters.wait_for_server_status(self.servers_client,
server_id=server['id'],
status='ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
server = self.servers_client.show_server(server['id'])
self.assertEqual(server['name'], name)
return server
def create_volume(self, size=None, name=None, snapshot_id=None,
imageRef=None, volume_type=None, wait_on_delete=True):
if name is None:
name = data_utils.rand_name(self.__class__.__name__)
volume = self.volumes_client.create_volume(
size=size, display_name=name, snapshot_id=snapshot_id,
imageRef=imageRef, volume_type=volume_type)
if wait_on_delete:
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
volume['id'])
self.addCleanup(self.delete_wrapper,
self.volumes_client.delete_volume, volume['id'])
else:
self.addCleanup_with_wait(
waiter_callable=self.volumes_client.wait_for_resource_deletion,
thing_id=volume['id'], thing_id_param='id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[self.volumes_client.delete_volume, volume['id']])
self.assertEqual(name, volume['display_name'])
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
# The volume retrieved on creation has a non-up-to-date status.
# Retrieval after it becomes active ensures correct details.
volume = self.volumes_client.show_volume(volume['id'])
return volume
def _create_loginable_secgroup_rule(self, secgroup_id=None):
_client = self.security_groups_client
if secgroup_id is None:
sgs = _client.list_security_groups()
for sg in sgs:
if sg['name'] == 'default':
secgroup_id = sg['id']
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security group.
rulesets = [
{
# ssh
'ip_proto': 'tcp',
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
},
{
# ping
'ip_proto': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
}
]
rules = list()
for ruleset in rulesets:
sg_rule = _client.create_security_group_rule(secgroup_id,
**ruleset)
self.addCleanup(self.delete_wrapper,
_client.delete_security_group_rule,
sg_rule['id'])
rules.append(sg_rule)
return rules
def _create_security_group(self):
# Create security group
sg_name = data_utils.rand_name(self.__class__.__name__)
sg_desc = sg_name + " description"
secgroup = self.security_groups_client.create_security_group(
sg_name, sg_desc)
self.assertEqual(secgroup['name'], sg_name)
self.assertEqual(secgroup['description'], sg_desc)
self.addCleanup(self.delete_wrapper,
self.security_groups_client.delete_security_group,
secgroup['id'])
# Add rules to the security group
self._create_loginable_secgroup_rule(secgroup['id'])
return secgroup
def get_remote_client(self, server_or_ip, username=None, private_key=None,
log_console_of_servers=None):
"""Get a SSH client to a remote server
@param server_or_ip a server object as returned by Tempest compute
client or an IP address to connect to
@param username name of the Linux account on the remote server
@param private_key the SSH private key to use
@param log_console_of_servers a list of server objects. Each server
in the list will have its console printed in the logs in case the
SSH connection failed to be established
@return a RemoteClient object
"""
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
addrs = server_or_ip['addresses'][CONF.compute.network_for_ssh]
try:
ip = (addr['addr'] for addr in addrs if
netaddr.valid_ipv4(addr['addr'])).next()
except StopIteration:
raise lib_exc.NotFound("No IPv4 addresses to use for SSH to "
"remote server.")
if username is None:
username = CONF.scenario.ssh_user
# Set this with 'keypair' or others to log in with keypair or
# username/password.
if CONF.compute.ssh_auth_method == 'keypair':
password = None
if private_key is None:
private_key = self.keypair['private_key']
else:
password = CONF.compute.image_ssh_password
private_key = None
linux_client = remote_client.RemoteClient(ip, username,
pkey=private_key,
password=password)
try:
linux_client.validate_authentication()
except Exception as e:
message = ('Initializing SSH connection to %(ip)s failed. '
'Error: %(error)s' % {'ip': ip, 'error': e})
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
LOG.exception(message)
# If we don't explicitly set for which servers we want to
# log the console output then all the servers will be logged.
# See the definition of _log_console_output()
self._log_console_output(log_console_of_servers)
raise
return linux_client
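# Typical usage -- a hedged sketch; the floating IP variable and the
# 'cirros' username are assumptions taken for illustration only:
#
#     ssh_client = self.get_remote_client(
#         floating_ip['ip'],
#         username='cirros',
#         private_key=self.keypair['private_key'])
#     ssh_client.ping_host('8.8.8.8')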
def _image_create(self, name, fmt, path,
disk_format=None, properties=None):
if properties is None:
properties = {}
name = data_utils.rand_name('%s-' % name)
image_file = open(path, 'rb')
self.addCleanup(image_file.close)
params = {
'name': name,
'container_format': fmt,
'disk_format': disk_format or fmt,
'is_public': 'False',
}
params['properties'] = properties
image = self.image_client.create_image(**params)
self.addCleanup(self.image_client.delete_image, image['id'])
self.assertEqual("queued", image['status'])
self.image_client.update_image(image['id'], data=image_file)
return image['id']
def glance_image_create(self):
img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
img_container_format = CONF.scenario.img_container_format
img_disk_format = CONF.scenario.img_disk_format
img_properties = CONF.scenario.img_properties
LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
"properties: %s, ami: %s, ari: %s, aki: %s" %
(img_path, img_container_format, img_disk_format,
img_properties, ami_img_path, ari_img_path, aki_img_path))
try:
self.image = self._image_create('scenario-img',
img_container_format,
img_path,
disk_format=img_disk_format,
properties=img_properties)
except IOError:
LOG.debug("A qcow2 image was not found. Try to get a uec image.")
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
self.image = self._image_create('scenario-ami', 'ami',
path=ami_img_path,
properties=properties)
LOG.debug("image:%s" % self.image)
def _log_console_output(self, servers=None):
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
return
if not servers:
servers = self.servers_client.list_servers()
servers = servers['servers']
for server in servers:
console_output = self.servers_client.get_console_output(
server['id'], length=None).data
LOG.debug('Console output for %s\nbody=\n%s',
server['id'], console_output)
def _log_net_info(self, exc):
# network debug is called as part of ssh init
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
def create_server_snapshot(self, server, name=None):
# Glance client
_image_client = self.image_client
# Compute client
_images_client = self.images_client
if name is None:
name = data_utils.rand_name('scenario-snapshot')
LOG.debug("Creating a snapshot image for server: %s", server['name'])
image = _images_client.create_image(server['id'], name)
image_id = image.response['location'].split('images/')[1]
_image_client.wait_for_image_status(image_id, 'active')
self.addCleanup_with_wait(
waiter_callable=_image_client.wait_for_resource_deletion,
thing_id=image_id, thing_id_param='id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[_image_client.delete_image, image_id])
snapshot_image = _image_client.get_image_meta(image_id)
image_name = snapshot_image['name']
self.assertEqual(name, image_name)
LOG.debug("Created snapshot image %s for server %s",
image_name, server['name'])
return snapshot_image
def nova_volume_attach(self):
volume = self.servers_client.attach_volume(
self.server['id'], self.volume['id'], '/dev/%s'
% CONF.compute.volume_device_name)
self.assertEqual(self.volume['id'], volume['id'])
self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
# Refresh the volume after the attachment
self.volume = self.volumes_client.show_volume(volume['id'])
def nova_volume_detach(self):
self.servers_client.detach_volume(self.server['id'], self.volume['id'])
self.volumes_client.wait_for_volume_status(self.volume['id'],
'available')
volume = self.volumes_client.show_volume(self.volume['id'])
self.assertEqual('available', volume['status'])
def rebuild_server(self, server_id, image=None,
preserve_ephemeral=False, wait=True,
rebuild_kwargs=None):
if image is None:
image = CONF.compute.image_ref
rebuild_kwargs = rebuild_kwargs or {}
LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
server_id, image, preserve_ephemeral)
self.servers_client.rebuild(server_id=server_id, image_ref=image,
preserve_ephemeral=preserve_ephemeral,
**rebuild_kwargs)
if wait:
waiters.wait_for_server_status(self.servers_client,
server_id, 'ACTIVE')
def ping_ip_address(self, ip_address, should_succeed=True,
ping_timeout=None):
timeout = ping_timeout or CONF.compute.ping_timeout
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return (proc.returncode == 0) == should_succeed
return tempest.test.call_until_true(ping, timeout, 1)
def check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True):
"""
:param ip_address: server to test against
:param username: server's ssh username
:param private_key: server's ssh private key to be used
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
negative - attempt ping and fail if succeed
:raises: AssertError if the result of the connectivity check does
not match the value of the should_connect param
"""
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
self.assertTrue(self.ping_ip_address(ip_address,
should_succeed=should_connect),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
self.get_remote_client(ip_address, username, private_key)
def check_public_network_connectivity(self, ip_address, username,
private_key, should_connect=True,
msg=None, servers=None):
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
LOG.debug('checking network connections to IP %s with user: %s' %
(ip_address, username))
try:
self.check_vm_connectivity(ip_address,
username,
private_key,
should_connect=should_connect)
except Exception:
ex_msg = 'Public network connectivity check failed'
if msg:
ex_msg += ": " + msg
LOG.exception(ex_msg)
self._log_console_output(servers)
raise
def create_floating_ip(self, thing, pool_name=None):
"""Creates a floating IP and associates to a server using
Nova clients
"""
floating_ip = self.floating_ips_client.create_floating_ip(pool_name)
self.addCleanup(self.delete_wrapper,
self.floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], thing['id'])
return floating_ip
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
This class provides helpers for network scenario tests using the neutron
API. Helpers from the ancestor class which use the nova network API are
overridden with the neutron API.
This class also enforces using Neutron instead of nova-network.
Subclassed tests will be skipped if Neutron is not enabled
"""
credentials = ['primary', 'admin']
@classmethod
def skip_checks(cls):
super(NetworkScenarioTest, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException('Neutron not available')
@classmethod
def resource_setup(cls):
super(NetworkScenarioTest, cls).resource_setup()
cls.tenant_id = cls.manager.identity_client.tenant_id
def _create_network(self, client=None, tenant_id=None,
namestart='network-smoke-'):
if not client:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = client.create_network(name=name, tenant_id=tenant_id)
network = net_resources.DeletableNetwork(client=client,
**result['network'])
self.assertEqual(network.name, name)
self.addCleanup(self.delete_wrapper, network.delete)
return network
def _list_networks(self, *args, **kwargs):
"""List networks using admin creds """
networks_list = self.admin_manager.network_client.list_networks(
*args, **kwargs)
return networks_list['networks']
def _list_subnets(self, *args, **kwargs):
"""List subnets using admin creds """
subnets_list = self.admin_manager.network_client.list_subnets(
*args, **kwargs)
return subnets_list['subnets']
def _list_routers(self, *args, **kwargs):
"""List routers using admin creds """
routers_list = self.admin_manager.network_client.list_routers(
*args, **kwargs)
return routers_list['routers']
def _list_ports(self, *args, **kwargs):
"""List ports using admin creds """
ports_list = self.admin_manager.network_client.list_ports(
*args, **kwargs)
return ports_list['ports']
def _create_subnet(self, network, client=None, namestart='subnet-smoke',
**kwargs):
"""
Create a subnet for the given network within the cidr block
configured for tenant networks.
"""
if not client:
client = self.network_client
def cidr_in_use(cidr, tenant_id):
"""
:return: True if a subnet with the given cidr already exists in the
tenant, False otherwise
"""
cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
return len(cidr_in_use) != 0
ip_version = kwargs.pop('ip_version', 4)
if ip_version == 6:
tenant_cidr = netaddr.IPNetwork(
CONF.network.tenant_network_v6_cidr)
num_bits = CONF.network.tenant_network_v6_mask_bits
else:
tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
num_bits = CONF.network.tenant_network_mask_bits
result = None
str_cidr = None
# Repeatedly attempt subnet creation with sequential cidr
# blocks until an unallocated block is found.
for subnet_cidr in tenant_cidr.subnet(num_bits):
str_cidr = str(subnet_cidr)
if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
continue
subnet = dict(
name=data_utils.rand_name(namestart),
network_id=network.id,
tenant_id=network.tenant_id,
cidr=str_cidr,
ip_version=ip_version,
**kwargs
)
try:
result = client.create_subnet(**subnet)
break
except lib_exc.Conflict as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
self.assertIsNotNone(result, 'Unable to allocate tenant network')
subnet = net_resources.DeletableSubnet(client=client,
**result['subnet'])
self.assertEqual(subnet.cidr, str_cidr)
self.addCleanup(self.delete_wrapper, subnet.delete)
return subnet
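# For illustration, the allocation loop above relies on netaddr's
# IPNetwork.subnet() generator; a minimal sketch (the CIDR below is an
# arbitrary example, not a tempest default):
#
#     >>> import netaddr
#     >>> gen = netaddr.IPNetwork('10.100.0.0/16').subnet(28)
#     >>> str(next(gen)), str(next(gen))
#     ('10.100.0.0/28', '10.100.0.16/28')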
def _create_port(self, network_id, client=None, namestart='port-quotatest',
**kwargs):
if not client:
client = self.network_client
name = data_utils.rand_name(namestart)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
self.assertIsNotNone(result, 'Unable to allocate port')
port = net_resources.DeletablePort(client=client,
**result['port'])
self.addCleanup(self.delete_wrapper, port.delete)
return port
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
ports = self._list_ports(device_id=server['id'],
fixed_ip=ip_addr)
self.assertEqual(len(ports), 1,
"Unable to determine which port to target.")
# it might happen here that this port has more than one IP address,
# as in the dual-stack case when this port is created on 2 subnets
for ip46 in ports[0]['fixed_ips']:
ip = ip46['ip_address']
if netaddr.valid_ipv4(ip):
return ports[0]['id'], ip
def _get_network_by_name(self, network_name):
net = self._list_networks(name=network_name)
self.assertNotEqual(len(net), 0,
"Unable to get network by name: %s" % network_name)
return net_resources.AttributeDict(net[0])
def create_floating_ip(self, thing, external_network_id=None,
port_id=None, client=None):
"""Creates a floating IP and associates to a resource/port using
Neutron client
"""
if not external_network_id:
external_network_id = CONF.network.public_network_id
if not client:
client = self.network_client
if not port_id:
port_id, ip4 = self._get_server_port_id_and_ip4(thing)
else:
ip4 = None
result = client.create_floatingip(
floating_network_id=external_network_id,
port_id=port_id,
tenant_id=thing['tenant_id'],
fixed_ip_address=ip4
)
floating_ip = net_resources.DeletableFloatingIp(
client=client,
**result['floatingip'])
self.addCleanup(self.delete_wrapper, floating_ip.delete)
return floating_ip
def _associate_floating_ip(self, floating_ip, server):
port_id, _ = self._get_server_port_id_and_ip4(server)
floating_ip.update(port_id=port_id)
self.assertEqual(port_id, floating_ip.port_id)
return floating_ip
def _disassociate_floating_ip(self, floating_ip):
"""
:param floating_ip: type DeletableFloatingIp
"""
floating_ip.update(port_id=None)
self.assertIsNone(floating_ip.port_id)
return floating_ip
def check_floating_ip_status(self, floating_ip, status):
"""Verifies floatingip reaches the given status
:param floating_ip: net_resources.DeletableFloatingIp floating IP to
to check status
:param status: target status
:raises: AssertionError if status doesn't match
"""
def refresh():
floating_ip.refresh()
return status == floating_ip.status
tempest.test.call_until_true(refresh,
CONF.network.build_timeout,
CONF.network.build_interval)
self.assertEqual(status, floating_ip.status,
message="FloatingIP: {fp} is at status: {cst}. "
"failed to reach status: {st}"
.format(fp=floating_ip, cst=floating_ip.status,
st=status))
LOG.info("FloatingIP: {fp} is at status: {st}"
.format(fp=floating_ip, st=status))
def _check_tenant_network_connectivity(self, server,
username,
private_key,
should_connect=True,
servers_for_debug=None):
if not CONF.network.tenant_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
LOG.info(msg)
return
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
try:
for net_name, ip_addresses in six.iteritems(server['addresses']):
for ip_address in ip_addresses:
self.check_vm_connectivity(ip_address['addr'],
username,
private_key,
should_connect=should_connect)
except Exception as e:
LOG.exception('Tenant network connectivity check failed')
self._log_console_output(servers_for_debug)
self._log_net_info(e)
raise
def _check_remote_connectivity(self, source, dest, should_succeed=True):
"""
Check whether dest can be pinged via an ssh connection from source.
:param source: RemoteClient: an ssh connection from which to ping
:param dest: an IP address to ping against
:param should_succeed: boolean, whether the ping should succeed or not
:returns: boolean -- True if the observed ping result matches
should_succeed within the timeout, False otherwise
"""
def ping_remote():
try:
source.ping_host(dest)
except lib_exc.SSHExecCommandFailed:
LOG.warn('Failed to ping IP: %s via an ssh connection from: %s.'
% (dest, source.ssh_client.host))
return not should_succeed
return should_succeed
return tempest.test.call_until_true(ping_remote,
CONF.compute.ping_timeout,
1)
def _create_security_group(self, client=None, tenant_id=None,
namestart='secgroup-smoke'):
if client is None:
client = self.network_client
if tenant_id is None:
tenant_id = client.tenant_id
secgroup = self._create_empty_security_group(namestart=namestart,
client=client,
tenant_id=tenant_id)
# Add rules to the security group
rules = self._create_loginable_secgroup_rule(client=client,
secgroup=secgroup)
for rule in rules:
self.assertEqual(tenant_id, rule.tenant_id)
self.assertEqual(secgroup.id, rule.security_group_id)
return secgroup
def _create_empty_security_group(self, client=None, tenant_id=None,
namestart='secgroup-smoke'):
"""Create a security group without rules.
Default rules will be created:
- IPv4 egress to any
- IPv6 egress to any
:param tenant_id: secgroup will be created in this tenant
:returns: DeletableSecurityGroup -- containing the secgroup created
"""
if client is None:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
sg_dict = dict(name=sg_name,
description=sg_desc)
sg_dict['tenant_id'] = tenant_id
result = client.create_security_group(**sg_dict)
secgroup = net_resources.DeletableSecurityGroup(
client=client,
**result['security_group']
)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(tenant_id, secgroup.tenant_id)
self.assertEqual(secgroup.description, sg_desc)
self.addCleanup(self.delete_wrapper, secgroup.delete)
return secgroup
def _default_security_group(self, client=None, tenant_id=None):
"""Get default secgroup for given tenant_id.
:returns: DeletableSecurityGroup -- default secgroup for given tenant
"""
if client is None:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
sgs = [
sg for sg in client.list_security_groups().values()[0]
if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
]
msg = "No default security group for tenant %s." % (tenant_id)
self.assertTrue(len(sgs) > 0, msg)
return net_resources.DeletableSecurityGroup(client=client,
**sgs[0])
def _create_security_group_rule(self, secgroup=None, client=None,
tenant_id=None, **kwargs):
"""Create a rule from a dictionary of rule parameters.
Create a rule in a secgroup. If secgroup is not defined, the default
secgroup for tenant_id will be used.
:param secgroup: type DeletableSecurityGroup.
:param tenant_id: if secgroup is not passed -- the tenant in which to
search for the default secgroup
:param kwargs: a dictionary containing rule parameters:
for example, to allow incoming ssh:
rule = {
direction: 'ingress',
protocol: 'tcp',
port_range_min: 22,
port_range_max: 22
}
"""
if client is None:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
if secgroup is None:
secgroup = self._default_security_group(client=client,
tenant_id=tenant_id)
ruleset = dict(security_group_id=secgroup.id,
tenant_id=secgroup.tenant_id)
ruleset.update(kwargs)
sg_rule = client.create_security_group_rule(**ruleset)
sg_rule = net_resources.DeletableSecurityGroupRule(
client=client,
**sg_rule['security_group_rule']
)
self.addCleanup(self.delete_wrapper, sg_rule.delete)
self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
self.assertEqual(secgroup.id, sg_rule.security_group_id)
return sg_rule
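# Hedged usage sketch: allow ingress TCP on port 8080 to the default
# secgroup of the current tenant (the port number is only an example):
#
#     self._create_security_group_rule(
#         direction='ingress',
#         protocol='tcp',
#         port_range_min=8080,
#         port_range_max=8080)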
def _create_loginable_secgroup_rule(self, client=None, secgroup=None):
"""These rules are intended to permit inbound ssh and icmp
traffic from all sources, so no group_id is provided.
Setting a group_id would only permit traffic from ports
belonging to the same security group.
"""
if client is None:
client = self.network_client
rules = []
rulesets = [
dict(
# ssh
protocol='tcp',
port_range_min=22,
port_range_max=22,
),
dict(
# ping
protocol='icmp',
),
dict(
# ipv6-icmp for ping6
protocol='icmp',
ethertype='IPv6',
)
]
for ruleset in rulesets:
for r_direction in ['ingress', 'egress']:
ruleset['direction'] = r_direction
try:
sg_rule = self._create_security_group_rule(
client=client, secgroup=secgroup, **ruleset)
except lib_exc.Conflict as ex:
# if the rule already exists - skip it and continue
msg = 'Security group rule already exists'
if msg not in ex._error_string:
raise ex
else:
self.assertEqual(r_direction, sg_rule.direction)
rules.append(sg_rule)
return rules
def _ssh_to_server(self, server, private_key):
ssh_login = CONF.compute.image_ssh_user
return self.get_remote_client(server,
username=ssh_login,
private_key=private_key)
def _get_router(self, client=None, tenant_id=None):
"""Retrieve a router for the given tenant id.
If a public router has been configured, it will be returned.
If a public router has not been configured, but a public
network has, a tenant router will be created and returned that
routes traffic to the public network.
"""
if not client:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
router_id = CONF.network.public_router_id
network_id = CONF.network.public_network_id
if router_id:
body = client.show_router(router_id)
return net_resources.AttributeDict(**body['router'])
elif network_id:
router = self._create_router(client, tenant_id)
router.set_gateway(network_id)
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
def _create_router(self, client=None, tenant_id=None,
namestart='router-smoke'):
if not client:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = client.create_router(name=name,
admin_state_up=True,
tenant_id=tenant_id)
router = net_resources.DeletableRouter(client=client,
**result['router'])
self.assertEqual(router.name, name)
self.addCleanup(self.delete_wrapper, router.delete)
return router
def _update_router_admin_state(self, router, admin_state_up):
router.update(admin_state_up=admin_state_up)
self.assertEqual(admin_state_up, router.admin_state_up)
def create_networks(self, client=None, tenant_id=None,
dns_nameservers=None):
"""Create a network with a subnet connected to a router.
The baremetal driver is a special case since all nodes are
on the same shared network.
:param client: network client to create resources with.
:param tenant_id: id of tenant to create resources in.
:param dns_nameservers: list of dns servers to send to subnet.
:returns: network, subnet, router
"""
if CONF.baremetal.driver_enabled:
# NOTE(Shrews): This exception is for environments where tenant
# credential isolation is available, but network separation is
# not (the current baremetal case). Likely can be removed when
# test account mgmt is reworked:
# https://blueprints.launchpad.net/tempest/+spec/test-accounts
if not CONF.compute.fixed_network_name:
m = 'fixed_network_name must be specified in config'
raise exceptions.InvalidConfiguration(m)
network = self._get_network_by_name(
CONF.compute.fixed_network_name)
router = None
subnet = None
else:
network = self._create_network(client=client, tenant_id=tenant_id)
router = self._get_router(client=client, tenant_id=tenant_id)
subnet_kwargs = dict(network=network, client=client)
# use explicit check because empty list is a valid option
if dns_nameservers is not None:
subnet_kwargs['dns_nameservers'] = dns_nameservers
subnet = self._create_subnet(**subnet_kwargs)
subnet.add_to_router(router.id)
return network, subnet, router
def create_server(self, name=None, image=None, flavor=None,
wait_on_boot=True, wait_on_delete=True,
create_kwargs=None):
vnic_type = CONF.network.port_vnic_type
# If vnic_type is configured, create a port for
# every network
if vnic_type:
ports = []
networks = []
create_port_body = {'binding:vnic_type': vnic_type,
'namestart': 'port-smoke'}
if create_kwargs:
net_client = create_kwargs.get("network_client",
self.network_client)
# Convert security group names to security group ids
# to pass to create_port
if create_kwargs.get('security_groups'):
security_groups = net_client.list_security_groups().get(
'security_groups')
sec_dict = dict([(s['name'], s['id'])
for s in security_groups])
sec_groups_names = [s['name'] for s in create_kwargs[
'security_groups']]
security_groups_ids = [sec_dict[s]
for s in sec_groups_names]
if security_groups_ids:
create_port_body[
'security_groups'] = security_groups_ids
networks = create_kwargs.get('networks')
else:
net_client = self.network_client
# If no networks are passed to us, we look up the tenant's
# private networks and create a port if there is only one
# private network. This is the same behaviour we would expect
# when passing the call to the clients with no networks
if not networks:
networks = net_client.list_networks(filters={
'router:external': False})
self.assertEqual(1, len(networks),
"There is more than one"
" network for the tenant")
for net in networks:
net_id = net['uuid']
port = self._create_port(network_id=net_id,
client=net_client,
**create_port_body)
ports.append({'port': port.id})
if ports:
create_kwargs['networks'] = ports
return super(NetworkScenarioTest, self).create_server(
name=name, image=image, flavor=flavor,
wait_on_boot=wait_on_boot, wait_on_delete=wait_on_delete,
create_kwargs=create_kwargs)
# power/provision states as of icehouse
class BaremetalPowerStates(object):
"""Possible power states of an Ironic node."""
POWER_ON = 'power on'
POWER_OFF = 'power off'
REBOOT = 'rebooting'
SUSPEND = 'suspended'
class BaremetalProvisionStates(object):
"""Possible provision states of an Ironic node."""
NOSTATE = None
INIT = 'initializing'
ACTIVE = 'active'
BUILDING = 'building'
DEPLOYWAIT = 'wait call-back'
DEPLOYING = 'deploying'
DEPLOYFAIL = 'deploy failed'
DEPLOYDONE = 'deploy complete'
DELETING = 'deleting'
DELETED = 'deleted'
ERROR = 'error'
class BaremetalScenarioTest(ScenarioTest):
credentials = ['primary', 'admin']
@classmethod
def skip_checks(cls):
super(BaremetalScenarioTest, cls).skip_checks()
if (not CONF.service_available.ironic or
not CONF.baremetal.driver_enabled):
msg = 'Ironic not available or Ironic compute driver not enabled'
raise cls.skipException(msg)
@classmethod
def setup_clients(cls):
super(BaremetalScenarioTest, cls).setup_clients()
cls.baremetal_client = cls.admin_manager.baremetal_client
@classmethod
def resource_setup(cls):
super(BaremetalScenarioTest, cls).resource_setup()
# allow any issues obtaining the node list to raise early
cls.baremetal_client.list_nodes()
def _node_state_timeout(self, node_id, state_attr,
target_states, timeout=10, interval=1):
if not isinstance(target_states, list):
target_states = [target_states]
def check_state():
node = self.get_node(node_id=node_id)
if node.get(state_attr) in target_states:
return True
return False
if not tempest.test.call_until_true(
check_state, timeout, interval):
msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
(node_id, state_attr, target_states))
raise exceptions.TimeoutException(msg)
def wait_provisioning_state(self, node_id, state, timeout):
self._node_state_timeout(
node_id=node_id, state_attr='provision_state',
target_states=state, timeout=timeout)
def wait_power_state(self, node_id, state):
self._node_state_timeout(
node_id=node_id, state_attr='power_state',
target_states=state, timeout=CONF.baremetal.power_timeout)
def wait_node(self, instance_id):
"""Waits for a node to be associated with instance_id."""
def _get_node():
node = None
try:
node = self.get_node(instance_id=instance_id)
except lib_exc.NotFound:
pass
return node is not None
if not tempest.test.call_until_true(
_get_node, CONF.baremetal.association_timeout, 1):
msg = ('Timed out waiting to get Ironic node by instance id %s'
% instance_id)
raise exceptions.TimeoutException(msg)
def get_node(self, node_id=None, instance_id=None):
if node_id:
_, body = self.baremetal_client.show_node(node_id)
return body
elif instance_id:
_, body = self.baremetal_client.show_node_by_instance_uuid(
instance_id)
if body['nodes']:
return body['nodes'][0]
def get_ports(self, node_uuid):
ports = []
_, body = self.baremetal_client.list_node_ports(node_uuid)
for port in body['ports']:
_, p = self.baremetal_client.show_port(port['uuid'])
ports.append(p)
return ports
def add_keypair(self):
self.keypair = self.create_keypair()
def verify_connectivity(self, ip=None):
if ip:
dest = self.get_remote_client(ip)
else:
dest = self.get_remote_client(self.instance)
dest.validate_authentication()
def boot_instance(self):
create_kwargs = {
'key_name': self.keypair['name']
}
self.instance = self.create_server(
wait_on_boot=False, create_kwargs=create_kwargs)
self.wait_node(self.instance['id'])
self.node = self.get_node(instance_id=self.instance['id'])
self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)
self.wait_provisioning_state(
self.node['uuid'],
[BaremetalProvisionStates.DEPLOYWAIT,
BaremetalProvisionStates.ACTIVE],
timeout=15)
self.wait_provisioning_state(self.node['uuid'],
BaremetalProvisionStates.ACTIVE,
timeout=CONF.baremetal.active_timeout)
waiters.wait_for_server_status(self.servers_client,
self.instance['id'], 'ACTIVE')
self.node = self.get_node(instance_id=self.instance['id'])
self.instance = self.servers_client.show_server(self.instance['id'])
def terminate_instance(self):
self.servers_client.delete_server(self.instance['id'])
self.wait_power_state(self.node['uuid'],
BaremetalPowerStates.POWER_OFF)
self.wait_provisioning_state(
self.node['uuid'],
BaremetalProvisionStates.NOSTATE,
timeout=CONF.baremetal.unprovision_timeout)
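# Example usage (sketch, not part of the original class): a scenario subclass
# built on BaremetalScenarioTest would typically chain the helpers above to
# drive a full instance lifecycle. The test name below is hypothetical; the
# helper methods are the real ones defined above.
#
#     def test_baremetal_server_ops(self):
#         self.add_keypair()
#         self.boot_instance()
#         self.verify_connectivity()
#         self.terminate_instance()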
class EncryptionScenarioTest(ScenarioTest):
"""
Base class for encryption scenario tests
"""
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(EncryptionScenarioTest, cls).setup_clients()
cls.admin_volume_types_client = cls.os_adm.volume_types_client
def _wait_for_volume_status(self, status):
self.status_timeout(
self.volume_client.volumes, self.volume.id, status)
def nova_boot(self):
self.keypair = self.create_keypair()
create_kwargs = {'key_name': self.keypair['name']}
self.server = self.create_server(image=self.image,
create_kwargs=create_kwargs)
def create_volume_type(self, client=None, name=None):
if not client:
client = self.admin_volume_types_client
if not name:
name = 'generic'
randomized_name = data_utils.rand_name('scenario-type-' + name)
LOG.debug("Creating a volume type: %s", randomized_name)
body = client.create_volume_type(
randomized_name)
self.assertIn('id', body)
self.addCleanup(client.delete_volume_type, body['id'])
return body
def create_encryption_type(self, client=None, type_id=None, provider=None,
key_size=None, cipher=None,
control_location=None):
if not client:
client = self.admin_volume_types_client
if not type_id:
volume_type = self.create_volume_type()
type_id = volume_type['id']
LOG.debug("Creating an encryption type for volume type: %s", type_id)
client.create_encryption_type(
type_id, provider=provider, key_size=key_size, cipher=cipher,
control_location=control_location)
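# Example usage (sketch): an encryption scenario test would typically create a
# volume type and attach an encryption type to it using the helpers above.
# The provider and control_location values are illustrative, not mandated by
# this base class.
#
#     volume_type = self.create_volume_type()
#     self.create_encryption_type(type_id=volume_type['id'],
#                                 provider='luks',
#                                 control_location='front-end')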
class SwiftScenarioTest(ScenarioTest):
"""
Provide harness to do Swift scenario tests.
Subclasses implement the tests that use the methods provided by this
class.
"""
@classmethod
def skip_checks(cls):
super(SwiftScenarioTest, cls).skip_checks()
if not CONF.service_available.swift:
skip_msg = ("%s skipped as swift is not available" %
cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(SwiftScenarioTest, cls).setup_credentials()
operator_role = CONF.object_storage.operator_role
cls.os_operator = cls.get_client_manager(roles=[operator_role])
@classmethod
def setup_clients(cls):
super(SwiftScenarioTest, cls).setup_clients()
# Clients for Swift
cls.account_client = cls.os_operator.account_client
cls.container_client = cls.os_operator.container_client
cls.object_client = cls.os_operator.object_client
def get_swift_stat(self):
"""get swift status for our user account."""
self.account_client.list_account_containers()
LOG.debug('Swift status information obtained successfully')
def create_container(self, container_name=None):
name = container_name or data_utils.rand_name(
'swift-scenario-container')
self.container_client.create_container(name)
# list the container to ensure it was created
self.list_and_check_container_objects(name)
LOG.debug('Container %s created' % (name))
self.addCleanup(self.delete_wrapper,
self.container_client.delete_container,
name)
return name
def delete_container(self, container_name):
self.container_client.delete_container(container_name)
LOG.debug('Container %s deleted' % (container_name))
def upload_object_to_container(self, container_name, obj_name=None):
obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
obj_data = data_utils.arbitrary_string()
self.object_client.create_object(container_name, obj_name, obj_data)
self.addCleanup(self.delete_wrapper,
self.object_client.delete_object,
container_name,
obj_name)
return obj_name, obj_data
def delete_object(self, container_name, filename):
self.object_client.delete_object(container_name, filename)
self.list_and_check_container_objects(container_name,
not_present_obj=[filename])
def list_and_check_container_objects(self, container_name,
present_obj=None,
not_present_obj=None):
"""
List objects for a given container and assert which are present and
which are not.
"""
if present_obj is None:
present_obj = []
if not_present_obj is None:
not_present_obj = []
_, object_list = self.container_client.list_container_contents(
container_name)
if present_obj:
for obj in present_obj:
self.assertIn(obj, object_list)
if not_present_obj:
for obj in not_present_obj:
self.assertNotIn(obj, object_list)
def change_container_acl(self, container_name, acl):
metadata_param = {'metadata_prefix': 'x-container-',
'metadata': {'read': acl}}
self.container_client.update_container_metadata(container_name,
**metadata_param)
resp, _ = self.container_client.list_container_metadata(container_name)
self.assertEqual(resp['x-container-read'], acl)
def download_and_verify(self, container_name, obj_name, expected_data):
_, obj = self.object_client.get_object(container_name, obj_name)
self.assertEqual(obj, expected_data)
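# Example usage (sketch): a Swift scenario test built on this harness would
# typically exercise the container/object helpers above end to end. The flow
# is illustrative; only methods defined in SwiftScenarioTest are used.
#
#     container = self.create_container()
#     obj_name, obj_data = self.upload_object_to_container(container)
#     self.list_and_check_container_objects(container, present_obj=[obj_name])
#     self.download_and_verify(container, obj_name, obj_data)
#     self.delete_object(container, obj_name)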
|
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from neutron.tests.api import base
from neutron.tests.tempest import config
from neutron.tests.tempest import exceptions
from neutron.tests.tempest import test
CONF = config.CONF
class FWaaSExtensionTestJSON(base.BaseNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List firewall rules
Create firewall rule
Update firewall rule
Delete firewall rule
Show firewall rule
List firewall policies
Create firewall policy
Update firewall policy
Insert firewall rule to policy
Remove firewall rule from policy
Insert firewall rule after/before rule in policy
Update firewall policy audited attribute
Delete firewall policy
Show firewall policy
List firewall
Create firewall
Update firewall
Delete firewall
Show firewall
"""
@classmethod
def resource_setup(cls):
super(FWaaSExtensionTestJSON, cls).resource_setup()
if not test.is_extension_enabled('fwaas', 'network'):
msg = "FWaaS Extension not enabled."
raise cls.skipException(msg)
cls.fw_rule = cls.create_firewall_rule("allow", "tcp")
cls.fw_policy = cls.create_firewall_policy()
def _try_delete_policy(self, policy_id):
# delete policy, if it exists
try:
self.client.delete_firewall_policy(policy_id)
# if policy is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
def _try_delete_rule(self, rule_id):
# delete rule, if it exists
try:
self.client.delete_firewall_rule(rule_id)
# if rule is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
def _try_delete_firewall(self, fw_id):
# delete firewall, if it exists
try:
self.client.delete_firewall(fw_id)
# if firewall is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
self.client.wait_for_resource_deletion('firewall', fw_id)
def _wait_until_ready(self, fw_id):
target_states = ('ACTIVE', 'CREATED')
def _wait():
firewall = self.client.show_firewall(fw_id)
firewall = firewall['firewall']
return firewall['status'] in target_states
if not test.call_until_true(_wait, CONF.network.build_timeout,
CONF.network.build_interval):
m = ("Timed out waiting for firewall %s to reach %s state(s)" %
(fw_id, target_states))
raise exceptions.TimeoutException(m)
@test.idempotent_id('1b84cf01-9c09-4ce7-bc72-b15e39076468')
def test_list_firewall_rules(self):
# List firewall rules
fw_rules = self.client.list_firewall_rules()
fw_rules = fw_rules['firewall_rules']
self.assertIn((self.fw_rule['id'],
self.fw_rule['name'],
self.fw_rule['action'],
self.fw_rule['protocol'],
self.fw_rule['ip_version'],
self.fw_rule['enabled']),
[(m['id'],
m['name'],
m['action'],
m['protocol'],
m['ip_version'],
m['enabled']) for m in fw_rules])
@test.idempotent_id('563564f7-7077-4f5e-8cdc-51f37ae5a2b9')
def test_create_update_delete_firewall_rule(self):
# Create firewall rule
body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="tcp")
fw_rule_id = body['firewall_rule']['id']
# Update firewall rule
body = self.client.update_firewall_rule(fw_rule_id,
shared=True)
self.assertTrue(body["firewall_rule"]['shared'])
# Delete firewall rule
self.client.delete_firewall_rule(fw_rule_id)
# Confirm deletion
fw_rules = self.client.list_firewall_rules()
self.assertNotIn(fw_rule_id,
[m['id'] for m in fw_rules['firewall_rules']])
@test.idempotent_id('3ff8c08e-26ff-4034-ae48-810ed213a998')
def test_show_firewall_rule(self):
# show a created firewall rule
fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
for key, value in fw_rule['firewall_rule'].iteritems():
self.assertEqual(self.fw_rule[key], value)
@test.idempotent_id('1086dd93-a4c0-4bbb-a1bd-6d4bc62c199f')
def test_list_firewall_policies(self):
fw_policies = self.client.list_firewall_policies()
fw_policies = fw_policies['firewall_policies']
self.assertIn((self.fw_policy['id'],
self.fw_policy['name'],
self.fw_policy['firewall_rules']),
[(m['id'],
m['name'],
m['firewall_rules']) for m in fw_policies])
@test.idempotent_id('bbf37b6c-498c-421e-9c95-45897d3ed775')
def test_create_update_delete_firewall_policy(self):
# Create firewall policy
body = self.client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
fw_policy_id = body['firewall_policy']['id']
self.addCleanup(self._try_delete_policy, fw_policy_id)
# Update firewall policy
body = self.client.update_firewall_policy(fw_policy_id,
shared=True,
name="updated_policy")
updated_fw_policy = body["firewall_policy"]
self.assertTrue(updated_fw_policy['shared'])
self.assertEqual("updated_policy", updated_fw_policy['name'])
# Delete firewall policy
self.client.delete_firewall_policy(fw_policy_id)
# Confirm deletion
fw_policies = self.client.list_firewall_policies()
fw_policies = fw_policies['firewall_policies']
self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
@test.idempotent_id('1df59b3a-517e-41d4-96f6-fc31cf4ecff2')
def test_show_firewall_policy(self):
# show a created firewall policy
fw_policy = self.client.show_firewall_policy(self.fw_policy['id'])
fw_policy = fw_policy['firewall_policy']
for key, value in fw_policy.iteritems():
self.assertEqual(self.fw_policy[key], value)
@test.idempotent_id('02082a03-3cdd-4789-986a-1327dd80bfb7')
def test_create_show_delete_firewall(self):
# Create tenant network resources required for an ACTIVE firewall
network = self.create_network()
subnet = self.create_subnet(network)
router = self.create_router(
data_utils.rand_name('router-'),
admin_state_up=True)
self.client.add_router_interface_with_subnet_id(
router['id'], subnet['id'])
# Create firewall
body = self.client.create_firewall(
name=data_utils.rand_name("firewall"),
firewall_policy_id=self.fw_policy['id'])
created_firewall = body['firewall']
firewall_id = created_firewall['id']
self.addCleanup(self._try_delete_firewall, firewall_id)
# Wait for the firewall resource to become ready
self._wait_until_ready(firewall_id)
# show a created firewall
firewall = self.client.show_firewall(firewall_id)
firewall = firewall['firewall']
for key, value in firewall.iteritems():
if key == 'status':
continue
self.assertEqual(created_firewall[key], value)
# list firewall
firewalls = self.client.list_firewalls()
firewalls = firewalls['firewalls']
self.assertIn((created_firewall['id'],
created_firewall['name'],
created_firewall['firewall_policy_id']),
[(m['id'],
m['name'],
m['firewall_policy_id']) for m in firewalls])
# Delete firewall
self.client.delete_firewall(firewall_id)
@test.attr(type='smoke')
@test.idempotent_id('53305b4b-9897-4e01-87c0-2ae386083180')
def test_firewall_rule_insertion_position_removal_rule_from_policy(self):
# Create firewall rule
body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="tcp")
fw_rule_id1 = body['firewall_rule']['id']
self.addCleanup(self._try_delete_rule, fw_rule_id1)
# Create firewall policy
body = self.client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
fw_policy_id = body['firewall_policy']['id']
self.addCleanup(self._try_delete_policy, fw_policy_id)
# Insert rule to firewall policy
self.client.insert_firewall_rule_in_policy(
fw_policy_id, fw_rule_id1, '', '')
# Verify insertion of rule in policy
self.assertIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
# Create another firewall rule
body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="icmp")
fw_rule_id2 = body['firewall_rule']['id']
self.addCleanup(self._try_delete_rule, fw_rule_id2)
# Insert rule to firewall policy after the first rule
self.client.insert_firewall_rule_in_policy(
fw_policy_id, fw_rule_id2, fw_rule_id1, '')
# Verify the position of the rule after insertion
fw_rule = self.client.show_firewall_rule(
fw_rule_id2)
self.assertEqual(int(fw_rule['firewall_rule']['position']), 2)
# Remove rule from the firewall policy
self.client.remove_firewall_rule_from_policy(
fw_policy_id, fw_rule_id2)
# Insert rule to firewall policy before the first rule
self.client.insert_firewall_rule_in_policy(
fw_policy_id, fw_rule_id2, '', fw_rule_id1)
# Verify the position of the rule after insertion
fw_rule = self.client.show_firewall_rule(
fw_rule_id2)
self.assertEqual(int(fw_rule['firewall_rule']['position']), 1)
# Remove rule from the firewall policy
self.client.remove_firewall_rule_from_policy(
fw_policy_id, fw_rule_id2)
# Verify removal of rule from firewall policy
self.assertNotIn(fw_rule_id2, self._get_list_fw_rule_ids(fw_policy_id))
# Remove rule from the firewall policy
self.client.remove_firewall_rule_from_policy(
fw_policy_id, fw_rule_id1)
# Verify removal of rule from firewall policy
self.assertNotIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
def _get_list_fw_rule_ids(self, fw_policy_id):
fw_policy = self.client.show_firewall_policy(
fw_policy_id)
return [ruleid for ruleid in fw_policy['firewall_policy']
['firewall_rules']]
@test.idempotent_id('8515ca8a-0d2f-4298-b5ff-6f924e4587ca')
def test_update_firewall_policy_audited_attribute(self):
# Create firewall rule
body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="icmp")
fw_rule_id = body['firewall_rule']['id']
self.addCleanup(self._try_delete_rule, fw_rule_id)
# Create firewall policy
body = self.client.create_firewall_policy(
name=data_utils.rand_name('fw-policy'))
fw_policy_id = body['firewall_policy']['id']
self.addCleanup(self._try_delete_policy, fw_policy_id)
self.assertFalse(body['firewall_policy']['audited'])
# Update firewall policy audited attribute to true
self.client.update_firewall_policy(fw_policy_id,
audited=True)
# Insert Firewall rule to firewall policy
self.client.insert_firewall_rule_in_policy(
fw_policy_id, fw_rule_id, '', '')
body = self.client.show_firewall_policy(
fw_policy_id)
self.assertFalse(body['firewall_policy']['audited'])
|
|
"""Calendar serializers."""
from django.utils.translation import ugettext as _
from rest_framework import serializers
from modoboa.admin import models as admin_models
from modoboa.lib import fields as lib_fields
from . import backends
from . import models
class CalDAVCalendarMixin(object):
"""Mixin for calendar serializers."""
def create_remote_calendar(self, calendar):
"""Create caldav calendar."""
request = self.context["request"]
backend = backends.get_backend_from_request("caldav_", request)
backend.create_calendar(calendar.encoded_url)
def update_remote_calendar(self, calendar):
"""Update caldav calendar."""
request = self.context["request"]
backend = backends.get_backend_from_request("caldav_", request)
backend.update_calendar(calendar)
class UserCalendarSerializer(CalDAVCalendarMixin, serializers.ModelSerializer):
"""User calendar serializer."""
class Meta:
model = models.UserCalendar
fields = ("pk", "name", "color", "path", "full_url", "share_url")
read_only_fields = ("pk", "path", "full_url", "share_url")
def create(self, validated_data):
"""Use current user."""
user = self.context["request"].user
calendar = models.UserCalendar.objects.create(
mailbox=user.mailbox, **validated_data)
self.create_remote_calendar(calendar)
return calendar
def update(self, instance, validated_data):
"""Update calendar."""
old_name = instance.name
old_color = instance.color
for key, value in validated_data.items():
setattr(instance, key, value)
instance.save()
if old_name != instance.name or old_color != instance.color:
self.update_remote_calendar(instance)
return instance
class DomainSerializer(serializers.ModelSerializer):
"""Domain serializer."""
pk = serializers.IntegerField()
name = serializers.CharField()
class Meta:
model = admin_models.Domain
fields = ("pk", "name")
read_only_fields = ("pk", "name", )
class SharedCalendarSerializer(
CalDAVCalendarMixin, serializers.ModelSerializer):
"""Shared calendar serializer."""
domain = DomainSerializer()
class Meta:
model = models.SharedCalendar
fields = (
"pk", "name", "color", "path", "domain", "full_url", "share_url"
)
read_only_fields = ("pk", "path", "full_url", "share_url")
def create(self, validated_data):
"""Create shared calendar."""
domain = validated_data.pop("domain")
calendar = models.SharedCalendar(**validated_data)
calendar.domain_id = domain["pk"]
calendar.save()
self.create_remote_calendar(calendar)
return calendar
def update(self, instance, validated_data):
"""Update calendar."""
domain = validated_data.pop("domain")
old_name = instance.name
old_color = instance.color
for key, value in validated_data.items():
setattr(instance, key, value)
instance.domain_id = domain["pk"]
instance.save()
if old_name != instance.name or old_color != instance.color:
self.update_remote_calendar(instance)
return instance
class AttendeeSerializer(serializers.Serializer):
"""Attendee serializer."""
display_name = serializers.CharField()
email = serializers.EmailField()
class EventSerializer(serializers.Serializer):
"""Base event serializer (fullcalendar output)."""
id = serializers.CharField(read_only=True)
title = serializers.CharField()
start = serializers.DateTimeField()
end = serializers.DateTimeField()
allDay = serializers.BooleanField(default=False)
color = serializers.CharField(read_only=True)
description = serializers.CharField(required=False)
attendees = AttendeeSerializer(many=True, required=False)
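# Example (sketch): the serialized output follows fullcalendar's event format,
# roughly as below. All values are illustrative; the keys match the fields
# declared on EventSerializer above.
#
#     {
#         "id": "20",
#         "title": "Weekly sync",
#         "start": "2018-01-08T10:00:00Z",
#         "end": "2018-01-08T11:00:00Z",
#         "allDay": False,
#         "color": "#3a87ad",
#         "description": "Agenda in the wiki",
#         "attendees": [{"display_name": "John", "email": "[email protected]"}],
#     }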
class ROEventSerializer(EventSerializer):
"""Event serializer for read operations."""
def __init__(self, *args, **kwargs):
"""Set calendar field based on type."""
calendar_type = kwargs.pop("calendar_type")
super(ROEventSerializer, self).__init__(*args, **kwargs)
self.fields["calendar"] = (
UserCalendarSerializer() if calendar_type == "user"
else SharedCalendarSerializer()
)
class WritableEventSerializer(EventSerializer):
"""Event serializer for write operations."""
calendar = serializers.PrimaryKeyRelatedField(
queryset=models.UserCalendar.objects.none())
new_calendar_type = serializers.CharField(required=False)
def __init__(self, *args, **kwargs):
"""Set calendar list."""
calendar_type = kwargs.pop("calendar_type")
super(WritableEventSerializer, self).__init__(*args, **kwargs)
self.update_calendar_field(calendar_type)
def update_calendar_field(self, calendar_type):
"""Update field based on given type."""
user = self.context["request"].user
if user.is_anonymous:
return
if calendar_type == "user":
self.fields["calendar"].queryset = (
models.UserCalendar.objects.filter(mailbox__user=user)
)
elif hasattr(user, "mailbox"):
self.fields["calendar"].queryset = (
models.SharedCalendar.objects.filter(
domain=user.mailbox.domain)
)
def validate(self, data):
"""Make sure dates are present with allDay flag."""
errors = {}
if "allDay" in data:
if "start" not in data:
errors["start"] = _("This field is required.")
if "end" not in data:
errors["end"] = _("This field is required.")
if errors:
raise serializers.ValidationError(errors)
return data
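# Example usage (sketch, assuming a DRF view that passes ``request`` in the
# serializer context): ``calendar_type`` selects which queryset backs the
# ``calendar`` field, as implemented in update_calendar_field above.
#
#     serializer = WritableEventSerializer(
#         data=request.data, calendar_type="user",
#         context={"request": request})
#     serializer.is_valid(raise_exception=True)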
class MailboxSerializer(serializers.ModelSerializer):
"""Mailbox serializer."""
pk = serializers.IntegerField()
full_address = lib_fields.DRFEmailFieldUTF8()
class Meta:
model = admin_models.Mailbox
fields = ("pk", "full_address")
read_only_fields = ("pk", "full_address", )
class AccessRuleSerializer(serializers.ModelSerializer):
"""AccessRule serializer."""
mailbox = MailboxSerializer()
class Meta:
model = models.AccessRule
fields = ("pk", "mailbox", "calendar", "read", "write")
def create(self, validated_data):
"""Create access rule."""
mailbox = validated_data.pop("mailbox")
rule = models.AccessRule(**validated_data)
rule.mailbox_id = mailbox["pk"]
rule.save()
return rule
def update(self, instance, validated_data):
"""Update access rule."""
mailbox = validated_data.pop("mailbox")
for key, value in validated_data.items():
setattr(instance, key, value)
instance.mailbox_id = mailbox["pk"]
instance.save()
return instance
class CheckTokenSerializer(serializers.Serializer):
"""Serializer for the check_token action."""
calendar = serializers.CharField()
token = serializers.CharField()
class ImportFromFileSerializer(serializers.Serializer):
"""Serializer for the import_from_file action."""
ics_file = serializers.FileField()
|
|
from corrdb.common import logAccess, logStat, logTraffic, crossdomain, basicAuthSession
from corrdb.common.models import UserModel
from corrdb.common.models import ApplicationModel
from corrdb.common.models import ProjectModel
from corrdb.common.models import EnvironmentModel
from corrdb.common.models import RecordModel
from corrdb.common.models import TrafficModel
from corrdb.common.models import StatModel
from corrdb.common.models import VersionModel
from corrdb.common.models import BundleModel
from flask_stormpath import user
from flask_stormpath import login_required
from flask_api import status
import flask as fk
from cloud import app, cloud_response, storage_manager, access_manager, CLOUD_URL, VIEW_HOST, VIEW_PORT, MODE, ACC_SEC, CNT_SEC
import datetime
import simplejson as json
import traceback
import smtplib
from email.mime.text import MIMEText
import mimetypes
@app.route(CLOUD_URL + '/private/env/remove/<env_id>', methods=['GET','POST','PUT','UPDATE','DELETE','POST', 'OPTIONS'])
@crossdomain(fk=fk, app=app, origin='*')
def env_remove(env_id):
logTraffic(CLOUD_URL, endpoint='/private/env/remove/<env_id>')
hash_session = basicAuthSession(fk.request)
if fk.request.method in ['GET', 'DELETE']:
access_resp = access_manager.check_cloud(hash_session, ACC_SEC, CNT_SEC)
current_user = access_resp[1]
if current_user is None :
return fk.Response('Unauthorized action on this environment.', status.HTTP_401_UNAUTHORIZED)
else:
try:
logAccess(fk, access_resp[1], CLOUD_URL, 'cloud', '/private/env/remove/<env_id>')
env = EnvironmentModel.objects.with_id(env_id)
except:
env = None
print(str(traceback.print_exc()))
if env is None:
return fk.Response('Unable to find this environment.', status.HTTP_404_NOT_FOUND)
else:
# result = storage_manager.delete_env_files(env)
# if result:
# implement project history env removal: project.history.append(str(env.id))
count = 0
for project in ProjectModel.objects(owner=current_user):
try:
project.history.remove(str(env_id))
project.save()
count = count + 1
except:
pass
if count > 0:
env.delete()
return cloud_response(200, 'Deletion succeeded', 'The environment %s was successfully deleted.'%env_id)
else:
return fk.Response('Endpoint does not support this HTTP method.', status.HTTP_405_METHOD_NOT_ALLOWED)
@app.route(CLOUD_URL + '/private/env/view/<env_id>', methods=['GET','POST','PUT','UPDATE','DELETE','POST', 'OPTIONS'])
@crossdomain(fk=fk, app=app, origin='*')
def env_view(env_id):
logTraffic(CLOUD_URL, endpoint='/private/env/view/<env_id>')
hash_session = basicAuthSession(fk.request)
if fk.request.method == 'GET':
access_resp = access_manager.check_cloud(hash_session, ACC_SEC, CNT_SEC)
current_user = access_resp[1]
if current_user is not None:
try:
logAccess(fk, access_resp[1], CLOUD_URL, 'cloud', '/private/env/view/<env_id>')
env = EnvironmentModel.objects.with_id(env_id)
# Make sure the user owns or has used this environment.
owned = False
for project in ProjectModel.objects(owner=current_user):
if str(env.id) in project.history:
owned = True
break
if not owned:
env = None
except:
env = None
print(str(traceback.print_exc()))
if env is None:
return fk.Response('Unable to find this environment.', status.HTTP_404_NOT_FOUND)
else:
return fk.Response(env.to_json(), mimetype='application/json')
else:
return fk.Response('Unauthorized action on this environment.', status.HTTP_401_UNAUTHORIZED)
else:
return fk.Response('Endpoint does not support this HTTP method.', status.HTTP_405_METHOD_NOT_ALLOWED)
@app.route(CLOUD_URL + '/private/env/create/<record_id>', methods=['GET','POST','PUT','UPDATE','DELETE','POST', 'OPTIONS'])
@crossdomain(fk=fk, app=app, origin='*')
def env_create(record_id):
logTraffic(CLOUD_URL, endpoint='/private/env/create/<record_id>')
hash_session = basicAuthSession(fk.request)
if fk.request.method == 'POST':
access_resp = access_manager.check_cloud(hash_session, ACC_SEC, CNT_SEC)
current_user = access_resp[1]
if current_user is None:
return fk.redirect('{0}:{1}/error/?code=401'.format(VIEW_HOST, VIEW_PORT))
else:
logAccess(fk, access_resp[1], CLOUD_URL, 'cloud', '/private/env/create/<record_id>')
try:
record = RecordModel.objects.with_id(record_id)
except:
record = None
print(str(traceback.print_exc()))
if record is None:
return fk.Response('Unable to find the referenced record.', status.HTTP_404_NOT_FOUND)
else:
if fk.request.data:
data = json.loads(fk.request.data)
try:
env = EnvironmentModel(created_at=str(datetime.datetime.utcnow()))
application_name = data.get("app", None)
if application_name and application_name != '':
application = ApplicationModel.objects(name=application_name).first()
if application:
# Maybe not put record increment here.
application.records = application.records + 1
application.save()
if str(current_user.id) not in application.users:
application.users.append(str(current_user.id))
application.save()
env.application = application
group = data.get("group", "unknown")
system = data.get("system", "undefined")
env.group = group
env.system = system
env.save()
project = record.project
if record.environment:
project.history.remove(str(record.environment.id))
record.environment = env
record.save()
project.history.append(str(env.id))
project.save()
return cloud_response(201, 'Environment successfully created.', project.history)
except:
print(str(traceback.print_exc()))
return fk.Response('Failure to process. Contact admin if it persists.', status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return fk.Response('No content provided for the creation.', status.HTTP_204_NO_CONTENT)
else:
return fk.redirect('{0}:{1}/error/?code=405'.format(VIEW_HOST, VIEW_PORT))
@app.route(CLOUD_URL + '/private/env/next/<project_id>', methods=['GET','POST','PUT','UPDATE','DELETE','POST', 'OPTIONS'])
@crossdomain(fk=fk, app=app, origin='*')
def env_next(project_id):
logTraffic(CLOUD_URL, endpoint='/private/env/next/<project_id>')
hash_session = basicAuthSession(fk.request)
if fk.request.method == 'POST':
access_resp = access_manager.check_cloud(hash_session, ACC_SEC, CNT_SEC)
current_user = access_resp[1]
if current_user is None:
return fk.redirect('{0}:{1}/error/?code=401'.format(VIEW_HOST, VIEW_PORT))
else:
logAccess(fk, access_resp[1], CLOUD_URL, 'cloud', '/private/env/next/<project_id>')
if current_user.quota >= current_user.max_quota*1024*1024*1024:
return fk.Response('You have exceeded your allowed maximum quota.', status.HTTP_401_UNAUTHORIZED)
try:
project = ProjectModel.objects.with_id(project_id)
except:
project = None
print(str(traceback.print_exc()))
if project is None:
return fk.Response('Unable to find the referenced project.', status.HTTP_404_NOT_FOUND)
else:
if project.owner != current_user:
return fk.Response('Unauthorized action on this project.', status.HTTP_401_UNAUTHORIZED)
if fk.request.data:
data = json.loads(fk.request.data)
try:
env = EnvironmentModel(created_at=str(datetime.datetime.utcnow()))
application_name = data.get("app", None)
if application_name and application_name != '':
application = ApplicationModel.objects(name=application_name).first()
if application:
application.records = application.records + 1
application.save()
if str(current_user.id) not in application.users:
application.users.append(str(current_user.id))
application.save()
env.application = application
group = data.get("group", "unknown")
system = data.get("system", "undefined")
env.group = group
env.system = system
env.save()
version = VersionModel(created_at=str(datetime.datetime.utcnow()))
system = data.get("version", "unknown")
vc_location = data.get("version-location")
vc_baselines = vc_location.split("|")
if len(vc_baselines) > 0:
version.baseline = vc_baselines[0]
if len(vc_baselines) > 1:
version.marker = vc_baselines[1]
version.system = system
version.save()
env.version = version
env.save()
bundle = BundleModel(created_at=str(datetime.datetime.utcnow()))
scope = data.get("env-location", "unknown")
bundle.scope = scope
if scope == "remote":
bundle.storage = data.get("bundle-location", "unknown")
bundle.save()
env.bundle = bundle
env.save()
project.history.append(str(env.id))
project.save()
project_content = json.loads(project.summary_json())
project_content["env"] = {"bundle-id":str(bundle.id)}
return cloud_response(201, 'Environment successfully created.', project_content)
except:
print(str(traceback.print_exc()))
return fk.Response('Failure to process. Contact admin if it persists.', status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return fk.Response('No content provided for the creation.', status.HTTP_204_NO_CONTENT)
else:
return fk.redirect('{0}:{1}/error/?code=405'.format(VIEW_HOST, VIEW_PORT))
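# Example request payload (sketch) for env_next above, matching the keys read
# from the posted JSON. All values are illustrative; "version-location" is
# split on '|' into baseline and marker, and "bundle-location" is only read
# when "env-location" is "remote".
#
#     {
#         "app": "my-app",
#         "group": "unknown",
#         "system": "linux-x86_64",
#         "version": "git",
#         "version-location": "master|abc123",
#         "env-location": "remote",
#         "bundle-location": "s3://bucket/bundle.zip"
#     }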
@app.route(CLOUD_URL + '/private/<hash_session>/env/download/<env_id>', methods=['GET','POST','PUT','UPDATE','DELETE','POST', 'OPTIONS'])
@crossdomain(fk=fk, app=app, origin='*')
def download_env(hash_session, env_id):
logTraffic(CLOUD_URL, endpoint='/private/<hash_session>/env/download/<env_id>')
if fk.request.method == 'GET':
access_resp = access_manager.check_cloud(hash_session, ACC_SEC, CNT_SEC)
current_user = access_resp[1]
try:
env = EnvironmentModel.objects.with_id(env_id)
project = None
if current_user:
for pro in ProjectModel.objects(owner=current_user):
if str(env.id) in pro.history:
project = pro
break
else:
for pro in ProjectModel.objects:
if str(env.id) in pro.history:
project = pro
break
except:
env = None
project = None
print(str(traceback.print_exc()))
return fk.Response('Failure to process. Contact admin if it persists.', status.HTTP_500_INTERNAL_SERVER_ERROR)
if env is None or project is None:
return fk.redirect('{0}:{1}/error/?code=204'.format(VIEW_HOST, VIEW_PORT))
else:
# Envs are free for download.
if current_user:
logAccess(fk, access_resp[1], CLOUD_URL, 'cloud', '/private/<hash_session>/env/download/<env_id>')
prepared = storage_manager.prepare_env(project, env)
if prepared[0] == None:
print("Unable to retrieve a env to download.")
return fk.redirect('{0}:{1}/error/?code=204'.format(VIEW_HOST, VIEW_PORT))
else:
return fk.send_file(prepared[0], as_attachment=True, attachment_filename=prepared[1], mimetype='application/zip')
else:
return fk.redirect('{0}:{1}/error/?code=405'.format(VIEW_HOST, VIEW_PORT))
@app.route(CLOUD_URL + '/private/env/edit/<env_id>', methods=['GET','POST','PUT','UPDATE','DELETE','POST', 'OPTIONS'])
@crossdomain(fk=fk, app=app, origin='*')
def env_edit(env_id):
logTraffic(CLOUD_URL, endpoint='/private/env/edit/<env_id>')
hash_session = basicAuthSession(fk.request)
if fk.request.method == 'POST':
access_resp = access_manager.check_cloud(hash_session, ACC_SEC, CNT_SEC)
current_user = access_resp[1]
if current_user is None:
return fk.Response('Unauthorized action on this environment.', status.HTTP_401_UNAUTHORIZED)
else:
logAccess(fk, access_resp[1], CLOUD_URL, 'cloud', '/private/env/edit/<env_id>')
try:
env = EnvironmentModel.objects.with_id(env_id)
owned = False
for project in ProjectModel.objects(owner=current_user):
if str(env.id) in project.history:
owned = True
break
if not owned:
env = None
except:
env = None
print(str(traceback.print_exc()))
if env is None:
return fk.Response('Unable to find this environment.', status.HTTP_404_NOT_FOUND)
else:
if fk.request.data:
data = json.loads(fk.request.data)
try:
group = data.get("group", env.group)
system = data.get("system", env.system)
env.group = group
env.system = system
env.save()
return fk.Response('Environment edited', status.HTTP_200_OK)
except:
print(str(traceback.print_exc()))
return fk.Response('Failure to process. Contact admin if it persists.', status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return fk.Response('No content provided for the update.', status.HTTP_204_NO_CONTENT)
else:
return fk.Response('Endpoint does not support this HTTP method.', status.HTTP_405_METHOD_NOT_ALLOWED)
@app.route(CLOUD_URL + '/public/env/view/<env_id>', methods=['GET','POST','PUT','UPDATE','DELETE','POST', 'OPTIONS'])
@crossdomain(fk=fk, app=app, origin='*')
def public_env_view(env_id):
logTraffic(CLOUD_URL, endpoint='/public/env/view/<env_id>')
if fk.request.method == 'GET':
try:
env = EnvironmentModel.objects.with_id(env_id)
except:
env = None
print(str(traceback.print_exc()))
if env is None:
return fk.Response('Unable to find this environment.', status.HTTP_404_NOT_FOUND)
else:
return fk.Response(env.to_json(), mimetype='application/json')
else:
return fk.Response('Endpoint does not support this HTTP method.', status.HTTP_405_METHOD_NOT_ALLOWED)
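# Example (sketch): the public view endpoint above can be queried without
# authentication, e.g. with the requests library. The host/port and env id are
# placeholders; private endpoints additionally require the basic-auth session
# resolved by basicAuthSession and access_manager.check_cloud.
#
#     import requests
#     resp = requests.get("http://localhost:5100" + CLOUD_URL +
#                         "/public/env/view/" + env_id)
#     if resp.status_code == 200:
#         env_document = resp.json()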
|
|
from __future__ import print_function
from __future__ import division
# from pympler import summary
# from pympler import muppy
# from pympler import tracker
import weakref
import progressbar
from progressbar import *
#own files
from h5tools import *
from tools import *
from features import *
from settings import *
from classifier import *
import pylab
import vigra
import os
import colorama
colorama.init()
from termcolor import colored,cprint
from colorama import Fore, Back, Style
import fastfilters
import commentjson as json
import pprint
import h5py
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor
import threading
import multiprocessing
def train(settings):
featuresList = []
labelsList = []
for trainingInstanceDataDict in settings.trainignInstancesDataDicts():
# remember opened h5 files
openH5Files = []
# get the labels
f, d = trainingInstanceDataDict['labels']['file']
labelsH5File = h5py.File(f)
labelsDset = labelsH5File[d]
openH5Files.append(labelsH5File)
# open all dsets on which we need to compute features
dataH5Dsets , openH5Files = settings.getDataH5Dsets(trainingInstanceDataDict, openH5Files)
# extract the features and the labels
f, l = extractTrainingData(settings=settings, dataH5Dsets=dataH5Dsets,
labelsH5Dset=labelsDset)
featuresList.append(f)
labelsList.append(l)
closeAllH5Files(openH5Files)
features = numpy.concatenate(featuresList,axis=0)
labels = numpy.concatenate(labelsList, axis=0)
# subtract 1 from labels
assert labels.min() == 1
labels -=1
trainClassifier(settings, features, labels)
def extractTrainingData(settings, dataH5Dsets, labelsH5Dset):
# check and get shapes
shape = getShape(dataH5Dsets, labelsH5Dset)
featuresList = []
labelsList = []
lock = threading.Lock()
outerFeatureOperatorList, maxHaloList = settings.getFeatureOperators()
numberOfFeatures = 0
for fOps in outerFeatureOperatorList:
for fOp in fOps:
print(fOp,fOp.numberOfFeatures())
numberOfFeatures += fOp.numberOfFeatures()
#print("\n\n\n")
#print("keys",dataH5Dsets.keys())
#print(maxHaloList)
##print(outerFeatureOperatorList[1])
#sys.exit()
lockDict = {
}
for key in dataH5Dsets.keys():
lockDict[key] = threading.Lock()
print("totalshape",shape)
nBlocks = 0
for blockIndex, blockBegin, blockEnd in blockYielder((0,0,0), shape, settings.featureBlockShape):
nBlocks +=1
widgets = ['Training: ', Percentage(), ' ',Counter(),'/',str(nBlocks),'', Bar(marker='0',left='[',right=']'),
' ', ETA()] #see docs for other options
#
bar = progressbar.ProgressBar(maxval=nBlocks,widgets=widgets)
doneBlocks = [0]
bar.start()
def f(blockIndex, blockBegin, blockEnd):
if(settings.useTrainingBlock(blockIndex, blockBegin, blockEnd)):
labels = loadLabelsBlock(labelsH5Dset, blockBegin, blockEnd)
#with lock:
# print("np unique",numpy.unique(labels))
if labels.any():
labels,blockBegin,blockEnd,whereLabels = labelsBoundingBox(labels,blockBegin, blockEnd)
blockShape = [be-bb for be,bb in zip(blockEnd, blockBegin)]
featureArray = numpy.zeros( (blockShape+[numberOfFeatures]), dtype='float32')
fIndex = 0
for featureOperatorList, maxHalo, dataName in zip(outerFeatureOperatorList, maxHaloList, dataH5Dsets.keys()):
#print("dataName",dataName,featureOperatorList)
# the dataset
dset = dataH5Dsets[dataName]
# add halo to block begin and end
gBegin, gEnd ,lBegin, lEnd = addHalo(shape, blockBegin, blockEnd, maxHalo)
slicing = getSlicing(lBegin, lEnd)
with lockDict[dataName]:
# we load the data with the maximum margin
data = loadData(dset, gBegin, gEnd).squeeze()
# compute the features
for featureOp in featureOperatorList:
nf = featureOp.numberOfFeatures()
subFeatureArray = featureArray[:,:,:,fIndex:fIndex+nf]
fIndex += nf
featureOp(data, slicing, subFeatureArray)
labels = labels[whereLabels[0,:],whereLabels[1,:],whereLabels[2,:]]
with lock:
f = featureArray[whereLabels[0,:], whereLabels[1,:], whereLabels[2,:], :]
#print("appending features:",f.shape)
featuresList.append(featureArray[whereLabels[0,:], whereLabels[1,:], whereLabels[2,:], :])
labelsList.append(labels)
with lock:
#print(doneBlocks)
doneBlocks[0] += 1
bar.update(doneBlocks[0])
nWorker = multiprocessing.cpu_count()
#nWorker = 1
forEachBlock(shape=shape, blockShape=settings.featureBlockShape,f=f, nWorker=nWorker)
bar.finish()
features = numpy.concatenate(featuresList,axis=0)
labels = numpy.concatenate(labelsList, axis=0)
print(numpy.bincount(labels))
return features,labels
def trainClassifier(settings, features, labels):
print("train classifier")
setup = settings.settingsDict['setup']
f = setup["classifier"]["training_set"]
if os.path.exists(f):
os.remove(f)
h5file = h5py.File(f,'w')
h5file['features'] = features
h5file['labels'] = labels
h5file.close()
nClasses = labels.max() + 1
clfSetup = setup["classifier"]
clfType = clfSetup["type"]
clfSettings = clfSetup["settings"]
if clfType == "xgb":
clf = XGBClassifier(nClasses=nClasses, **clfSettings)
clf.train(X=features, Y=labels)
# save classifier
clf.save(clfSetup["filename"])
elif clfType == "rf":
clf = RfClassifier(**clfSettings)
clf.train(X=features, Y=labels)
clf.save(clfSetup["filename"])
else:
raise RuntimeError(" %s is a non supported classifer" %clfType)
def loadClassifer(settings):
clfSetup = settings.settingsDict['setup']["classifier"]
clfType = clfSetup["type"]
clfSettings = clfSetup["settings"]
if clfType == "xgb":
nt = nWorker = multiprocessing.cpu_count()
nt = max(1, nt//4)
nt = 1
clf = XGBClassifier(nClasses=settings.numberOfClasses,**clfSettings)
clf.load(clfSetup["filename"], nThreads=5)
return clf
elif clfType == "rf":
clf = RfClassifier(**clfSettings)
clf.load(clfSetup["filename"], nThreads=1)
return clf
else:
raise RuntimeError(" %s is a non supported classifer" %clfType)
class PredictionFunctor(object):
def __init__(self, settings, shape, clf, dataH5Dsets, predictionDset,
predictionDtype, roiBegin, roiEnd):
self.settings = settings
self.shape = shape
self.clf = clf
self.dataH5Dsets = dataH5Dsets
self.predictionDset = predictionDset
self.predictionDtype = predictionDtype
self.roiBegin = roiBegin
self.roiEnd = roiEnd
outerFeatureOperatorList, maxHaloList = settings.getFeatureOperators()
self.outerFeatureOperatorList = outerFeatureOperatorList
self.maxHaloList = maxHaloList
self.lock = threading.Lock()
self.lockDict = {}
for key in dataH5Dsets.keys():
self.lockDict[key] = threading.Lock()
self.numberOfFeatures = 0
for fOps in outerFeatureOperatorList:
for fOp in fOps:
self.numberOfFeatures += fOp.numberOfFeatures()
print("numberOfFeatures", self.numberOfFeatures)
def __call__(self, blockIndex, blockBegin, blockEnd):
blockShape = (
blockEnd[0] - blockBegin[0],
blockEnd[1] - blockBegin[1],
blockEnd[2] - blockBegin[2]
)
with self.lock:
print("alloc")
featureArray = numpy.zeros( (blockShape+(self.numberOfFeatures,)), dtype='float32')
fIndex = 0
for featureOperatorList, maxHalo, dataName in zip(self.outerFeatureOperatorList, self.maxHaloList, self.dataH5Dsets.keys()):
# the dataset
dset = self.dataH5Dsets[dataName]
# add halo to block begin and end
gBegin, gEnd ,lBegin, lEnd = addHalo(self.shape, blockBegin, blockEnd, maxHalo)
slicing = getSlicing(lBegin, lEnd)
# we load the data with the maximum margin
with self.lockDict[dataName]:
data = loadData(dset, gBegin, gEnd).squeeze()
# compute the features
for i,featureOp in enumerate(featureOperatorList):
nf = featureOp.numberOfFeatures()
subFeatureArray = featureArray[:,:,:,fIndex:fIndex+nf]
fIndex += nf
featureOp(data, slicing, subFeatureArray)
#if(i==1 and dataName=='pmap'):
# for x in range(subFeatureArray.shape[3]):
# p = int(subFeatureArray.shape[2]/2)
# fImg = subFeatureArray[:,:,p,x]
# print(data.shape)
# rImg = data[slicing+[slice(0,1)]][:,:,p,0]
# f = pylab.figure()
# f.add_subplot(2, 1, 1)
# pylab.imshow(fImg,cmap='gray')
# f.add_subplot(2, 1, 2)
# pylab.imshow(rImg,cmap='gray')
# pylab.show()
featuresFlat = featureArray.reshape([-1,self.numberOfFeatures])
if self.clf.needsLockedPrediction():
with self.lock:
probsFlat = self.clf.predict(featuresFlat)
probs = probsFlat.reshape(tuple(blockShape)+(self.settings.numberOfClasses,))
print("mima",probs.min(),probs.max())
if self.predictionDtype == 'uint8':
probs *= 255.0
probs = numpy.round(probs,0).astype('uint8')
dsetBegin = [bb-rb for bb,rb in zip(blockBegin, self.roiBegin)]
dsetEnd = [be-rb for be,rb in zip(blockEnd, self.roiBegin)]
self.predictionDset[dsetBegin[0]:dsetEnd[0],dsetBegin[1]:dsetEnd[1],dsetBegin[2]:dsetEnd[2],:] = probs[:,:,:,:]
else:
probsFlat = self.clf.predict(featuresFlat)
probs = probsFlat.reshape(tuple(blockShape)+(self.settings.numberOfClasses,))
if self.predictionDtype == 'uint8':
probs *= 255.0
probs = numpy.round(probs,0).astype('uint8')
with self.lock:
print("mima",probs.min(),probs.max())
dsetBegin = [bb-rb for bb,rb in zip(blockBegin, self.roiBegin)]
dsetEnd = [be-rb for be,rb in zip(blockEnd, self.roiBegin)]
self.predictionDset[dsetBegin[0]:dsetEnd[0],dsetBegin[1]:dsetEnd[1],dsetBegin[2]:dsetEnd[2],:] = probs[:,:,:,:]
def predict(settings):
with Timer("load classifier:"):
clf = loadClassifer(settings)
nClasses = settings.numberOfClasses
for predictionInstanceDataDict in settings.predictionInstancesDataDicts():
print("pred data dict",predictionInstanceDataDict)
# remember opened h5 files
openH5Files = []
# open all dsets on which we need to compute features
dataH5Dsets , openH5Files = settings.getDataH5Dsets(predictionInstanceDataDict, openH5Files)
# get and check shape
shape = getShape(dataH5Dsets)
# allocate output file
roiBegin, roiEnd = predictionInstanceDataDict['prediction'].get('roi',[[0,0,0],shape])
roiShape = [re-rb for re,rb in zip(roiEnd, roiBegin)]
f, d = predictionInstanceDataDict['prediction']['file']
if os.path.exists(f):
os.remove(f)
f = h5py.File(f, 'w')
openH5Files.append(f)
pshape = roiShape + [nClasses]
predictionDtype = predictionInstanceDataDict['prediction']['dtype']
chunkShape = tuple([min(s,c) for s,c in zip(pshape[0:3],(100,100,100))]) + (settings.numberOfClasses,)
predictionDset = f.create_dataset(d,shape=pshape, chunks=chunkShape, dtype=predictionDtype)
outerFeatureOperatorList, maxHaloList = settings.getFeatureOperators()
lock = threading.Lock()
lockDict = {
}
for key in dataH5Dsets.keys():
lockDict[key] = threading.Lock()
outerFeatureOperatorList, maxHaloList = settings.getFeatureOperators()
numberOfFeatures = 0
for fOps in outerFeatureOperatorList:
for fOp in fOps:
numberOfFeatures += fOp.numberOfFeatures()
nBlocks = 0
for blockIndex, blockBegin, blockEnd in blockYielder(roiBegin, roiEnd, settings.featureBlockShape):
nBlocks +=1
widgets = ['Prediction: ', Percentage(), ' ',Counter(),'/',str(nBlocks), Bar(marker='0',left='[',right=']'),
' ', ETA()] #see docs for other options
#
bar = progressbar.ProgressBar(maxval=nBlocks,widgets=widgets)
doneBlocks = [0]
bar.start()
def f(blockIndex, blockBegin, blockEnd):
blockShape = (
blockEnd[0] - blockBegin[0],
blockEnd[1] - blockBegin[1],
blockEnd[2] - blockBegin[2]
)
#with lock:
# print("alloc")
featureArray = numpy.zeros( (blockShape+(numberOfFeatures,)), dtype='float32')
fIndex = 0
for featureOperatorList, maxHalo, dataName in zip(outerFeatureOperatorList, maxHaloList, dataH5Dsets.keys()):
# the dataset
dset = dataH5Dsets[dataName]
# add halo to block begin and end
gBegin, gEnd ,lBegin, lEnd = addHalo(shape, blockBegin, blockEnd, maxHalo)
slicing = getSlicing(lBegin, lEnd)
# we load the data with the maximum margin
with lockDict[dataName]:
data = loadData(dset, gBegin, gEnd).squeeze()
# compute the features
for i,featureOp in enumerate(featureOperatorList):
nf = featureOp.numberOfFeatures()
subFeatureArray = featureArray[:,:,:,fIndex:fIndex+nf]
fIndex += nf
featureOp(data, slicing, subFeatureArray)
featuresFlat = featureArray.reshape([-1,numberOfFeatures])
if clf.needsLockedPrediction():
with lock:
doneBlocks[0] += 1
bar.update(doneBlocks[0])
probsFlat = clf.predict(featuresFlat)
probs = probsFlat.reshape(tuple(blockShape)+(settings.numberOfClasses,))
#print("mima",probs.min(),probs.max())
if predictionDtype == 'uint8':
probs *= 255.0
probs = numpy.round(probs,0).astype('uint8')
dsetBegin = [bb-rb for bb,rb in zip(blockBegin, roiBegin)]
dsetEnd = [be-rb for be,rb in zip(blockEnd, roiBegin)]
predictionDset[dsetBegin[0]:dsetEnd[0],dsetBegin[1]:dsetEnd[1],dsetBegin[2]:dsetEnd[2],:] = probs[:,:,:,:]
else:
doneBlocks[0] += 1
bar.update(doneBlocks[0])
probsFlat = clf.predict(featuresFlat)
probs = probsFlat.reshape(tuple(blockShape)+(settings.numberOfClasses,))
if predictionDtype == 'uint8':
probs *= 255.0
probs = numpy.round(probs,0).astype('uint8')
with lock:
#print("mima",probs.min(),probs.max())
dsetBegin = [bb-rb for bb,rb in zip(blockBegin, roiBegin)]
dsetEnd = [be-rb for be,rb in zip(blockEnd, roiBegin)]
predictionDset[dsetBegin[0]:dsetEnd[0],dsetBegin[1]:dsetEnd[1],dsetBegin[2]:dsetEnd[2],:] = probs[:,:,:,:]
nWorker = multiprocessing.cpu_count()
#nWorker = 1
forEachBlock(shape=shape, roiBegin=roiBegin, roiEnd=roiEnd, blockShape=settings.featureBlockShape,f=f, nWorker=nWorker)
bar.finish()
closeAllH5Files(openH5Files)
def importVars(filename, globalVars = None):
if globalVars is None:
globalVars = dict()
localVars = dict()
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, globalVars, localVars)
return localVars
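# Example (sketch): a settings file is a plain Python module whose top-level
# assignments become the returned dict. A hypothetical ``settings.py`` such as
#
#     settingsDict = {
#         "setup": {
#             "classifier": {"type": "rf", "settings": {},
#                            "filename": "clf.h5",
#                            "training_set": "training.h5"},
#         },
#     }
#
# would be loaded as ``importVars('settings.py')['settingsDict']``, which is how
# the __main__ block below consumes it. Only the keys read by the classifier
# code above are shown; the values are illustrative.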
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("mode", type=str,choices=['train','predict'],
help="train or predict")
parser.add_argument('settings', nargs='*', default=os.getcwd())
args = parser.parse_args()
# normaly we use arguments
settingsFile = args.settings[0]
settingsDict = importVars(settingsFile)['settingsDict']
if args.mode=='train':
print("TRAINING:")
settings = Settings(settingsDict)
train(settings=settings)
elif args.mode == 'predict':
print("PREDICTION:")
if len(args.settings) != 2:
parser.error('if mode == predict a valid prediction_settings filename is needed')
predictionSettingsFile = args.settings[1]
predictionSettingsDict = importVars(predictionSettingsFile)['predictionSettingsDict']
settings = Settings(settingsDict, predictionSettingsDict)
predict(settings=settings)
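# Example invocation (sketch), matching the argparse definition above. The
# script and settings file names are placeholders: 'train' reads a single
# settings file, while 'predict' expects the training settings plus a
# prediction settings file (enforced by the length check above).
#
#     python pipeline.py train my_settings.py
#     python pipeline.py predict my_settings.py my_prediction_settings.py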
|
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import inspect
import mixins
from xml.etree import ElementTree as ET
def capfirst(s):
return s[0].upper() + s[1:]
_mixin_classes = None
def load_mixin_classes():
return dict(inspect.getmembers(mixins, inspect.isclass))
def get_mixin_classes():
global _mixin_classes
if _mixin_classes is None:
_mixin_classes = load_mixin_classes()
return _mixin_classes
class SpecList(object):
def __init__(self, module_specs=None, custom_code=""):
self.module_specs = module_specs if module_specs is not None else []
self.custom_code = custom_code
def write_to_xml(self, fname):
root = ET.Element("specs")
subelt = ET.Element("customCode")
subelt.text = self.custom_code
root.append(subelt)
for spec in self.module_specs:
root.append(spec.to_xml())
tree = ET.ElementTree(root)
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
indent(tree.getroot())
tree.write(fname)
@staticmethod
def read_from_xml(fname):
module_specs = []
custom_code = ""
tree = ET.parse(fname)
for elt in tree.getroot():
if elt.tag == "moduleSpec":
module_specs.append(ModuleSpec.from_xml(elt))
elif elt.tag == "customCode":
custom_code = elt.text
retval = SpecList(module_specs, custom_code)
return retval
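# Example (sketch): a spec list can be round-tripped through XML with the
# methods above. The file name and custom code are illustrative; ModuleSpec
# entries (defined below) would normally populate module_specs.
#
#     spec_list = SpecList(module_specs=[], custom_code="# shared helpers")
#     spec_list.write_to_xml("specs.xml")
#     same_list = SpecList.read_from_xml("specs.xml")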
class ModuleSpec(object):
attrs = ["name", "superklass", "docstring", "output_type"]
def __init__(self, name, superklass, code_ref, docstring="", port_specs=None,
output_port_specs=None, output_type=None):
self.name = name
self.superklass = superklass
self.code_ref = code_ref
self.docstring = docstring
self.port_specs = port_specs if port_specs is not None else []
self.output_port_specs = (output_port_specs
if output_port_specs is not None else [])
self.output_type = output_type
self._mixin_class = None
self._mixin_functions = None
def to_xml(self, elt=None):
if elt is None:
elt = ET.Element("moduleSpec")
elt.set("name", self.name)
elt.set("superclass", self.superklass)
elt.set("code_ref", self.code_ref)
if self.output_type is not None:
elt.set("output_type", self.output_type)
subelt = ET.Element("docstring")
subelt.text = str(self.docstring)
elt.append(subelt)
for port_spec in self.port_specs:
subelt = port_spec.to_xml()
elt.append(subelt)
for port_spec in self.output_port_specs:
subelt = port_spec.to_xml()
elt.append(subelt)
return elt
@classmethod
def from_xml(cls, elt):
name = elt.get("name", "")
superklass = elt.get("superclass", "")
code_ref = elt.get("code_ref", "")
output_type = elt.get("output_type", None)
docstring = ""
port_specs = []
output_port_specs = []
for child in elt.getchildren():
if child.tag == "inputPortSpec":
port_specs.append(InputPortSpec.from_xml(child))
elif child.tag == "outputPortSpec":
output_port_specs.append(OutputPortSpec.from_xml(child))
elif child.tag == "docstring":
if child.text:
docstring = child.text
return cls(name, superklass, code_ref, docstring, port_specs,
output_port_specs, output_type)
def get_returned_output_port_specs(self):
return [ps for ps in self.output_port_specs
if ps.property_key is not None]
def get_input_args(self):
args = [ps for ps in self.port_specs if ps.in_args]
args.sort(key=lambda ps: ps.arg_pos)
if len(args) > 1 and len(args) != (args[-1].arg_pos + 1):
raise ValueError("Argument positions are numbered incorrectly")
return args
def get_output_port_spec(self, compute_name):
for ps in self.output_port_specs:
if ps.compute_name == compute_name:
return ps
return None
def get_mixin_name(self):
return self.name + "Mixin"
def has_mixin(self):
if self._mixin_class is None:
mixin_classes = get_mixin_classes()
if self.get_mixin_name() in mixin_classes:
self._mixin_class = mixin_classes[self.get_mixin_name()]
else:
self._mixin_class = False
return (self._mixin_class is not False)
def get_mixin_function(self, f_name):
if not self.has_mixin():
return None
if self._mixin_functions is None:
self._mixin_functions = \
dict(inspect.getmembers(self._mixin_class, inspect.ismethod))
if f_name in self._mixin_functions:
s = inspect.getsource(self._mixin_functions[f_name])
return s[s.find(':')+1:].strip()
return None
def get_compute_before(self):
return self.get_mixin_function("compute_before")
def get_compute_inner(self):
return self.get_mixin_function("compute_inner")
def get_compute_after(self):
return self.get_mixin_function("compute_after")
def get_init(self):
return self.get_mixin_function("__init__")
class PortSpec(object):
xml_name = "portSpec"
attrs = {"name": "",
"port_type": None,
"docstring": ("", True),
"required": (False, False, True),
"show_port": (False, False, True),
"hide": (False, False, True),
"property_type": "",}
def __init__(self, arg, **kwargs):
self.arg = arg
self.set_defaults(**kwargs)
def set_defaults(self, **kwargs):
for attr, props in self.attrs.iteritems():
if isinstance(props, tuple):
default_val = props[0]
else:
default_val = props
if attr in kwargs:
setattr(self, attr, kwargs[attr])
else:
setattr(self, attr, default_val)
if not self.name:
if self.port_type == "__property__":
self.name = self.arg + "Properties"
else:
self.name = self.arg
def to_xml(self, elt=None):
if elt is None:
elt = ET.Element(self.xml_name)
elt.set("arg", self.arg)
for attr, props in self.attrs.iteritems():
attr_val = getattr(self, attr)
is_subelt = False
if isinstance(props, tuple):
default_val = props[0]
if len(props) > 1:
is_subelt = props[1]
else:
default_val = props
if default_val != attr_val:
if is_subelt:
subelt = ET.Element(attr)
subelt.text = str(getattr(self, attr))
elt.append(subelt)
else:
elt.set(attr, str(attr_val))
return elt
@classmethod
def internal_from_xml(cls, elt, obj=None):
arg = elt.get("arg", "")
if obj is None:
obj = cls(arg)
else:
obj.arg = arg
child_elts = {}
for child in elt.getchildren():
if child.tag not in child_elts:
child_elts[child.tag] = []
child_elts[child.tag].append(child)
kwargs = {}
for attr, props in obj.attrs.iteritems():
is_subelt = False
run_eval = False
if isinstance(props, tuple):
if len(props) > 1:
is_subelt = props[1]
if len(props) > 2:
run_eval = props[2]
attr_vals = []
if is_subelt:
if attr in child_elts:
attr_vals = [c.text for c in child_elts[attr]
if c.text is not None]
if attr == "docstring":
print "()() docstring attr_vals:", attr_vals
else:
attr_val = elt.get(attr)
if attr_val is not None:
attr_vals = [attr_val]
if len(attr_vals) > 1:
raise ValueError('Should have only one value for '
'attribute "%s"' % attr)
if len(attr_vals) > 0:
attr_val = attr_vals[0]
if run_eval:
try:
kwargs[attr] = eval(attr_val)
except (NameError, SyntaxError):
kwargs[attr] = attr_val
else:
kwargs[attr] = attr_val
obj.set_defaults(**kwargs)
return obj, child_elts
@classmethod
def from_xml(cls, elt, obj=None):
obj, child_elts = cls.internal_from_xml(elt, obj)
return obj
@staticmethod
def create_from_xml(elt):
if elt.tag == "inputPortSpec":
return InputPortSpec.from_xml(elt)
elif elt.tag == "outputPortSpec":
return OutputPortSpec.from_xml(elt)
elif elt.tag == "alternateSpec":
return AlternatePortSpec.from_xml(elt)
raise TypeError('Cannot create spec from element of type "%s"' %
elt.tag)
def is_property(self):
return self.port_type == "__property__"
def get_property_type(self):
return "Mpl%sProperties" % \
capfirst(self.property_type.rsplit('.', 1)[1])
def get_port_type(self):
if self.port_type is None:
return "basic:String"
return self.port_type
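# A minimal sketch (not part of the spec pipeline) of the XML round trip
# defined above: attrs flagged as sub-elements ("docstring") become child
# elements, the rest become XML attributes, and from_xml re-applies eval for
# attrs flagged with run_eval.  The argument name below is made up.
def _example_port_spec_roundtrip():
    spec = PortSpec("linewidth", docstring="Width of the line",
                    required=True)
    elt = spec.to_xml()
    restored = PortSpec.from_xml(elt)
    # restored.required is a real bool again (eval'd), not the string "True"
    return restored.name == "linewidth" and restored.required is True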
class InputPortSpec(PortSpec):
xml_name = "inputPortSpec"
attrs = {"entry_types": (None, True, True),
"values": (None, True, True),
"defaults": (None, True, True),
"translations": (None, True, True),
"in_kwargs": (True, False, True),
"in_args": (False, False, True),
"constructor_arg": (False, False, True),
"not_setp": (False, False, True),
"arg_pos": (-1, False, True),
}
attrs.update(PortSpec.attrs)
def __init__(self, arg, **kwargs):
if "alternate_specs" in kwargs and kwargs["alternate_specs"]:
self.alternate_specs = kwargs.pop("alternate_specs")
else:
self.alternate_specs = []
PortSpec.__init__(self, arg, **kwargs)
for spec in self.alternate_specs:
spec.set_parent(self)
def to_xml(self, elt=None):
elt = PortSpec.to_xml(self, elt)
for spec in self.alternate_specs:
# write the spec
subelt = spec.to_xml()
elt.append(subelt)
return elt
@classmethod
def from_xml(cls, elt, obj=None):
obj, child_elts = cls.internal_from_xml(elt, obj)
if "alternateSpec" in child_elts:
for child_elt in child_elts["alternateSpec"]:
spec = AlternatePortSpec.from_xml(child_elt)
spec.set_parent(obj)
obj.alternate_specs.append(spec)
return obj
def get_port_attr_dict(self):
attrs = {}
if self.values:
attrs["values"] = str(self.values)
if self.entry_types:
attrs["entry_types"] = str(self.entry_types)
if self.defaults:
attrs["defaults"] = str(self.defaults)
if self.docstring:
attrs["docstring"] = self.docstring
if not self.required and not self.show_port:
attrs["optional"] = True
return attrs
def get_port_attrs(self):
return str(self.get_port_attr_dict())
def has_alternate_versions(self):
return len(self.alternate_specs) > 0
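# A minimal sketch (not called anywhere) of the dict built by
# get_port_attr_dict above: list-valued fields are stringified, and a port
# that is neither required nor shown is flagged as optional.  The argument
# name and values are made up.
def _example_input_port_attrs():
    spec = InputPortSpec("color",
                         entry_types=["enum"],
                         values=[["r", "g", "b"]],
                         defaults=["r"],
                         docstring="Line color.")
    # {'values': "[['r', 'g', 'b']]", 'entry_types': "['enum']",
    #  'defaults': "['r']", 'docstring': 'Line color.', 'optional': True}
    return spec.get_port_attr_dict()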
class AlternatePortSpec(InputPortSpec):
xml_name = "alternateSpec"
def __init__(self, *args, **kwargs):
if len(args) < 1:
args = [""]
InputPortSpec.__init__(self, *args, **kwargs)
self._parent = None
def set_parent(self, parent):
self._parent = parent
if not self.name:
if self._parent.name.endswith("Sequence"):
base_name = self._parent.name[:-8]
elif self._parent.name.endswith("Scalar"):
base_name = self._parent.name[:-6]
else:
base_name = self._parent.name
if self.port_type == "basic:List":
self.name = base_name + "Sequence"
else:
self.name = base_name + "Scalar"
self.arg = self._parent.arg
def get_port_attr_dict(self):
print "CALLING AlternatePortSpec.get_port_attr_dict", self.arg
my_attrs = InputPortSpec.get_port_attr_dict(self)
print "=> my_attrs:", my_attrs
par_attrs = self._parent.get_port_attr_dict()
print "=> par_attrs:", par_attrs
for k, v in par_attrs.iteritems():
            if k in ('defaults', 'values', 'entry_types', 'translations'):
continue
if k not in my_attrs or my_attrs[k] is None:
my_attrs[k] = v
print my_attrs
return my_attrs
class OutputPortSpec(PortSpec):
xml_name = "outputPortSpec"
attrs = {"compute_name": "",
"property_key": None,
"plural": (False, False, True),
"compute_parent": "",
}
attrs.update(PortSpec.attrs)
def set_defaults(self, **kwargs):
PortSpec.set_defaults(self, **kwargs)
if self.compute_name == "":
if self.plural and self.is_property():
self.compute_name = self.arg + 's'
else:
self.compute_name = self.arg
@classmethod
def from_xml(cls, elt, obj=None):
obj, child_elts = cls.internal_from_xml(elt, obj)
output_type = elt.get("output_type")
if output_type is not None:
obj.port_type = output_type
return obj
def get_port_attrs(self):
attrs = {}
if self.docstring:
attrs["docstring"] = self.docstring
return str(attrs)
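# A minimal sketch (not called anywhere) of the naming defaults above for a
# property-style output: the port name gains a "Properties" suffix and the
# plural flag pluralizes compute_name.  The argument name is made up.
def _example_output_port_defaults():
    spec = OutputPortSpec("line", port_type="__property__", plural=True)
    # name defaults to "lineProperties", compute_name to "lines"
    return (spec.name, spec.compute_name)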
# class OutputPortSpec(object):
# attrs = ["name", "compute_name", "output_type", "docstring",
# "property_type", "property_key", "plural", "compute_parent"]
# def __init__(self, arg, name, compute_name, output_type, docstring="",
# property_type="", property_key=None, plural=False,
# compute_parent=""):
# self.arg = arg
# self.name = name
# self.compute_name = compute_name
# self.output_type = output_type
# self.docstring = docstring
# self.property_type = property_type
# self.property_key = property_key
# self.plural = plural
# self.compute_parent = compute_parent
# self._property_name = None
# def to_xml(self, elt=None):
# if elt is None:
# elt = ET.Element("outputPortSpec")
# elt.set("arg", self.arg)
# elt.set("name", self.name)
# elt.set("compute_name", self.compute_name)
# if self.output_type is not None:
# elt.set("output_type", self.output_type)
# else:
# elt.set("output_type", "__unknown__")
# elt.set("property_type", self.property_type)
# if self.property_key is None:
# elt.set("property_key", "__none__")
# else:
# elt.set("property_key", str(self.property_key))
# elt.set("plural", str(self.plural))
# elt.set("compute_parent", self.compute_parent)
# subelt = ET.Element("docstring")
# subelt.text = str(self.docstring)
# elt.append(subelt)
# return elt
# @classmethod
# def from_xml(cls, elt):
# arg = elt.get("arg", "")
# output_type = elt.get("output_type", "")
# if output_type == "__unknown__":
# output_type = None
# plural = eval(elt.get("plural", "False"))
# if output_type.lower() == "__property__":
# name = elt.get("name", arg + "Properties")
# compute_name = elt.get("compute_name", arg +
# ("s" if plural else ""))
# else:
# name = elt.get("name", arg)
# compute_name = elt.get("name", arg)
# property_type = elt.get("property_type", "")
# property_key = elt.get("property_key", None)
# if property_key is not None:
# if property_key == "__none__":
# property_key = None
# else:
# try:
# property_key = int(property_key)
# except ValueError:
# pass
# compute_parent = elt.get("compute_parent", "")
# docstring = ""
# for child in elt.getchildren():
# if child.tag == "docstring" and child.text:
# docstring = child.text
# return cls(arg, name, compute_name, output_type, docstring,
# property_type, property_key, plural, compute_parent)
# def is_property_output(self):
# return self.output_type.lower() == "__property__"
# def get_property_type(self):
# return "Mpl%sProperties" % \
# capfirst(self.property_type.rsplit('.', 1)[1])
# def get_port_type(self):
# if self.output_type is None:
# return "basic:String"
# return self.output_type
# class InputPortSpec(PortSpec):
# def __init__(self, arg="", name="", port_type=None, docstring="",
# required=False, show_port=False, hide=False, property_type="",
# entry_types=None, values=None, defaults=None,
# translations=None, alternate_specs=None, in_kwargs=True,
# in_args=False, constructor_arg=False):
# PortSpec.__init__(self, arg, name, port_type, docstring, required,
# show_port, hide, property_type)
# self.entry_types = entry_types
# self.values = values
# self.defaults = defaults
# self.translations = translations
# self.in_kwargs = in_kwargs
# self.in_args = in_args
# self.constructor_arg = constructor_arg
# if alternate_specs is None:
# self.alternate_specs = []
# else:
# self.alternate_specs = alternate_specs
# for spec in self.alternate_specs:
# spec.set_parent(self)
# def to_xml(self, elt=None):
# if elt is None:
# elt = ET.Element("inputPortSpec")
# PortSpec.to_xml(self, elt)
# elt.set("in_kwargs", str(self.in_kwargs))
# elt.set("in_args", str(self.in_args))
# elt.set("constructor_arg", str(self.constructor_arg))
# if self.entry_types is not None:
# subelt = ET.Element("entry_types")
# subelt.text = str(self.entry_types)
# elt.append(subelt)
# if self.values is not None:
# subelt = ET.Element("values")
# subelt.text = str(self.values)
# elt.append(subelt)
# if self.translations is not None:
# subelt = ET.Element("translations")
# subelt.text = str(self.translations)
# elt.append(subelt)
# if self.defaults is not None:
# subelt = ET.Element("defaults")
# subelt.text = str(self.defaults)
# elt.append(subelt)
# for spec in self.alternate_specs:
# # print "FOUND ALT:", spec.name, spec.alternate_specs, spec
# subelt = ET.Element("alternateSpec")
# spec.to_xml(subelt)
# elt.append(subelt)
# # if self.entry_types is not None and self.values is not None and \
# # self.defaults is not None and self.translations is not None:
# # for entry_type, value, default, translation in \
# # izip(self.entry_types, self.values, self.defaults,
# # self.translations):
# # subelt = ET.Element("entry")
# # subelt.set("type", str(entry_type))
# # valueselt = ET.Element("values")
# # valueselt.text = str(value)
# # subelt.append(valueselt)
# # transelt = ET.Element("translation")
# # transelt.text = str(translation)
# # subelt.append(transelt)
# # defaultelt = ET.Element("default")
# # if isinstance(default, basestring):
# # defaultelt.text = "'%s'" % default
# # else:
# # defaultelt.text = str(default)
# # subelt.append(defaultelt)
# # elt.append(subelt)
# docelt = ET.Element("docstring")
# docelt.text = self.docstring
# elt.append(docelt)
# return elt
# @classmethod
# def from_xml(cls, elt):
# arg = elt.get("arg", "")
# port_type = elt.get("port_type", "")
# if port_type == "__unknown__":
# port_type = None
# required = eval(elt.get("required", "False"))
# hide = eval(elt.get("hide", "False"))
# in_kwargs = eval(elt.get("in_kwargs", "True"))
# property_type = elt.get("property_type", "")
# constructor_arg = eval(elt.get("constructor_arg", "False"))
# if port_type is not None and port_type.lower() == "__property__":
# name = elt.get("name", arg + "Properties")
# else:
# name = elt.get("name", arg)
# entry_types = None
# values = None
# defaults = None
# translations = None
# docstring = ""
# alternate_specs = []
# for child in elt.getchildren():
# if child.tag == "entry_types":
# entry_types = eval(child.text)
# elif child.tag == "values":
# try:
# values = eval(child.text)
# except SyntaxError:
# values = [[child.text[2:-2]]]
# elif child.tag == "translations":
# try:
# translations = eval(child.text)
# except NameError:
# translations = child.text
# elif child.tag == "defaults":
# if child.text:
# defaults = eval(child.text)
# elif child.tag == "docstring":
# if child.text:
# docstring = child.text
# elif child.tag == "alternateSpec":
# alternate_specs.append(AlternatePortSpec.from_xml(child))
# # if child.tag == "entry":
# # if entry_types is None:
# # entry_types = []
# # values = []
# # defaults = []
# # translations = []
# # entry_types.append(child.get("type", None))
# # for subchild in child.getchildren():
# # if subchild.tag == "values":
# # values.append(eval(subchild.text))
# # elif subchild.tag == "translation":
# # try:
# # translation = eval(subchild.text)
# # except NameError:
# # translation = subchild.text
# # translations.append(translation)
# # elif subchild.tag == "default":
# # defaults.append(eval(subchild.text))
# # elif child.tag == "docstring":
# # docstring = child.text
# return cls(arg, name, port_type, docstring, required, hide,
# entry_types, values, defaults, translations,
# alternate_specs, in_kwargs, property_type, constructor_arg)
# # def has_scalar_version(self):
# # return self.scalar_type and self.scalar_type != self.port_type
# # def get_scalar_name(self):
# # return self.name + "Scalar"
# # def has_sequence_version(self):
# # return self.sequence_type and self.sequence_type != self.port_type
# # def get_sequence_name(self):
# # return self.name + "Sequence"
# # def has_other_version(self):
# # return self.has_scalar_version() or self.has_sequence_version()
# # def get_other_name(self):
# # if self.has_scalar_version():
# # return self.get_scalar_name()
# # elif self.has_sequence_version():
# # return self.get_sequence_name()
# # return None
# # def get_other_type(self):
# # if self.has_scalar_version():
# # return self.scalar_type
# # elif self.has_sequence_version():
# # return self.sequence_type
# # return None
def run():
specs = SpecList.read_from_xml("mpl_plots_raw.xml")
specs.write_to_xml("mpl_plots_raw_out.xml")
if __name__ == '__main__':
run()
|
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from .k8s_io_api_core_v1_affinity import K8sIoApiCoreV1Affinity
from .k8s_io_api_core_v1_downward_api_volume_file import K8sIoApiCoreV1DownwardAPIVolumeFile
from .k8s_io_api_core_v1_exec_action import K8sIoApiCoreV1ExecAction
from .k8s_io_api_core_v1_http_get_action import K8sIoApiCoreV1HTTPGetAction
from .k8s_io_api_core_v1_http_header import K8sIoApiCoreV1HTTPHeader
from .k8s_io_api_core_v1_local_object_reference import K8sIoApiCoreV1LocalObjectReference
from .k8s_io_api_core_v1_node_affinity import K8sIoApiCoreV1NodeAffinity
from .k8s_io_api_core_v1_node_selector import K8sIoApiCoreV1NodeSelector
from .k8s_io_api_core_v1_node_selector_requirement import K8sIoApiCoreV1NodeSelectorRequirement
from .k8s_io_api_core_v1_node_selector_term import K8sIoApiCoreV1NodeSelectorTerm
from .k8s_io_api_core_v1_object_field_selector import K8sIoApiCoreV1ObjectFieldSelector
from .k8s_io_api_core_v1_persistent_volume_claim_spec import K8sIoApiCoreV1PersistentVolumeClaimSpec
from .k8s_io_api_core_v1_persistent_volume_claim_volume_source import K8sIoApiCoreV1PersistentVolumeClaimVolumeSource
from .k8s_io_api_core_v1_pod_affinity import K8sIoApiCoreV1PodAffinity
from .k8s_io_api_core_v1_pod_affinity_term import K8sIoApiCoreV1PodAffinityTerm
from .k8s_io_api_core_v1_pod_anti_affinity import K8sIoApiCoreV1PodAntiAffinity
from .k8s_io_api_core_v1_pod_dns_config import K8sIoApiCoreV1PodDNSConfig
from .k8s_io_api_core_v1_pod_dns_config_option import K8sIoApiCoreV1PodDNSConfigOption
from .k8s_io_api_core_v1_preferred_scheduling_term import K8sIoApiCoreV1PreferredSchedulingTerm
from .k8s_io_api_core_v1_resource_field_selector import K8sIoApiCoreV1ResourceFieldSelector
from .k8s_io_api_core_v1_resource_requirements import K8sIoApiCoreV1ResourceRequirements
from .k8s_io_api_core_v1_tcp_socket_action import K8sIoApiCoreV1TCPSocketAction
from .k8s_io_api_core_v1_toleration import K8sIoApiCoreV1Toleration
from .k8s_io_api_core_v1_typed_local_object_reference import K8sIoApiCoreV1TypedLocalObjectReference
from .k8s_io_api_core_v1_weighted_pod_affinity_term import K8sIoApiCoreV1WeightedPodAffinityTerm
from .k8s_io_apimachinery_pkg_api_resource_quantity import K8sIoApimachineryPkgApiResourceQuantity
from .k8s_io_apimachinery_pkg_apis_meta_v1_api_group import K8sIoApimachineryPkgApisMetaV1APIGroup
from .k8s_io_apimachinery_pkg_apis_meta_v1_api_group_list import K8sIoApimachineryPkgApisMetaV1APIGroupList
from .k8s_io_apimachinery_pkg_apis_meta_v1_api_resource import K8sIoApimachineryPkgApisMetaV1APIResource
from .k8s_io_apimachinery_pkg_apis_meta_v1_api_resource_list import K8sIoApimachineryPkgApisMetaV1APIResourceList
from .k8s_io_apimachinery_pkg_apis_meta_v1_delete_options import K8sIoApimachineryPkgApisMetaV1DeleteOptions
from .k8s_io_apimachinery_pkg_apis_meta_v1_duration import K8sIoApimachineryPkgApisMetaV1Duration
from .k8s_io_apimachinery_pkg_apis_meta_v1_fields_v1 import K8sIoApimachineryPkgApisMetaV1FieldsV1
from .k8s_io_apimachinery_pkg_apis_meta_v1_group_version_for_discovery import K8sIoApimachineryPkgApisMetaV1GroupVersionForDiscovery
from .k8s_io_apimachinery_pkg_apis_meta_v1_label_selector import K8sIoApimachineryPkgApisMetaV1LabelSelector
from .k8s_io_apimachinery_pkg_apis_meta_v1_label_selector_requirement import K8sIoApimachineryPkgApisMetaV1LabelSelectorRequirement
from .k8s_io_apimachinery_pkg_apis_meta_v1_list_meta import K8sIoApimachineryPkgApisMetaV1ListMeta
from .k8s_io_apimachinery_pkg_apis_meta_v1_managed_fields_entry import K8sIoApimachineryPkgApisMetaV1ManagedFieldsEntry
from .k8s_io_apimachinery_pkg_apis_meta_v1_object_meta import K8sIoApimachineryPkgApisMetaV1ObjectMeta
from .k8s_io_apimachinery_pkg_apis_meta_v1_owner_reference import K8sIoApimachineryPkgApisMetaV1OwnerReference
from .k8s_io_apimachinery_pkg_apis_meta_v1_patch import K8sIoApimachineryPkgApisMetaV1Patch
from .k8s_io_apimachinery_pkg_apis_meta_v1_preconditions import K8sIoApimachineryPkgApisMetaV1Preconditions
from .k8s_io_apimachinery_pkg_apis_meta_v1_root_paths import K8sIoApimachineryPkgApisMetaV1RootPaths
from .k8s_io_apimachinery_pkg_apis_meta_v1_server_address_by_client_cidr import K8sIoApimachineryPkgApisMetaV1ServerAddressByClientCIDR
from .k8s_io_apimachinery_pkg_apis_meta_v1_status import K8sIoApimachineryPkgApisMetaV1Status
from .k8s_io_apimachinery_pkg_apis_meta_v1_status_cause import K8sIoApimachineryPkgApisMetaV1StatusCause
from .k8s_io_apimachinery_pkg_apis_meta_v1_status_details import K8sIoApimachineryPkgApisMetaV1StatusDetails
from .k8s_io_apimachinery_pkg_apis_meta_v1_time import K8sIoApimachineryPkgApisMetaV1Time
from .k8s_io_apimachinery_pkg_apis_meta_v1_watch_event import K8sIoApimachineryPkgApisMetaV1WatchEvent
from .k8s_io_apimachinery_pkg_runtime_raw_extension import K8sIoApimachineryPkgRuntimeRawExtension
from .k8s_io_apimachinery_pkg_util_intstr_int_or_string import K8sIoApimachineryPkgUtilIntstrIntOrString
from .v1_access_credential import V1AccessCredential
from .v1_access_credential_secret_source import V1AccessCredentialSecretSource
from .v1_add_volume_options import V1AddVolumeOptions
from .v1_bios import V1BIOS
from .v1_block_size import V1BlockSize
from .v1_bootloader import V1Bootloader
from .v1_cd_rom_target import V1CDRomTarget
from .v1_cpu import V1CPU
from .v1_cpu_feature import V1CPUFeature
from .v1_cert_config import V1CertConfig
from .v1_chassis import V1Chassis
from .v1_client_passthrough_devices import V1ClientPassthroughDevices
from .v1_clock import V1Clock
from .v1_clock_offset_utc import V1ClockOffsetUTC
from .v1_cloud_init_config_drive_source import V1CloudInitConfigDriveSource
from .v1_cloud_init_no_cloud_source import V1CloudInitNoCloudSource
from .v1_component_config import V1ComponentConfig
from .v1_config_drive_ssh_public_key_access_credential_propagation import V1ConfigDriveSSHPublicKeyAccessCredentialPropagation
from .v1_config_map_volume_source import V1ConfigMapVolumeSource
from .v1_container_disk_source import V1ContainerDiskSource
from .v1_custom_block_size import V1CustomBlockSize
from .v1_customize_components import V1CustomizeComponents
from .v1_customize_components_patch import V1CustomizeComponentsPatch
from .v1_dhcp_options import V1DHCPOptions
from .v1_dhcp_private_options import V1DHCPPrivateOptions
from .v1_data_volume_source import V1DataVolumeSource
from .v1_data_volume_template_dummy_status import V1DataVolumeTemplateDummyStatus
from .v1_data_volume_template_spec import V1DataVolumeTemplateSpec
from .v1_developer_configuration import V1DeveloperConfiguration
from .v1_devices import V1Devices
from .v1_disk import V1Disk
from .v1_disk_target import V1DiskTarget
from .v1_disk_verification import V1DiskVerification
from .v1_domain_spec import V1DomainSpec
from .v1_downward_api_volume_source import V1DownwardAPIVolumeSource
from .v1_downward_metrics_volume_source import V1DownwardMetricsVolumeSource
from .v1_efi import V1EFI
from .v1_empty_disk_source import V1EmptyDiskSource
from .v1_ephemeral_volume_source import V1EphemeralVolumeSource
from .v1_feature_apic import V1FeatureAPIC
from .v1_feature_hyperv import V1FeatureHyperv
from .v1_feature_kvm import V1FeatureKVM
from .v1_feature_spinlocks import V1FeatureSpinlocks
from .v1_feature_state import V1FeatureState
from .v1_feature_vendor_id import V1FeatureVendorID
from .v1_features import V1Features
from .v1_filesystem import V1Filesystem
from .v1_filesystem_virtiofs import V1FilesystemVirtiofs
from .v1_firmware import V1Firmware
from .v1_flags import V1Flags
from .v1_flavor_matcher import V1FlavorMatcher
from .v1_freeze_unfreeze_timeout import V1FreezeUnfreezeTimeout
from .v1_gpu import V1GPU
from .v1_generation_status import V1GenerationStatus
from .v1_guest_agent_command_info import V1GuestAgentCommandInfo
from .v1_guest_agent_ping import V1GuestAgentPing
from .v1_hpet_timer import V1HPETTimer
from .v1_host_device import V1HostDevice
from .v1_host_disk import V1HostDisk
from .v1_hotplug_volume_source import V1HotplugVolumeSource
from .v1_hotplug_volume_status import V1HotplugVolumeStatus
from .v1_hugepages import V1Hugepages
from .v1_hyperv_timer import V1HypervTimer
from .v1_i6300_esb_watchdog import V1I6300ESBWatchdog
from .v1_input import V1Input
from .v1_interface import V1Interface
from .v1_interface_bridge import V1InterfaceBridge
from .v1_interface_macvtap import V1InterfaceMacvtap
from .v1_interface_masquerade import V1InterfaceMasquerade
from .v1_interface_sriov import V1InterfaceSRIOV
from .v1_interface_slirp import V1InterfaceSlirp
from .v1_kvm_timer import V1KVMTimer
from .v1_kernel_boot import V1KernelBoot
from .v1_kernel_boot_container import V1KernelBootContainer
from .v1_kube_virt import V1KubeVirt
from .v1_kube_virt_certificate_rotate_strategy import V1KubeVirtCertificateRotateStrategy
from .v1_kube_virt_condition import V1KubeVirtCondition
from .v1_kube_virt_configuration import V1KubeVirtConfiguration
from .v1_kube_virt_list import V1KubeVirtList
from .v1_kube_virt_self_sign_configuration import V1KubeVirtSelfSignConfiguration
from .v1_kube_virt_spec import V1KubeVirtSpec
from .v1_kube_virt_status import V1KubeVirtStatus
from .v1_kube_virt_workload_update_strategy import V1KubeVirtWorkloadUpdateStrategy
from .v1_launch_security import V1LaunchSecurity
from .v1_log_verbosity import V1LogVerbosity
from .v1_lun_target import V1LunTarget
from .v1_machine import V1Machine
from .v1_mediated_devices_configuration import V1MediatedDevicesConfiguration
from .v1_mediated_host_device import V1MediatedHostDevice
from .v1_memory import V1Memory
from .v1_migrate_options import V1MigrateOptions
from .v1_migration_configuration import V1MigrationConfiguration
from .v1_multus_network import V1MultusNetwork
from .v1_numa import V1NUMA
from .v1_numa_guest_mapping_passthrough import V1NUMAGuestMappingPassthrough
from .v1_network import V1Network
from .v1_network_configuration import V1NetworkConfiguration
from .v1_node_mediated_device_types_config import V1NodeMediatedDeviceTypesConfig
from .v1_node_placement import V1NodePlacement
from .v1_pit_timer import V1PITTimer
from .v1_pause_options import V1PauseOptions
from .v1_pci_host_device import V1PciHostDevice
from .v1_permitted_host_devices import V1PermittedHostDevices
from .v1_persistent_volume_claim_info import V1PersistentVolumeClaimInfo
from .v1_persistent_volume_claim_volume_source import V1PersistentVolumeClaimVolumeSource
from .v1_pod_network import V1PodNetwork
from .v1_port import V1Port
from .v1_probe import V1Probe
from .v1_qemu_guest_agent_ssh_public_key_access_credential_propagation import V1QemuGuestAgentSSHPublicKeyAccessCredentialPropagation
from .v1_qemu_guest_agent_user_password_access_credential_propagation import V1QemuGuestAgentUserPasswordAccessCredentialPropagation
from .v1_rest_client_configuration import V1RESTClientConfiguration
from .v1_rtc_timer import V1RTCTimer
from .v1_rate_limiter import V1RateLimiter
from .v1_realtime import V1Realtime
from .v1_reloadable_component_configuration import V1ReloadableComponentConfiguration
from .v1_remove_volume_options import V1RemoveVolumeOptions
from .v1_resource_requirements import V1ResourceRequirements
from .v1_restart_options import V1RestartOptions
from .v1_rng import V1Rng
from .v1_sev import V1SEV
from .v1_sm_bios_configuration import V1SMBiosConfiguration
from .v1_ssh_public_key_access_credential import V1SSHPublicKeyAccessCredential
from .v1_ssh_public_key_access_credential_propagation_method import V1SSHPublicKeyAccessCredentialPropagationMethod
from .v1_ssh_public_key_access_credential_source import V1SSHPublicKeyAccessCredentialSource
from .v1_secret_volume_source import V1SecretVolumeSource
from .v1_service_account_volume_source import V1ServiceAccountVolumeSource
from .v1_sound_device import V1SoundDevice
from .v1_start_options import V1StartOptions
from .v1_stop_options import V1StopOptions
from .v1_sy_nic_timer import V1SyNICTimer
from .v1_sysprep_source import V1SysprepSource
from .v1_timer import V1Timer
from .v1_token_bucket_rate_limiter import V1TokenBucketRateLimiter
from .v1_topology_hints import V1TopologyHints
from .v1_unpause_options import V1UnpauseOptions
from .v1_user_password_access_credential import V1UserPasswordAccessCredential
from .v1_user_password_access_credential_propagation_method import V1UserPasswordAccessCredentialPropagationMethod
from .v1_user_password_access_credential_source import V1UserPasswordAccessCredentialSource
from .v1_vgpu_display_options import V1VGPUDisplayOptions
from .v1_vgpu_options import V1VGPUOptions
from .v1_virtual_machine import V1VirtualMachine
from .v1_virtual_machine_condition import V1VirtualMachineCondition
from .v1_virtual_machine_instance import V1VirtualMachineInstance
from .v1_virtual_machine_instance_condition import V1VirtualMachineInstanceCondition
from .v1_virtual_machine_instance_file_system import V1VirtualMachineInstanceFileSystem
from .v1_virtual_machine_instance_file_system_info import V1VirtualMachineInstanceFileSystemInfo
from .v1_virtual_machine_instance_file_system_list import V1VirtualMachineInstanceFileSystemList
from .v1_virtual_machine_instance_guest_agent_info import V1VirtualMachineInstanceGuestAgentInfo
from .v1_virtual_machine_instance_guest_os_info import V1VirtualMachineInstanceGuestOSInfo
from .v1_virtual_machine_instance_guest_os_user import V1VirtualMachineInstanceGuestOSUser
from .v1_virtual_machine_instance_guest_os_user_list import V1VirtualMachineInstanceGuestOSUserList
from .v1_virtual_machine_instance_list import V1VirtualMachineInstanceList
from .v1_virtual_machine_instance_migration import V1VirtualMachineInstanceMigration
from .v1_virtual_machine_instance_migration_condition import V1VirtualMachineInstanceMigrationCondition
from .v1_virtual_machine_instance_migration_list import V1VirtualMachineInstanceMigrationList
from .v1_virtual_machine_instance_migration_spec import V1VirtualMachineInstanceMigrationSpec
from .v1_virtual_machine_instance_migration_state import V1VirtualMachineInstanceMigrationState
from .v1_virtual_machine_instance_migration_status import V1VirtualMachineInstanceMigrationStatus
from .v1_virtual_machine_instance_network_interface import V1VirtualMachineInstanceNetworkInterface
from .v1_virtual_machine_instance_phase_transition_timestamp import V1VirtualMachineInstancePhaseTransitionTimestamp
from .v1_virtual_machine_instance_preset import V1VirtualMachineInstancePreset
from .v1_virtual_machine_instance_preset_list import V1VirtualMachineInstancePresetList
from .v1_virtual_machine_instance_preset_spec import V1VirtualMachineInstancePresetSpec
from .v1_virtual_machine_instance_replica_set import V1VirtualMachineInstanceReplicaSet
from .v1_virtual_machine_instance_replica_set_condition import V1VirtualMachineInstanceReplicaSetCondition
from .v1_virtual_machine_instance_replica_set_list import V1VirtualMachineInstanceReplicaSetList
from .v1_virtual_machine_instance_replica_set_spec import V1VirtualMachineInstanceReplicaSetSpec
from .v1_virtual_machine_instance_replica_set_status import V1VirtualMachineInstanceReplicaSetStatus
from .v1_virtual_machine_instance_spec import V1VirtualMachineInstanceSpec
from .v1_virtual_machine_instance_status import V1VirtualMachineInstanceStatus
from .v1_virtual_machine_instance_template_spec import V1VirtualMachineInstanceTemplateSpec
from .v1_virtual_machine_list import V1VirtualMachineList
from .v1_virtual_machine_spec import V1VirtualMachineSpec
from .v1_virtual_machine_start_failure import V1VirtualMachineStartFailure
from .v1_virtual_machine_state_change_request import V1VirtualMachineStateChangeRequest
from .v1_virtual_machine_status import V1VirtualMachineStatus
from .v1_virtual_machine_volume_request import V1VirtualMachineVolumeRequest
from .v1_volume import V1Volume
from .v1_volume_snapshot_status import V1VolumeSnapshotStatus
from .v1_volume_status import V1VolumeStatus
from .v1_watchdog import V1Watchdog
from .v1alpha1_condition import V1alpha1Condition
from .v1alpha1_error import V1alpha1Error
from .v1alpha1_migration_policy import V1alpha1MigrationPolicy
from .v1alpha1_migration_policy_list import V1alpha1MigrationPolicyList
from .v1alpha1_migration_policy_spec import V1alpha1MigrationPolicySpec
from .v1alpha1_migration_policy_status import V1alpha1MigrationPolicyStatus
from .v1alpha1_persistent_volume_claim import V1alpha1PersistentVolumeClaim
from .v1alpha1_selectors import V1alpha1Selectors
from .v1alpha1_source_spec import V1alpha1SourceSpec
from .v1alpha1_virtual_machine_cluster_flavor import V1alpha1VirtualMachineClusterFlavor
from .v1alpha1_virtual_machine_cluster_flavor_list import V1alpha1VirtualMachineClusterFlavorList
from .v1alpha1_virtual_machine_flavor import V1alpha1VirtualMachineFlavor
from .v1alpha1_virtual_machine_flavor_list import V1alpha1VirtualMachineFlavorList
from .v1alpha1_virtual_machine_flavor_profile import V1alpha1VirtualMachineFlavorProfile
from .v1alpha1_virtual_machine_pool import V1alpha1VirtualMachinePool
from .v1alpha1_virtual_machine_pool_condition import V1alpha1VirtualMachinePoolCondition
from .v1alpha1_virtual_machine_pool_list import V1alpha1VirtualMachinePoolList
from .v1alpha1_virtual_machine_pool_spec import V1alpha1VirtualMachinePoolSpec
from .v1alpha1_virtual_machine_pool_status import V1alpha1VirtualMachinePoolStatus
from .v1alpha1_virtual_machine_restore import V1alpha1VirtualMachineRestore
from .v1alpha1_virtual_machine_restore_list import V1alpha1VirtualMachineRestoreList
from .v1alpha1_virtual_machine_restore_spec import V1alpha1VirtualMachineRestoreSpec
from .v1alpha1_virtual_machine_restore_status import V1alpha1VirtualMachineRestoreStatus
from .v1alpha1_virtual_machine_snapshot import V1alpha1VirtualMachineSnapshot
from .v1alpha1_virtual_machine_snapshot_content import V1alpha1VirtualMachineSnapshotContent
from .v1alpha1_virtual_machine_snapshot_content_list import V1alpha1VirtualMachineSnapshotContentList
from .v1alpha1_virtual_machine_snapshot_content_spec import V1alpha1VirtualMachineSnapshotContentSpec
from .v1alpha1_virtual_machine_snapshot_content_status import V1alpha1VirtualMachineSnapshotContentStatus
from .v1alpha1_virtual_machine_snapshot_list import V1alpha1VirtualMachineSnapshotList
from .v1alpha1_virtual_machine_snapshot_spec import V1alpha1VirtualMachineSnapshotSpec
from .v1alpha1_virtual_machine_snapshot_status import V1alpha1VirtualMachineSnapshotStatus
from .v1alpha1_virtual_machine_template_spec import V1alpha1VirtualMachineTemplateSpec
from .v1alpha1_volume_backup import V1alpha1VolumeBackup
from .v1alpha1_volume_restore import V1alpha1VolumeRestore
from .v1alpha1_volume_snapshot_status import V1alpha1VolumeSnapshotStatus
from .v1beta1_data_volume_blank_image import V1beta1DataVolumeBlankImage
from .v1beta1_data_volume_checkpoint import V1beta1DataVolumeCheckpoint
from .v1beta1_data_volume_source import V1beta1DataVolumeSource
from .v1beta1_data_volume_source_http import V1beta1DataVolumeSourceHTTP
from .v1beta1_data_volume_source_image_io import V1beta1DataVolumeSourceImageIO
from .v1beta1_data_volume_source_pvc import V1beta1DataVolumeSourcePVC
from .v1beta1_data_volume_source_ref import V1beta1DataVolumeSourceRef
from .v1beta1_data_volume_source_registry import V1beta1DataVolumeSourceRegistry
from .v1beta1_data_volume_source_s3 import V1beta1DataVolumeSourceS3
from .v1beta1_data_volume_source_upload import V1beta1DataVolumeSourceUpload
from .v1beta1_data_volume_source_vddk import V1beta1DataVolumeSourceVDDK
from .v1beta1_data_volume_spec import V1beta1DataVolumeSpec
from .v1beta1_storage_spec import V1beta1StorageSpec
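# All of the generated models are re-exported above so callers can import
# them directly from this package rather than from the individual
# submodules, e.g. something like the following (the top-level package name
# is assumed; adjust to however this client is installed):
#
#     from kubevirt.models import V1VirtualMachineInstance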
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
from oslo_log import log as logging
from oslo_serialization import jsonutils
from webob import exc
from nova.api.openstack.api_version_request \
import MAX_PROXY_API_SUPPORT_VERSION
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import security_groups as \
schema_security_groups
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova.network.security_group import openstack_driver
from nova.policies import security_groups as sg_policies
from nova.virt import netutils
LOG = logging.getLogger(__name__)
ATTRIBUTE_NAME = 'security_groups'
def _authorize_context(req):
context = req.environ['nova.context']
context.can(sg_policies.BASE_POLICY_NAME)
return context
class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
def __init__(self):
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _format_security_group_rule(self, context, rule, group_rule_data=None):
"""Return a security group rule in desired API response format.
        If group_rule_data is passed in, it is used rather than querying
        for it.
"""
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if group_rule_data:
sg_rule['group'] = group_rule_data
elif rule['group_id']:
try:
source_group = self.security_group_api.get(
context, id=rule['group_id'])
except exception.SecurityGroupNotFound:
# NOTE(arosen): There is a possible race condition that can
# occur here if two api calls occur concurrently: one that
# lists the security groups and another one that deletes a
# security group rule that has a group_id before the
# group_id is fetched. To handle this if
# SecurityGroupNotFound is raised we return None instead
# of the rule and the caller should ignore the rule.
LOG.debug("Security Group ID %s does not exist",
rule['group_id'])
return
sg_rule['group'] = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group['id']
security_group['description'] = group['description']
security_group['name'] = group['name']
security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
for rule in group['rules']:
formatted_rule = self._format_security_group_rule(context, rule)
if formatted_rule:
security_group['rules'] += [formatted_rule]
return security_group
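    # For reference, a formatted security group as returned by the methods
    # above looks like the following (values are illustrative only):
    #
    #     {'id': 1, 'name': 'default', 'description': 'default',
    #      'tenant_id': 'project1',
    #      'rules': [{'id': 2, 'parent_group_id': 1, 'ip_protocol': 'tcp',
    #                 'from_port': 22, 'to_port': 22, 'group': {},
    #                 'ip_range': {'cidr': '10.0.0.0/24'}}]}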
def _from_body(self, body, key):
if not body:
raise exc.HTTPBadRequest(
explanation=_("The request body can't be empty"))
value = body.get(key, None)
if value is None:
raise exc.HTTPBadRequest(
explanation=_("Missing parameter %s") % key)
return value
class SecurityGroupController(SecurityGroupControllerBase, wsgi.Controller):
"""The Security group API controller for the OpenStack API."""
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors((400, 404))
def show(self, req, id):
"""Return data about the given security group."""
context = _authorize_context(req)
try:
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
return {'security_group': self._format_security_group(context,
security_group)}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors((400, 404))
@wsgi.response(202)
def delete(self, req, id):
"""Delete a security group."""
context = _authorize_context(req)
try:
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors(404)
def index(self, req):
"""Returns a list of security groups."""
context = _authorize_context(req)
search_opts = {}
search_opts.update(req.GET)
project_id = context.project_id
raw_groups = self.security_group_api.list(context,
project=project_id,
search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
                sorted(result,
                       key=lambda k: (k['tenant_id'], k['name']))}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors((400, 403))
def create(self, req, body):
"""Creates a new security group."""
context = _authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
try:
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
except exception.SecurityGroupLimitExceeded as exp:
raise exc.HTTPForbidden(explanation=exp.format_message())
return {'security_group': self._format_security_group(context,
group_ref)}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors((400, 404))
def update(self, req, id, body):
"""Update a security group."""
context = _authorize_context(req)
try:
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
security_group_data = self._from_body(body, 'security_group')
group_name = security_group_data.get('name', None)
group_description = security_group_data.get('description', None)
try:
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.update_security_group(
context, security_group, group_name, group_description)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase,
wsgi.Controller):
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors((400, 403, 404))
def create(self, req, body):
context = _authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
group_id = sg_rule.get('group_id')
source_group = {}
try:
parent_group_id = self.security_group_api.validate_id(
sg_rule.get('parent_group_id'))
security_group = self.security_group_api.get(context, None,
parent_group_id,
map_exception=True)
if group_id is not None:
group_id = self.security_group_api.validate_id(group_id)
source_group = self.security_group_api.get(
context, id=group_id)
new_rule = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=group_id)
except (exception.Invalid, exception.InvalidCidr) as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
if new_rule is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
new_rule['parent_group_id'] = security_group['id']
if 'cidr' in new_rule:
net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
if net not in ('0.0.0.0', '::') and prefixlen == '0':
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
group_rule_data = None
try:
if group_id:
group_rule_data = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
security_group_rule = (
self.security_group_api.create_security_group_rule(
context, security_group, new_rule))
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.SecurityGroupLimitExceeded as exp:
raise exc.HTTPForbidden(explanation=exp.format_message())
formatted_rule = self._format_security_group_rule(context,
security_group_rule,
group_rule_data)
return {"security_group_rule": formatted_rule}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
def delete(self, req, id):
context = _authorize_context(req)
try:
id = self.security_group_api.validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule['parent_group_id']
security_group = self.security_group_api.get(context, None,
group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.NoUniqueMatch as exp:
raise exc.HTTPConflict(explanation=exp.format_message())
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
class ServerSecurityGroupController(SecurityGroupControllerBase):
@extensions.expected_errors(404)
def index(self, req, server_id):
"""Returns a list of security groups for the given instance."""
context = _authorize_context(req)
self.security_group_api.ensure_default(context)
instance = common.get_instance(self.compute_api, context, server_id)
try:
groups = self.security_group_api.get_instance_security_groups(
context, instance, True)
except (exception.SecurityGroupNotFound,
exception.InstanceNotFound) as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
                sorted(result,
                       key=lambda k: (k['tenant_id'], k['name']))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise exc.HTTPBadRequest(explanation=msg)
return group_name
def _invoke(self, method, context, id, group_name):
instance = common.get_instance(self.compute_api, context, id)
method(context, instance, group_name)
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
context.can(sg_policies.BASE_POLICY_NAME)
group_name = self._parse(body, 'addSecurityGroup')
try:
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
except (exception.SecurityGroupNotFound,
exception.InstanceNotFound) as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.NoUniqueMatch as exp:
raise exc.HTTPConflict(explanation=exp.format_message())
except (exception.SecurityGroupCannotBeApplied,
exception.SecurityGroupExistsForInstance) as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
context.can(sg_policies.BASE_POLICY_NAME)
group_name = self._parse(body, 'removeSecurityGroup')
try:
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
except (exception.SecurityGroupNotFound,
exception.InstanceNotFound) as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.NoUniqueMatch as exp:
raise exc.HTTPConflict(explanation=exp.format_message())
except exception.SecurityGroupNotExistsForInstance as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
def _extend_servers(self, req, servers):
# TODO(arosen) this function should be refactored to reduce duplicate
# code and use get_instance_security_groups instead of get_db_instance.
        if not servers:
return
key = "security_groups"
context = req.environ['nova.context']
if not context.can(sg_policies.BASE_POLICY_NAME, fatal=False):
return
if not openstack_driver.is_neutron_security_groups():
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[ATTRIBUTE_NAME] = [{"name": group.name}
for group in groups]
else:
            # If the method is a POST we get the security groups intended
            # for an instance from the request. The reason for this is
            # that, when using neutron security groups, the requested
            # security groups for the instance are not in the db and have
            # not been sent to neutron yet.
if req.method != 'POST':
sg_instance_bindings = (
self.security_group_api
.get_instances_security_groups_bindings(context,
servers))
for server in servers:
groups = sg_instance_bindings.get(server['id'])
if groups:
server[ATTRIBUTE_NAME] = groups
# In this section of code len(servers) == 1 as you can only POST
# one server in an API request.
else:
            # Parse the request body as JSON.
req_obj = jsonutils.loads(req.body)
            # Add the security groups to the server; if no security group
            # was in the request, add 'default' since that is the group the
            # server is part of.
servers[0][ATTRIBUTE_NAME] = req_obj['server'].get(
ATTRIBUTE_NAME, [{'name': 'default'}])
def _show(self, req, resp_obj):
if 'server' in resp_obj.obj:
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
self._extend_servers(req, list(resp_obj.obj['servers']))
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(server_dict, create_kwargs, body_deprecated_param):
security_groups = server_dict.get(ATTRIBUTE_NAME)
if security_groups is not None:
create_kwargs['security_groups'] = [
sg['name'] for sg in security_groups if sg.get('name')]
create_kwargs['security_groups'] = list(
set(create_kwargs['security_groups']))
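# A minimal sketch (not wired into the API) of the create hook above:
# requested group names are collected and de-duplicated before being handed
# to the compute API.
def _example_server_create_dedup():
    create_kwargs = {}
    server_create({'security_groups': [{'name': 'default'},
                                       {'name': 'default'}]},
                  create_kwargs, None)
    # create_kwargs is now {'security_groups': ['default']}
    return create_kwargs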
def get_server_create_schema(version):
if version == '2.0':
return schema_security_groups.server_create_v20
return schema_security_groups.server_create
|
|
"""Support for Apple HomeKit."""
from __future__ import annotations
import asyncio
from copy import deepcopy
import ipaddress
import logging
import os
from aiohttp import web
from pyhap.const import STANDALONE_AID
import voluptuous as vol
from homeassistant.components import device_automation, network, zeroconf
from homeassistant.components.binary_sensor import (
DOMAIN as BINARY_SENSOR_DOMAIN,
BinarySensorDeviceClass,
)
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.humidifier import DOMAIN as HUMIDIFIER_DOMAIN
from homeassistant.components.network.const import MDNS_TARGET_IP
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN, SensorDeviceClass
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_BATTERY_CHARGING,
ATTR_BATTERY_LEVEL,
ATTR_DEVICE_ID,
ATTR_ENTITY_ID,
ATTR_HW_VERSION,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SW_VERSION,
CONF_DEVICES,
CONF_IP_ADDRESS,
CONF_NAME,
CONF_PORT,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
SERVICE_RELOAD,
)
from homeassistant.core import CoreState, HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError, Unauthorized
from homeassistant.helpers import device_registry, entity_registry
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import (
BASE_FILTER_SCHEMA,
FILTER_SCHEMA,
EntityFilter,
)
from homeassistant.helpers.reload import async_integration_yaml_config
from homeassistant.helpers.service import async_extract_referenced_entity_ids
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import IntegrationNotFound, async_get_integration
from . import ( # noqa: F401
type_cameras,
type_covers,
type_fans,
type_humidifiers,
type_lights,
type_locks,
type_media_players,
type_remotes,
type_security_systems,
type_sensors,
type_switches,
type_thermostats,
)
from .accessories import HomeBridge, HomeDriver, get_accessory
from .aidmanager import AccessoryAidStorage
from .const import (
ATTR_INTEGRATION,
BRIDGE_NAME,
BRIDGE_SERIAL_NUMBER,
CONF_ADVERTISE_IP,
CONF_ENTITY_CONFIG,
CONF_ENTRY_INDEX,
CONF_EXCLUDE_ACCESSORY_MODE,
CONF_FILTER,
CONF_HOMEKIT_MODE,
CONF_LINKED_BATTERY_CHARGING_SENSOR,
CONF_LINKED_BATTERY_SENSOR,
CONF_LINKED_DOORBELL_SENSOR,
CONF_LINKED_HUMIDITY_SENSOR,
CONF_LINKED_MOTION_SENSOR,
CONFIG_OPTIONS,
DEFAULT_EXCLUDE_ACCESSORY_MODE,
DEFAULT_HOMEKIT_MODE,
DEFAULT_PORT,
DOMAIN,
HOMEKIT,
HOMEKIT_MODE_ACCESSORY,
HOMEKIT_MODES,
HOMEKIT_PAIRING_QR,
HOMEKIT_PAIRING_QR_SECRET,
MANUFACTURER,
PERSIST_LOCK,
SERVICE_HOMEKIT_RESET_ACCESSORY,
SERVICE_HOMEKIT_UNPAIR,
SHUTDOWN_TIMEOUT,
)
from .type_triggers import DeviceTriggerAccessory
from .util import (
accessory_friendly_name,
async_dismiss_setup_message,
async_port_is_available,
async_show_setup_message,
get_persist_fullpath_for_entry_id,
remove_state_files_for_entry_id,
state_needs_accessory_mode,
validate_entity_config,
)
_LOGGER = logging.getLogger(__name__)
MAX_DEVICES = 150
# #### Driver Status ####
STATUS_READY = 0
STATUS_RUNNING = 1
STATUS_STOPPED = 2
STATUS_WAIT = 3
PORT_CLEANUP_CHECK_INTERVAL_SECS = 1
_HOMEKIT_CONFIG_UPDATE_TIME = (
    5  # number of seconds to wait for HomeKit to see the c# (config number) change
)
def _has_all_unique_names_and_ports(bridges):
"""Validate that each homekit bridge configured has a unique name."""
names = [bridge[CONF_NAME] for bridge in bridges]
ports = [bridge[CONF_PORT] for bridge in bridges]
vol.Schema(vol.Unique())(names)
vol.Schema(vol.Unique())(ports)
return bridges
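# For example, two bridges sharing a name (or a port) fail validation
# (values below are illustrative only):
#
#     _has_all_unique_names_and_ports(
#         [{CONF_NAME: "HASS Bridge", CONF_PORT: 21063},
#          {CONF_NAME: "HASS Bridge", CONF_PORT: 21064}])
#     # raises vol.Invalid because of the duplicate name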
BRIDGE_SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_HOMEKIT_MODE, default=DEFAULT_HOMEKIT_MODE): vol.In(
HOMEKIT_MODES
),
vol.Optional(CONF_NAME, default=BRIDGE_NAME): vol.All(
cv.string, vol.Length(min=3, max=25)
),
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_IP_ADDRESS): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_ADVERTISE_IP): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_FILTER, default={}): BASE_FILTER_SCHEMA,
vol.Optional(CONF_ENTITY_CONFIG, default={}): validate_entity_config,
vol.Optional(CONF_DEVICES): cv.ensure_list,
},
extra=vol.ALLOW_EXTRA,
),
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.ensure_list, [BRIDGE_SCHEMA], _has_all_unique_names_and_ports)},
extra=vol.ALLOW_EXTRA,
)
RESET_ACCESSORY_SERVICE_SCHEMA = vol.Schema(
{vol.Required(ATTR_ENTITY_ID): cv.entity_ids}
)
UNPAIR_SERVICE_SCHEMA = vol.All(
vol.Schema(cv.ENTITY_SERVICE_FIELDS),
cv.has_at_least_one_key(ATTR_DEVICE_ID),
)
def _async_all_homekit_instances(hass: HomeAssistant) -> list[HomeKit]:
"""All active HomeKit instances."""
return [
data[HOMEKIT]
for data in hass.data[DOMAIN].values()
if isinstance(data, dict) and HOMEKIT in data
]
def _async_get_entries_by_name(current_entries):
"""Return a dict of the entries by name."""
    # For backwards compat, it's possible the first bridge is using the
    # default name.
return {entry.data.get(CONF_NAME, BRIDGE_NAME): entry for entry in current_entries}
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the HomeKit from yaml."""
hass.data.setdefault(DOMAIN, {})[PERSIST_LOCK] = asyncio.Lock()
_async_register_events_and_services(hass)
if DOMAIN not in config:
return True
current_entries = hass.config_entries.async_entries(DOMAIN)
entries_by_name = _async_get_entries_by_name(current_entries)
for index, conf in enumerate(config[DOMAIN]):
if _async_update_config_entry_if_from_yaml(hass, entries_by_name, conf):
continue
conf[CONF_ENTRY_INDEX] = index
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=conf,
)
)
return True
@callback
def _async_update_config_entry_if_from_yaml(hass, entries_by_name, conf):
"""Update a config entry with the latest yaml.
Returns True if a matching config entry was found
Returns False if there is no matching config entry
"""
bridge_name = conf[CONF_NAME]
if (
bridge_name in entries_by_name
and entries_by_name[bridge_name].source == SOURCE_IMPORT
):
entry = entries_by_name[bridge_name]
# If they alter the yaml config we import the changes
# since there currently is no practical way to support
# all the options in the UI at this time.
data = conf.copy()
options = {}
for key in CONFIG_OPTIONS:
if key in data:
options[key] = data[key]
del data[key]
hass.config_entries.async_update_entry(entry, data=data, options=options)
return True
return False
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up HomeKit from a config entry."""
_async_import_options_from_data_if_missing(hass, entry)
conf = entry.data
options = entry.options
name = conf[CONF_NAME]
port = conf[CONF_PORT]
_LOGGER.debug("Begin setup HomeKit for %s", name)
# ip_address and advertise_ip are yaml only
ip_address = conf.get(
CONF_IP_ADDRESS, await network.async_get_source_ip(hass, MDNS_TARGET_IP)
)
advertise_ip = conf.get(CONF_ADVERTISE_IP)
    # exclude_accessory_mode is only used for config flow
    # to indicate that the config entry was set up after
    # we started creating config entries for entities that
    # need to run in accessory mode and that we should never
    # include these entities on the bridge. For backwards
    # compatibility with users who have not migrated yet, we do
    # not exclude these entities by default, as we cannot migrate
    # automatically since it requires a re-pairing.
exclude_accessory_mode = conf.get(
CONF_EXCLUDE_ACCESSORY_MODE, DEFAULT_EXCLUDE_ACCESSORY_MODE
)
homekit_mode = options.get(CONF_HOMEKIT_MODE, DEFAULT_HOMEKIT_MODE)
entity_config = options.get(CONF_ENTITY_CONFIG, {}).copy()
entity_filter = FILTER_SCHEMA(options.get(CONF_FILTER, {}))
devices = options.get(CONF_DEVICES, [])
homekit = HomeKit(
hass,
name,
port,
ip_address,
entity_filter,
exclude_accessory_mode,
entity_config,
homekit_mode,
advertise_ip,
entry.entry_id,
entry.title,
devices=devices,
)
entry.async_on_unload(entry.add_update_listener(_async_update_listener))
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, homekit.async_stop)
)
hass.data[DOMAIN][entry.entry_id] = {HOMEKIT: homekit}
if hass.state == CoreState.running:
await homekit.async_start()
else:
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, homekit.async_start)
return True
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle options update."""
if entry.source == SOURCE_IMPORT:
return
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
async_dismiss_setup_message(hass, entry.entry_id)
homekit = hass.data[DOMAIN][entry.entry_id][HOMEKIT]
if homekit.status == STATUS_RUNNING:
await homekit.async_stop()
logged_shutdown_wait = False
for _ in range(0, SHUTDOWN_TIMEOUT):
if async_port_is_available(entry.data[CONF_PORT]):
break
if not logged_shutdown_wait:
_LOGGER.info("Waiting for the HomeKit server to shutdown")
logged_shutdown_wait = True
await asyncio.sleep(PORT_CLEANUP_CHECK_INTERVAL_SECS)
hass.data[DOMAIN].pop(entry.entry_id)
return True
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Remove a config entry."""
return await hass.async_add_executor_job(
remove_state_files_for_entry_id, hass, entry.entry_id
)
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
options = deepcopy(dict(entry.options))
data = deepcopy(dict(entry.data))
modified = False
for importable_option in CONFIG_OPTIONS:
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
del data[importable_option]
modified = True
if modified:
hass.config_entries.async_update_entry(entry, data=data, options=options)
@callback
def _async_register_events_and_services(hass: HomeAssistant):
"""Register events and services for HomeKit."""
hass.http.register_view(HomeKitPairingQRView)
async def async_handle_homekit_reset_accessory(service: ServiceCall) -> None:
"""Handle reset accessory HomeKit service call."""
for homekit in _async_all_homekit_instances(hass):
if homekit.status != STATUS_RUNNING:
_LOGGER.warning(
"HomeKit is not running. Either it is waiting to be "
"started or has been stopped"
)
continue
entity_ids = service.data.get("entity_id")
await homekit.async_reset_accessories(entity_ids)
hass.services.async_register(
DOMAIN,
SERVICE_HOMEKIT_RESET_ACCESSORY,
async_handle_homekit_reset_accessory,
schema=RESET_ACCESSORY_SERVICE_SCHEMA,
)
async def async_handle_homekit_unpair(service: ServiceCall) -> None:
"""Handle unpair HomeKit service call."""
referenced = async_extract_referenced_entity_ids(hass, service)
dev_reg = device_registry.async_get(hass)
for device_id in referenced.referenced_devices:
if not (dev_reg_ent := dev_reg.async_get(device_id)):
raise HomeAssistantError(f"No device found for device id: {device_id}")
macs = [
cval
for ctype, cval in dev_reg_ent.connections
if ctype == device_registry.CONNECTION_NETWORK_MAC
]
matching_instances = [
homekit
for homekit in _async_all_homekit_instances(hass)
if homekit.driver
and device_registry.format_mac(homekit.driver.state.mac) in macs
]
if not matching_instances:
raise HomeAssistantError(
f"No homekit accessory found for device id: {device_id}"
)
for homekit in matching_instances:
homekit.async_unpair()
hass.services.async_register(
DOMAIN,
SERVICE_HOMEKIT_UNPAIR,
async_handle_homekit_unpair,
schema=UNPAIR_SERVICE_SCHEMA,
)
async def _handle_homekit_reload(service: ServiceCall) -> None:
"""Handle start HomeKit service call."""
config = await async_integration_yaml_config(hass, DOMAIN)
if not config or DOMAIN not in config:
return
current_entries = hass.config_entries.async_entries(DOMAIN)
entries_by_name = _async_get_entries_by_name(current_entries)
for conf in config[DOMAIN]:
_async_update_config_entry_if_from_yaml(hass, entries_by_name, conf)
reload_tasks = [
hass.config_entries.async_reload(entry.entry_id)
for entry in current_entries
]
await asyncio.gather(*reload_tasks)
hass.helpers.service.async_register_admin_service(
DOMAIN,
SERVICE_RELOAD,
_handle_homekit_reload,
)
class HomeKit:
"""Class to handle all actions between HomeKit and Home Assistant."""
def __init__(
self,
hass,
name,
port,
ip_address,
entity_filter,
exclude_accessory_mode,
entity_config,
homekit_mode,
advertise_ip=None,
entry_id=None,
entry_title=None,
devices=None,
):
"""Initialize a HomeKit object."""
self.hass = hass
self._name = name
self._port = port
self._ip_address = ip_address
self._filter: EntityFilter = entity_filter
self._config = entity_config
self._exclude_accessory_mode = exclude_accessory_mode
self._advertise_ip = advertise_ip
self._entry_id = entry_id
self._entry_title = entry_title
self._homekit_mode = homekit_mode
self._devices = devices or []
self.aid_storage = None
self.status = STATUS_READY
self.bridge = None
self.driver = None
def setup(self, async_zeroconf_instance, uuid):
"""Set up bridge and accessory driver."""
persist_file = get_persist_fullpath_for_entry_id(self.hass, self._entry_id)
self.driver = HomeDriver(
self.hass,
self._entry_id,
self._name,
self._entry_title,
loop=self.hass.loop,
address=self._ip_address,
port=self._port,
persist_file=persist_file,
advertised_address=self._advertise_ip,
async_zeroconf_instance=async_zeroconf_instance,
zeroconf_server=f"{uuid}-hap.local.",
)
# If we do not load, the mac address will be wrong
# as pyhap uses a random one until state is restored
if os.path.exists(persist_file):
self.driver.load()
async def async_reset_accessories(self, entity_ids):
"""Reset the accessory to load the latest configuration."""
if not self.bridge:
await self.async_reset_accessories_in_accessory_mode(entity_ids)
return
await self.async_reset_accessories_in_bridge_mode(entity_ids)
async def async_reset_accessories_in_accessory_mode(self, entity_ids):
"""Reset accessories in accessory mode."""
acc = self.driver.accessory
if acc.entity_id not in entity_ids:
return
await acc.stop()
if not (state := self.hass.states.get(acc.entity_id)):
_LOGGER.warning(
"The underlying entity %s disappeared during reset", acc.entity
)
return
if new_acc := self._async_create_single_accessory([state]):
self.driver.accessory = new_acc
self.hass.async_add_job(new_acc.run)
await self.async_config_changed()
async def async_reset_accessories_in_bridge_mode(self, entity_ids):
"""Reset accessories in bridge mode."""
new = []
for entity_id in entity_ids:
aid = self.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
if aid not in self.bridge.accessories:
continue
_LOGGER.info(
"HomeKit Bridge %s will reset accessory with linked entity_id %s",
self._name,
entity_id,
)
acc = await self.async_remove_bridge_accessory(aid)
if state := self.hass.states.get(acc.entity_id):
new.append(state)
else:
_LOGGER.warning(
"The underlying entity %s disappeared during reset", acc.entity
)
if not new:
# No matched accessories, probably on another bridge
return
await self.async_config_changed()
await asyncio.sleep(_HOMEKIT_CONFIG_UPDATE_TIME)
for state in new:
acc = self.add_bridge_accessory(state)
if acc:
self.hass.async_add_job(acc.run)
await self.async_config_changed()
async def async_config_changed(self):
"""Call config changed which writes out the new config to disk."""
await self.hass.async_add_executor_job(self.driver.config_changed)
def add_bridge_accessory(self, state):
"""Try adding accessory to bridge if configured beforehand."""
if self._would_exceed_max_devices(state.entity_id):
return
if state_needs_accessory_mode(state):
if self._exclude_accessory_mode:
return
_LOGGER.warning(
"The bridge %s has entity %s. For best performance, "
"and to prevent unexpected unavailability, create and "
"pair a separate HomeKit instance in accessory mode for "
"this entity",
self._name,
state.entity_id,
)
aid = self.aid_storage.get_or_allocate_aid_for_entity_id(state.entity_id)
conf = self._config.get(state.entity_id, {}).copy()
# If an accessory cannot be created or added due to an exception
# of any kind (usually in pyhap) it should not prevent
# the rest of the accessories from being created
try:
acc = get_accessory(self.hass, self.driver, state, aid, conf)
if acc is not None:
self.bridge.add_accessory(acc)
return acc
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Failed to create a HomeKit accessory for %s", state.entity_id
)
return None
def _would_exceed_max_devices(self, name):
"""Check if adding another devices would reach the limit and log."""
# The bridge itself counts as an accessory
if len(self.bridge.accessories) + 1 >= MAX_DEVICES:
_LOGGER.warning(
"Cannot add %s as this would exceed the %d device limit. Consider using the filter option",
name,
MAX_DEVICES,
)
return True
return False
def add_bridge_triggers_accessory(self, device, device_triggers):
"""Add device automation triggers to the bridge."""
if self._would_exceed_max_devices(device.name):
return
aid = self.aid_storage.get_or_allocate_aid(device.id, device.id)
# If an accessory cannot be created or added due to an exception
# of any kind (usually in pyhap) it should not prevent
# the rest of the accessories from being created
config = {}
self._fill_config_from_device_registry_entry(device, config)
self.bridge.add_accessory(
DeviceTriggerAccessory(
self.hass,
self.driver,
device.name,
None,
aid,
config,
device_id=device.id,
device_triggers=device_triggers,
)
)
async def async_remove_bridge_accessory(self, aid):
"""Try adding accessory to bridge if configured beforehand."""
if acc := self.bridge.accessories.pop(aid, None):
await acc.stop()
return acc
async def async_configure_accessories(self):
"""Configure accessories for the included states."""
dev_reg = device_registry.async_get(self.hass)
ent_reg = entity_registry.async_get(self.hass)
device_lookup = ent_reg.async_get_device_class_lookup(
{
(BINARY_SENSOR_DOMAIN, BinarySensorDeviceClass.BATTERY_CHARGING),
(BINARY_SENSOR_DOMAIN, BinarySensorDeviceClass.MOTION),
(BINARY_SENSOR_DOMAIN, BinarySensorDeviceClass.OCCUPANCY),
(SENSOR_DOMAIN, SensorDeviceClass.BATTERY),
(SENSOR_DOMAIN, SensorDeviceClass.HUMIDITY),
}
)
entity_states = []
for state in self.hass.states.async_all():
entity_id = state.entity_id
if not self._filter(entity_id):
continue
if ent_reg_ent := ent_reg.async_get(entity_id):
if (
ent_reg_ent.entity_category is not None
and not self._filter.explicitly_included(entity_id)
):
continue
await self._async_set_device_info_attributes(
ent_reg_ent, dev_reg, entity_id
)
self._async_configure_linked_sensors(ent_reg_ent, device_lookup, state)
entity_states.append(state)
return entity_states
async def async_start(self, *args):
"""Load storage and start."""
if self.status != STATUS_READY:
return
self.status = STATUS_WAIT
async_zc_instance = await zeroconf.async_get_async_instance(self.hass)
uuid = await self.hass.helpers.instance_id.async_get()
await self.hass.async_add_executor_job(self.setup, async_zc_instance, uuid)
self.aid_storage = AccessoryAidStorage(self.hass, self._entry_id)
await self.aid_storage.async_initialize()
if not await self._async_create_accessories():
return
self._async_register_bridge()
_LOGGER.debug("Driver start for %s", self._name)
await self.driver.async_start()
async with self.hass.data[DOMAIN][PERSIST_LOCK]:
await self.hass.async_add_executor_job(self.driver.persist)
self.status = STATUS_RUNNING
if self.driver.state.paired:
return
self._async_show_setup_message()
@callback
def _async_show_setup_message(self):
"""Show the pairing setup message."""
async_show_setup_message(
self.hass,
self._entry_id,
accessory_friendly_name(self._entry_title, self.driver.accessory),
self.driver.state.pincode,
self.driver.accessory.xhm_uri(),
)
@callback
def async_unpair(self):
"""Remove all pairings for an accessory so it can be repaired."""
state = self.driver.state
for client_uuid in list(state.paired_clients):
# We need to check again since removing a single client
# can result in removing all the clients that the client
# granted access to if it was an admin, otherwise
# remove_paired_client can generate a KeyError
if client_uuid in state.paired_clients:
state.remove_paired_client(client_uuid)
self.driver.async_persist()
self.driver.async_update_advertisement()
self._async_show_setup_message()
@callback
def _async_register_bridge(self):
"""Register the bridge as a device so homekit_controller and exclude it from discovery."""
dev_reg = device_registry.async_get(self.hass)
formatted_mac = device_registry.format_mac(self.driver.state.mac)
# Connections and identifiers are both used here.
#
# connections exists so homekit_controller can know the
# virtual mac address of the bridge and know to not offer
# it via discovery.
#
# identifiers is used as well since the virtual mac may change
# because it will not survive manual pairing resets (deleting state file)
# which we have trained users to do over the past few years
# because this was the way you had to fix homekit when pairing
# failed.
#
connection = (device_registry.CONNECTION_NETWORK_MAC, formatted_mac)
identifier = (DOMAIN, self._entry_id, BRIDGE_SERIAL_NUMBER)
self._async_purge_old_bridges(dev_reg, identifier, connection)
is_accessory_mode = self._homekit_mode == HOMEKIT_MODE_ACCESSORY
hk_mode_name = "Accessory" if is_accessory_mode else "Bridge"
dev_reg.async_get_or_create(
config_entry_id=self._entry_id,
identifiers={identifier},
connections={connection},
manufacturer=MANUFACTURER,
name=accessory_friendly_name(self._entry_title, self.driver.accessory),
model=f"HomeKit {hk_mode_name}",
entry_type=device_registry.DeviceEntryType.SERVICE,
)
@callback
def _async_purge_old_bridges(self, dev_reg, identifier, connection):
"""Purge bridges that exist from failed pairing or manual resets."""
devices_to_purge = []
for entry in dev_reg.devices.values():
if self._entry_id in entry.config_entries and (
identifier not in entry.identifiers
or connection not in entry.connections
):
devices_to_purge.append(entry.id)
for device_id in devices_to_purge:
dev_reg.async_remove_device(device_id)
@callback
def _async_create_single_accessory(self, entity_states):
"""Create a single HomeKit accessory (accessory mode)."""
if not entity_states:
_LOGGER.error(
"HomeKit %s cannot startup: entity not available: %s",
self._name,
self._filter.config,
)
return None
state = entity_states[0]
conf = self._config.get(state.entity_id, {}).copy()
acc = get_accessory(self.hass, self.driver, state, STANDALONE_AID, conf)
if acc is None:
_LOGGER.error(
"HomeKit %s cannot startup: entity not supported: %s",
self._name,
self._filter.config,
)
return acc
async def _async_create_bridge_accessory(self, entity_states):
"""Create a HomeKit bridge with accessories. (bridge mode)."""
self.bridge = HomeBridge(self.hass, self.driver, self._name)
for state in entity_states:
self.add_bridge_accessory(state)
dev_reg = device_registry.async_get(self.hass)
if self._devices:
valid_device_ids = []
for device_id in self._devices:
if not dev_reg.async_get(device_id):
_LOGGER.warning(
"HomeKit %s cannot add device %s because it is missing from the device registry",
self._name,
device_id,
)
else:
valid_device_ids.append(device_id)
for device_id, device_triggers in (
await device_automation.async_get_device_automations(
self.hass,
device_automation.DeviceAutomationType.TRIGGER,
valid_device_ids,
)
).items():
self.add_bridge_triggers_accessory(
dev_reg.async_get(device_id), device_triggers
)
return self.bridge
async def _async_create_accessories(self):
"""Create the accessories."""
entity_states = await self.async_configure_accessories()
if self._homekit_mode == HOMEKIT_MODE_ACCESSORY:
acc = self._async_create_single_accessory(entity_states)
else:
acc = await self._async_create_bridge_accessory(entity_states)
if acc is None:
return False
# No need to load/persist as we do it in setup
self.driver.accessory = acc
return True
async def async_stop(self, *args):
"""Stop the accessory driver."""
if self.status != STATUS_RUNNING:
return
self.status = STATUS_STOPPED
_LOGGER.debug("Driver stop for %s", self._name)
await self.driver.async_stop()
@callback
def _async_configure_linked_sensors(self, ent_reg_ent, device_lookup, state):
if (
ent_reg_ent is None
or ent_reg_ent.device_id is None
or ent_reg_ent.device_id not in device_lookup
or (ent_reg_ent.device_class or ent_reg_ent.original_device_class)
in (BinarySensorDeviceClass.BATTERY_CHARGING, SensorDeviceClass.BATTERY)
):
return
if ATTR_BATTERY_CHARGING not in state.attributes:
battery_charging_binary_sensor_entity_id = device_lookup[
ent_reg_ent.device_id
].get((BINARY_SENSOR_DOMAIN, BinarySensorDeviceClass.BATTERY_CHARGING))
if battery_charging_binary_sensor_entity_id:
self._config.setdefault(state.entity_id, {}).setdefault(
CONF_LINKED_BATTERY_CHARGING_SENSOR,
battery_charging_binary_sensor_entity_id,
)
if ATTR_BATTERY_LEVEL not in state.attributes:
battery_sensor_entity_id = device_lookup[ent_reg_ent.device_id].get(
(SENSOR_DOMAIN, SensorDeviceClass.BATTERY)
)
if battery_sensor_entity_id:
self._config.setdefault(state.entity_id, {}).setdefault(
CONF_LINKED_BATTERY_SENSOR, battery_sensor_entity_id
)
if state.entity_id.startswith(f"{CAMERA_DOMAIN}."):
motion_binary_sensor_entity_id = device_lookup[ent_reg_ent.device_id].get(
(BINARY_SENSOR_DOMAIN, BinarySensorDeviceClass.MOTION)
)
if motion_binary_sensor_entity_id:
self._config.setdefault(state.entity_id, {}).setdefault(
CONF_LINKED_MOTION_SENSOR,
motion_binary_sensor_entity_id,
)
doorbell_binary_sensor_entity_id = device_lookup[ent_reg_ent.device_id].get(
(BINARY_SENSOR_DOMAIN, BinarySensorDeviceClass.OCCUPANCY)
)
if doorbell_binary_sensor_entity_id:
self._config.setdefault(state.entity_id, {}).setdefault(
CONF_LINKED_DOORBELL_SENSOR,
doorbell_binary_sensor_entity_id,
)
if state.entity_id.startswith(f"{HUMIDIFIER_DOMAIN}."):
current_humidity_sensor_entity_id = device_lookup[
ent_reg_ent.device_id
].get((SENSOR_DOMAIN, SensorDeviceClass.HUMIDITY))
if current_humidity_sensor_entity_id:
self._config.setdefault(state.entity_id, {}).setdefault(
CONF_LINKED_HUMIDITY_SENSOR,
current_humidity_sensor_entity_id,
)
async def _async_set_device_info_attributes(self, ent_reg_ent, dev_reg, entity_id):
"""Set attributes that will be used for homekit device info."""
ent_cfg = self._config.setdefault(entity_id, {})
if ent_reg_ent.device_id:
if dev_reg_ent := dev_reg.async_get(ent_reg_ent.device_id):
self._fill_config_from_device_registry_entry(dev_reg_ent, ent_cfg)
if ATTR_MANUFACTURER not in ent_cfg:
try:
integration = await async_get_integration(
self.hass, ent_reg_ent.platform
)
ent_cfg[ATTR_INTEGRATION] = integration.name
except IntegrationNotFound:
ent_cfg[ATTR_INTEGRATION] = ent_reg_ent.platform
def _fill_config_from_device_registry_entry(self, device_entry, config):
"""Populate a config dict from the registry."""
if device_entry.manufacturer:
config[ATTR_MANUFACTURER] = device_entry.manufacturer
if device_entry.model:
config[ATTR_MODEL] = device_entry.model
if device_entry.sw_version:
config[ATTR_SW_VERSION] = device_entry.sw_version
if device_entry.hw_version:
config[ATTR_HW_VERSION] = device_entry.hw_version
if device_entry.config_entries:
first_entry = list(device_entry.config_entries)[0]
if entry := self.hass.config_entries.async_get_entry(first_entry):
config[ATTR_INTEGRATION] = entry.domain
class HomeKitPairingQRView(HomeAssistantView):
"""Display the homekit pairing code at a protected url."""
url = "/api/homekit/pairingqr"
name = "api:homekit:pairingqr"
requires_auth = False
async def get(self, request):
"""Retrieve the pairing QRCode image."""
# pylint: disable=no-self-use
if not request.query_string:
raise Unauthorized()
entry_id, secret = request.query_string.split("-")
if (
entry_id not in request.app["hass"].data[DOMAIN]
or secret
!= request.app["hass"].data[DOMAIN][entry_id][HOMEKIT_PAIRING_QR_SECRET]
):
raise Unauthorized()
return web.Response(
body=request.app["hass"].data[DOMAIN][entry_id][HOMEKIT_PAIRING_QR],
content_type="image/svg+xml",
)
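# A small illustrative helper, not part of the integration itself: given an
# entry_id and that entry's pairing QR secret, this shows the URL shape that
# HomeKitPairingQRView expects (the raw query string is "<entry_id>-<secret>").
def _example_pairing_qr_url(entry_id: str, secret: str) -> str:
    """Build the pairing QR URL the view above would accept (sketch only)."""
    return f"{HomeKitPairingQRView.url}?{entry_id}-{secret}"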
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import decimal
import random
import re
import time
from eventlet import timeout as eventlet_timeout
from oslo.config import cfg
from nova.compute import power_state
from nova import exception as n_exc
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.virt.powervm import blockdev
from nova.virt.powervm import command
from nova.virt.powervm import common
from nova.virt.powervm import constants
from nova.virt.powervm import exception
from nova.virt.powervm import lpar as LPAR
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def get_powervm_operator():
if CONF.powervm.powervm_mgr_type == 'ivm':
return IVMOperator(common.Connection(CONF.powervm.powervm_mgr,
CONF.powervm.powervm_mgr_user,
CONF.powervm.powervm_mgr_passwd))
def get_powervm_disk_adapter():
return blockdev.PowerVMLocalVolumeAdapter(
common.Connection(CONF.powervm.powervm_mgr,
CONF.powervm.powervm_mgr_user,
CONF.powervm.powervm_mgr_passwd))
class PowerVMOperator(object):
"""PowerVM main operator.
The PowerVMOperator is intended to wrap all operations
from the driver and handle either IVM or HMC managed systems.
"""
def __init__(self):
self._operator = get_powervm_operator()
self._disk_adapter = get_powervm_disk_adapter()
self._host_stats = {}
self._update_host_stats()
def get_info(self, instance_name):
"""Get the current status of an LPAR instance.
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
:raises: PowerVMLPARInstanceNotFound
"""
lpar_instance = self._get_instance(instance_name)
state = constants.POWERVM_POWER_STATE.get(
lpar_instance['state'], power_state.NOSTATE)
return {'state': state,
'max_mem': lpar_instance['max_mem'],
'mem': lpar_instance['desired_mem'],
'num_cpu': lpar_instance['max_procs'],
'cpu_time': lpar_instance['uptime']}
def instance_exists(self, instance_name):
lpar_instance = self._operator.get_lpar(instance_name)
return True if lpar_instance else False
def _get_instance(self, instance_name):
"""Check whether or not the LPAR instance exists and return it."""
lpar_instance = self._operator.get_lpar(instance_name)
if lpar_instance is None:
LOG.error(_("LPAR instance '%s' not found") % instance_name)
raise exception.PowerVMLPARInstanceNotFound(
instance_name=instance_name)
return lpar_instance
def list_instances(self):
"""Get all instances' name as a list
Return the names of all the instances known to the virtualization
layer, as a list.
"""
lpar_instances = self._operator.list_lpar_instances()
return lpar_instances
def get_available_resource(self):
"""Retrieve resource info.
:returns: dictionary containing resource info
"""
data = self.get_host_stats()
# Memory data is in MB already.
memory_mb_used = data['host_memory_total'] - data['host_memory_free']
# Convert to GB
local_gb = data['disk_total'] / 1024
local_gb_used = data['disk_used'] / 1024
dic = {'vcpus': data['vcpus'],
'memory_mb': data['host_memory_total'],
'local_gb': local_gb,
'vcpus_used': data['vcpus_used'],
'memory_mb_used': memory_mb_used,
'local_gb_used': local_gb_used,
'hypervisor_type': data['hypervisor_type'],
'hypervisor_version': data['hypervisor_version'],
'hypervisor_hostname': self._operator.get_hostname(),
'cpu_info': ','.join(data['cpu_info']),
'disk_available_least': data['disk_total'],
'supported_instances': jsonutils.dumps(
data['supported_instances'])}
return dic
def get_host_stats(self, refresh=False):
"""Return currently known host stats."""
if refresh or not self._host_stats:
self._update_host_stats()
return self._host_stats
def _update_host_stats(self):
memory_info = self._operator.get_memory_info()
cpu_info = self._operator.get_cpu_info()
# Note: disk avail information is not accurate. The value
# is a sum across all Volume Groups, so it can overstate what
# is really usable. Example: consider two
# VGs of 10G each; the avail disk will be 20G, however
# a 15G image does not fit in any single VG. This can be improved
# later on.
disk_info = self._operator.get_disk_info()
data = {}
data['vcpus'] = cpu_info['total_procs']
data['vcpus_used'] = cpu_info['total_procs'] - cpu_info['avail_procs']
data['cpu_info'] = constants.POWERVM_CPU_INFO
data['disk_total'] = disk_info['disk_total']
data['disk_used'] = disk_info['disk_used']
data['disk_available'] = disk_info['disk_avail']
data['host_memory_total'] = memory_info['total_mem']
data['host_memory_free'] = memory_info['avail_mem']
data['hypervisor_type'] = constants.POWERVM_HYPERVISOR_TYPE
data['hypervisor_version'] = constants.POWERVM_HYPERVISOR_VERSION
data['hypervisor_hostname'] = self._operator.get_hostname()
data['supported_instances'] = constants.POWERVM_SUPPORTED_INSTANCES
data['extres'] = ''
self._host_stats = data
def get_host_uptime(self, host):
"""Returns the result of calling "uptime" on the target host."""
return self._operator.get_host_uptime(host)
def spawn(self, context, instance, image_id, network_info):
def _create_image(context, instance, image_id):
"""Fetch image from glance and copy it to the remote system."""
try:
root_volume = self._disk_adapter.create_volume_from_image(
context, instance, image_id)
self._disk_adapter.attach_volume_to_host(root_volume)
#NOTE(xg.song): These two metadata entries are used by the VNC client
# to connect to the lpar through the manager.
lpar_id = self._operator.get_lpar(instance['name'])['lpar_id']
meta = instance.metadata
# Use powervm_mgr as default, otherwise configure through host
mgr = CONF.powervm.powervm_mgr
if CONF.powervm.host:
mgr = CONF.powervm.host
meta['powervm_mgr'] = mgr
meta['deploy_id'] = lpar_id
instance.metadata.update(meta)
instance.save()
meta = instance.metadata
vhost = self._operator.get_vhost_by_instance_id(lpar_id)
self._operator.attach_disk_to_vhost(
root_volume['device_name'], vhost)
except Exception as e:
LOG.exception(_("PowerVM image creation failed: %s") % str(e))
raise exception.PowerVMImageCreationFailed()
spawn_start = time.time()
try:
try:
host_stats = self.get_host_stats(refresh=True)
lpar_inst = self._create_lpar_instance(instance,
network_info, host_stats)
#TODO(mjfork) capture the error and handle the error when the
# MAC prefix already exists on the
# system (1 in 2^28)
self._operator.create_lpar(lpar_inst)
LOG.debug(_("Creating LPAR instance '%s'") % instance['name'])
except processutils.ProcessExecutionError:
LOG.exception(_("LPAR instance '%s' creation failed") %
instance['name'])
raise exception.PowerVMLPARCreationFailed(
instance_name=instance['name'])
_create_image(context, instance, image_id)
LOG.debug(_("Activating the LPAR instance '%s'")
% instance['name'])
self._operator.start_lpar(instance['name'])
# TODO(mrodden): probably do this a better way
# that actually relies on the time module
# and nonblocking threading
# Wait for boot
timeout_count = range(10)
while timeout_count:
state = self.get_info(instance['name'])['state']
if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
break
timeout_count.pop()
if len(timeout_count) == 0:
LOG.error(_("Instance '%s' failed to boot") %
instance['name'])
self._cleanup(instance['name'])
break
time.sleep(1)
except exception.PowerVMImageCreationFailed:
with excutils.save_and_reraise_exception():
# log errors in cleanup
try:
self._cleanup(instance['name'])
except Exception:
LOG.exception(_('Error while attempting to '
'clean up failed instance launch.'))
spawn_time = time.time() - spawn_start
LOG.info(_("Instance spawned in %s seconds") % spawn_time,
instance=instance)
def destroy(self, instance_name, destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
:param instance_name: Instance name.
"""
try:
self._cleanup(instance_name, destroy_disks)
except exception.PowerVMLPARInstanceNotFound:
LOG.warn(_("During destroy, LPAR instance '%s' was not found on "
"PowerVM system.") % instance_name)
def capture_image(self, context, instance, image_id, image_meta,
update_task_state):
"""Capture the root disk for a snapshot
:param context: nova context for this operation
:param instance: instance information to capture the image from
:param image_id: uuid of pre-created snapshot image
:param image_meta: metadata to upload with captured image
:param update_task_state: Function reference that allows for updates
to the instance task state.
"""
lpar = self._operator.get_lpar(instance['name'])
previous_state = lpar['state']
# stop the instance if it is running
if previous_state == 'Running':
LOG.debug(_("Stopping instance %s for snapshot.") %
instance['name'])
# wait up to 2 minutes for shutdown
self.power_off(instance['name'], timeout=120)
# get disk_name
vhost = self._operator.get_vhost_by_instance_id(lpar['lpar_id'])
disk_name = self._operator.get_disk_name_by_vhost(vhost)
# do capture and upload
self._disk_adapter.create_image_from_volume(
disk_name, context, image_id, image_meta, update_task_state)
# restart instance if it was running before
if previous_state == 'Running':
self.power_on(instance['name'])
def _cleanup(self, instance_name, destroy_disks=True):
lpar_id = self._get_instance(instance_name)['lpar_id']
try:
vhost = self._operator.get_vhost_by_instance_id(lpar_id)
disk_name = self._operator.get_disk_name_by_vhost(vhost)
LOG.debug(_("Shutting down the instance '%s'") % instance_name)
self._operator.stop_lpar(instance_name)
#dperaza: LPAR should be deleted first so that vhost is
#cleanly removed and detached from disk device.
LOG.debug(_("Deleting the LPAR instance '%s'") % instance_name)
self._operator.remove_lpar(instance_name)
if disk_name and destroy_disks:
# TODO(mrodden): we should also detach from the instance
# before we start deleting things...
volume_info = {'device_name': disk_name}
#Volume info dictionary might need more info that is lost when
#volume is detached from host so that it can be deleted
self._disk_adapter.detach_volume_from_host(volume_info)
self._disk_adapter.delete_volume(volume_info)
except Exception:
LOG.exception(_("PowerVM instance cleanup failed"))
raise exception.PowerVMLPARInstanceCleanupFailed(
instance_name=instance_name)
def power_off(self, instance_name,
timeout=constants.POWERVM_LPAR_OPERATION_TIMEOUT):
self._operator.stop_lpar(instance_name, timeout)
def power_on(self, instance_name):
self._operator.start_lpar(instance_name)
def macs_for_instance(self, instance):
return self._operator.macs_for_instance(instance)
def _create_lpar_instance(self, instance, network_info, host_stats=None):
inst_name = instance['name']
# CPU/Memory min and max can be configurable. Let's assume
# some default values for now.
# Memory
mem = instance['memory_mb']
if host_stats and mem > host_stats['host_memory_free']:
LOG.error(_('Not enough free memory in the host'))
raise exception.PowerVMInsufficientFreeMemory(
instance_name=instance['name'])
mem_min = min(mem, constants.POWERVM_MIN_MEM)
mem_max = mem + constants.POWERVM_MAX_MEM
# CPU
cpus = instance['vcpus']
if host_stats:
avail_cpus = host_stats['vcpus'] - host_stats['vcpus_used']
if cpus > avail_cpus:
LOG.error(_('Insufficient available CPU on PowerVM'))
raise exception.PowerVMInsufficientCPU(
instance_name=instance['name'])
cpus_min = min(cpus, constants.POWERVM_MIN_CPUS)
cpus_max = cpus + constants.POWERVM_MAX_CPUS
cpus_units_min = decimal.Decimal(cpus_min) / decimal.Decimal(10)
cpus_units = decimal.Decimal(cpus) / decimal.Decimal(10)
# Network
# To ensure the MAC address on the guest matches the
# generated value, pull the first 10 characters off the
# MAC address for the mac_base_value parameter and then
# get the integer value of the final 2 characters as the
# slot_id parameter
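# For example, a generated MAC of fa:3a:7c:11:22:21 (illustrative value)
# yields mac_base_value 'fa3a7c1122' and slot_id int('21', 16) == 33.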
virtual_eth_adapters = ""
for vif in network_info:
mac = vif['address']
mac_base_value = (mac[:-2]).replace(':', '')
slot_id = int(mac[-2:], 16)
network_type = vif['network'].get_meta('network_type', None)
if network_type == 'vlan':
eth_id = vif['network'].get_meta('vlan')
else:
eth_id = self._operator.get_virtual_eth_adapter_id()
if virtual_eth_adapters:
virtual_eth_adapters = ('\\"%(virtual_eth_adapters)s, \
%(slot_id)s/0/%(eth_id)s//0/0\\"' %
{'virtual_eth_adapters': virtual_eth_adapters,
'slot_id': slot_id, 'eth_id': eth_id})
else:
virtual_eth_adapters = ('%(slot_id)s/0/%(eth_id)s//0/0' %
{'slot_id': slot_id, 'eth_id': eth_id})
# LPAR configuration data
# max_virtual_slots is hardcoded to 64 since we generate a MAC
# address that must be placed in slots 32 - 64
lpar_inst = LPAR.LPAR(
name=inst_name, lpar_env='aixlinux',
min_mem=mem_min, desired_mem=mem,
max_mem=mem_max, proc_mode='shared',
sharing_mode='uncap', min_procs=cpus_min,
desired_procs=cpus, max_procs=cpus_max,
min_proc_units=cpus_units_min,
desired_proc_units=cpus_units,
max_proc_units=cpus_max,
virtual_eth_mac_base_value=mac_base_value,
max_virtual_slots=64,
virtual_eth_adapters=virtual_eth_adapters)
return lpar_inst
def _check_host_resources(self, instance, vcpus, mem, host_stats):
"""Checks resources on host for resize, migrate, and spawn
:param vcpus: CPUs to be used
:param mem: memory requested by instance
:param disk: size of disk to be expanded or created
"""
if mem > host_stats['host_memory_free']:
LOG.exception(_('Not enough free memory in the host'))
raise exception.PowerVMInsufficientFreeMemory(
instance_name=instance['name'])
avail_cpus = host_stats['vcpus'] - host_stats['vcpus_used']
if vcpus > avail_cpus:
LOG.exception(_('Insufficient available CPU on PowerVM'))
raise exception.PowerVMInsufficientCPU(
instance_name=instance['name'])
def migrate_disk(self, device_name, src_host, dest, image_path,
instance_name=None):
"""Migrates SVC or Logical Volume based disks
:param device_name: disk device name in /dev/
:param dest: IP or DNS name of destination host/VIOS
:param image_path: path on source and destination to directory
for storing image files
:param instance_name: name of instance being migrated
:returns: disk_info dictionary object describing root volume
information used for locating/mounting the volume
"""
dest_file_path = self._disk_adapter.migrate_volume(
device_name, src_host, dest, image_path, instance_name)
disk_info = {}
disk_info['root_disk_file'] = dest_file_path
return disk_info
def deploy_from_migrated_file(self, lpar, file_path, size,
power_on=True):
"""Deploy the logical volume and attach to new lpar.
:param lpar: lpar instance
:param file_path: logical volume path
:param size: new size of the logical volume
"""
need_decompress = file_path.endswith('.gz')
try:
# deploy lpar from file
self._deploy_from_vios_file(lpar, file_path, size,
decompress=need_decompress,
power_on=power_on)
finally:
# cleanup migrated file
self._operator._remove_file(file_path)
def _deploy_from_vios_file(self, lpar, file_path, size,
decompress=True, power_on=True):
self._operator.create_lpar(lpar)
lpar = self._operator.get_lpar(lpar['name'])
instance_id = lpar['lpar_id']
vhost = self._operator.get_vhost_by_instance_id(instance_id)
# Create logical volume on IVM
diskName = self._disk_adapter._create_logical_volume(size)
# Attach the disk to LPAR
self._operator.attach_disk_to_vhost(diskName, vhost)
# Copy file to device
self._disk_adapter._copy_file_to_device(file_path, diskName,
decompress)
if power_on:
self._operator.start_lpar(lpar['name'])
class BaseOperator(object):
"""Base operator for IVM and HMC managed systems."""
def __init__(self, connection):
"""Constructor.
:param connection: common.Connection object with the
information to connect to the remote
ssh.
"""
self._connection = None
self.connection_data = connection
def _set_connection(self):
# create a new connection or verify an existing connection
# and re-establish if the existing connection is dead
self._connection = common.check_connection(self._connection,
self.connection_data)
def _poll_for_lpar_status(self, instance_name, status, operation,
timeout=constants.POWERVM_LPAR_OPERATION_TIMEOUT):
"""Polls until the LPAR with the given name reaches the given status.
:param instance_name: LPAR instance name
:param status: Poll until the given LPAR status is reached
:param operation: The operation being performed, e.g. 'stop_lpar'
:param timeout: The number of seconds to wait.
:raises: PowerVMLPARInstanceNotFound
:raises: PowerVMLPAROperationTimeout
:raises: InvalidParameterValue
"""
# make sure it's a valid status
if (status == constants.POWERVM_NOSTATE or
not status in constants.POWERVM_POWER_STATE):
msg = _("Invalid LPAR state: %s") % status
raise n_exc.InvalidParameterValue(err=msg)
# raise the given timeout exception if the loop call doesn't complete
# in the specified timeout
timeout_exception = exception.PowerVMLPAROperationTimeout(
operation=operation,
instance_name=instance_name)
with eventlet_timeout.Timeout(timeout, timeout_exception):
def _wait_for_lpar_status(instance_name, status):
"""Called at an interval until the status is reached."""
lpar_obj = self.get_lpar(instance_name)
if lpar_obj['state'] == status:
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_lpar_status,
instance_name, status)
timer.start(interval=1).wait()
def get_lpar(self, instance_name, resource_type='lpar'):
"""Return a LPAR object by its instance name.
:param instance_name: LPAR instance name
:param resource_type: the type of resources to list
:returns: LPAR object
"""
cmd = self.command.lssyscfg('-r %s --filter "lpar_names=%s"'
% (resource_type, instance_name))
output = self.run_vios_command(cmd)
if not output:
return None
lpar = LPAR.load_from_conf_data(output[0])
return lpar
def list_lpar_instances(self):
"""List all existent LPAR instances names.
:returns: list -- list with instances names.
"""
lpar_names = self.run_vios_command(self.command.lssyscfg(
'-r lpar -F name'))
if not lpar_names:
return []
return lpar_names
def create_lpar(self, lpar):
"""Receives a LPAR data object and creates a LPAR instance.
:param lpar: LPAR object
"""
conf_data = lpar.to_string()
self.run_vios_command(self.command.mksyscfg('-r lpar -i "%s"' %
conf_data))
def start_lpar(self, instance_name,
timeout=constants.POWERVM_LPAR_OPERATION_TIMEOUT):
"""Start a LPAR instance.
:param instance_name: LPAR instance name
:param timeout: value in seconds for specifying
how long to wait for the LPAR to start
"""
self.run_vios_command(self.command.chsysstate('-r lpar -o on -n %s'
% instance_name))
# poll instance until running or raise exception
self._poll_for_lpar_status(instance_name, constants.POWERVM_RUNNING,
'start_lpar', timeout)
def stop_lpar(self, instance_name,
timeout=constants.POWERVM_LPAR_OPERATION_TIMEOUT):
"""Stop a running LPAR.
:param instance_name: LPAR instance name
:param timeout: value in seconds for specifying
how long to wait for the LPAR to stop
"""
cmd = self.command.chsysstate('-r lpar -o shutdown --immed -n %s' %
instance_name)
self.run_vios_command(cmd)
# poll instance until stopped or raise exception
self._poll_for_lpar_status(instance_name, constants.POWERVM_SHUTDOWN,
'stop_lpar', timeout)
def remove_lpar(self, instance_name):
"""Removes a LPAR.
:param instance_name: LPAR instance name
"""
self.run_vios_command(self.command.rmsyscfg('-r lpar -n %s'
% instance_name))
def get_vhost_by_instance_id(self, instance_id):
"""Return the vhost name by the instance id.
:param instance_id: LPAR instance id
:returns: string -- vhost name or None in case none is found
"""
instance_hex_id = '%#010x' % int(instance_id)
cmd = self.command.lsmap('-all -field clientid svsa -fmt :')
output = self.run_vios_command(cmd)
vhosts = dict(item.split(':') for item in list(output))
if instance_hex_id in vhosts:
return vhosts[instance_hex_id]
return None
def get_virtual_eth_adapter_id(self):
"""Virtual ethernet adapter id.
Searches for the shared ethernet adapter and returns
its id.
:returns: id of the virtual ethernet adapter.
"""
cmd = self.command.lsmap('-all -net -field sea -fmt :')
output = self.run_vios_command(cmd)
sea = output[0]
cmd = self.command.lsdev('-dev %s -attr pvid' % sea)
output = self.run_vios_command(cmd)
# Returned output looks like this: ['value', '', '1']
if output:
return output[2]
return None
def get_hostname(self):
"""Returns the managed system hostname.
:returns: string -- hostname
"""
output = self.run_vios_command(self.command.hostname())
hostname = output[0]
if not hasattr(self, '_hostname'):
self._hostname = hostname
elif hostname != self._hostname:
LOG.error(_('Hostname has changed from %(old)s to %(new)s. '
'A restart is required to take effect.'
) % {'old': self._hostname, 'new': hostname})
return self._hostname
def get_disk_name_by_vhost(self, vhost):
"""Returns the disk name attached to a vhost.
:param vhost: a vhost name
:returns: string -- disk name
"""
cmd = self.command.lsmap('-vadapter %s -field backing -fmt :' % vhost)
output = self.run_vios_command(cmd)
if output:
return output[0]
return None
def attach_disk_to_vhost(self, disk, vhost):
"""Attach disk name to a specific vhost.
:param disk: the disk name
:param vhost: the vhost name
"""
cmd = self.command.mkvdev('-vdev %s -vadapter %s') % (disk, vhost)
self.run_vios_command(cmd)
def get_memory_info(self):
"""Get memory info.
:returns: dict -- memory info with keys 'total_mem' and 'avail_mem'
"""
cmd = self.command.lshwres(
'-r mem --level sys -F configurable_sys_mem,curr_avail_sys_mem')
output = self.run_vios_command(cmd)
total_mem, avail_mem = output[0].split(',')
return {'total_mem': int(total_mem),
'avail_mem': int(avail_mem)}
def get_host_uptime(self, host):
"""Get host uptime.
:returns: string - amount of time since last system startup
"""
# The output of the command is like this:
# "02:54PM up 24 days, 5:41, 1 user, load average: 0.06, 0.03, 0.02"
cmd = self.command.sysstat('-short %s' % self.connection_data.username)
return self.run_vios_command(cmd)[0]
def get_cpu_info(self):
"""Get CPU info.
:returns: dict -- cpu info with keys 'total_procs' and 'avail_procs'
"""
cmd = self.command.lshwres(
'-r proc --level sys -F '
'configurable_sys_proc_units,curr_avail_sys_proc_units')
output = self.run_vios_command(cmd)
total_procs, avail_procs = output[0].split(',')
return {'total_procs': float(total_procs),
'avail_procs': float(avail_procs)}
def get_disk_info(self):
"""Get the disk usage information.
:returns: dict -- disk info with keys 'disk_total', 'disk_used' and 'disk_avail'
"""
vgs = self.run_vios_command(self.command.lsvg())
(disk_total, disk_used, disk_avail) = [0, 0, 0]
for vg in vgs:
cmd = self.command.lsvg('%s -field totalpps usedpps freepps -fmt :'
% vg)
output = self.run_vios_command(cmd)
# Output example:
# 1271 (10168 megabytes):0 (0 megabytes):1271 (10168 megabytes)
(d_total, d_used, d_avail) = re.findall(r'(\d+) megabytes',
output[0])
disk_total += int(d_total)
disk_used += int(d_used)
disk_avail += int(d_avail)
return {'disk_total': disk_total,
'disk_used': disk_used,
'disk_avail': disk_avail}
def run_vios_command(self, cmd, check_exit_code=True):
"""Run a remote command using an active ssh connection.
:param command: String with the command to run.
"""
self._set_connection()
stdout, stderr = processutils.ssh_execute(
self._connection, cmd, check_exit_code=check_exit_code)
error_text = stderr.strip()
if error_text:
LOG.warn(_("Found error stream for command \"%(cmd)s\": "
"%(error_text)s"),
{'cmd': cmd, 'error_text': error_text})
return stdout.strip().splitlines()
def run_vios_command_as_root(self, command, check_exit_code=True):
"""Run a remote command as root using an active ssh connection.
:param command: List of commands.
"""
self._set_connection()
stdout, stderr = common.ssh_command_as_root(
self._connection, command, check_exit_code=check_exit_code)
error_text = stderr.read()
if error_text:
LOG.warn(_("Found error stream for command \"%(command)s\":"
" %(error_text)s"),
{'command': command, 'error_text': error_text})
return stdout.read().splitlines()
def macs_for_instance(self, instance):
pass
def update_lpar(self, lpar_info):
"""Resizing an LPAR
:param lpar_info: dictionary of LPAR information
"""
configuration_data = ('name=%s,min_mem=%s,desired_mem=%s,'
'max_mem=%s,min_procs=%s,desired_procs=%s,'
'max_procs=%s,min_proc_units=%s,'
'desired_proc_units=%s,max_proc_units=%s' %
(lpar_info['name'], lpar_info['min_mem'],
lpar_info['desired_mem'],
lpar_info['max_mem'],
lpar_info['min_procs'],
lpar_info['desired_procs'],
lpar_info['max_procs'],
lpar_info['min_proc_units'],
lpar_info['desired_proc_units'],
lpar_info['max_proc_units']))
self.run_vios_command(self.command.chsyscfg('-r prof -i "%s"' %
configuration_data))
def get_logical_vol_size(self, diskname):
"""Finds and calculates the logical volume size in GB
:param diskname: name of the logical volume
:returns: size of logical volume in GB
"""
configuration_data = ("ioscli lslv %s -fmt : -field pps ppsize" %
diskname)
output = self.run_vios_command(configuration_data)
pps, ppsize = output[0].split(':')
ppsize = re.findall(r'\d+', ppsize)
ppsize = int(ppsize[0])
pps = int(pps)
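# ppsize is reported in MB, so pps * ppsize is the volume size in MB;
# divide by 1024 to express it in GB (e.g. 1271 PPs * 8 MB -> ~9 GB).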
lv_size = ((pps * ppsize) / 1024)
return lv_size
def rename_lpar(self, instance_name, new_name):
"""Rename LPAR given by instance_name to new_name
Note: For IVM based deployments, the name is
limited to 31 characters and will be trimmed
to meet this requirement
:param instance_name: name of LPAR to be renamed
:param new_name: desired new name of LPAR
:returns: new name of renamed LPAR trimmed to 31 characters
if necessary
"""
# grab first 31 characters of new name
new_name_trimmed = new_name[:31]
cmd = ''.join(['chsyscfg -r lpar -i ',
'"',
'name=%s,' % instance_name,
'new_name=%s' % new_name_trimmed,
'"'])
self.run_vios_command(cmd)
return new_name_trimmed
def _remove_file(self, file_path):
"""Removes a file on the VIOS partition
:param file_path: absolute path to file to be removed
"""
command = 'rm -f %s' % file_path
self.run_vios_command_as_root(command)
def set_lpar_mac_base_value(self, instance_name, mac):
"""Set LPAR's property virtual_eth_mac_base_value
:param instance_name: name of the instance to be set
:param mac: mac of virtual ethernet
"""
# NOTE(ldbragst) We only use the base mac value because the last
# byte is the slot id of the virtual NIC, which doesn't change.
mac_base_value = mac[:-2].replace(':', '')
cmd = ' '.join(['chsyscfg -r lpar -i',
'"name=%s,' % instance_name,
'virtual_eth_mac_base_value=%s"' % mac_base_value])
self.run_vios_command(cmd)
class IVMOperator(BaseOperator):
"""Integrated Virtualization Manager (IVM) Operator.
Runs specific commands on an IVM managed system.
"""
def __init__(self, ivm_connection):
self.command = command.IVMCommand()
BaseOperator.__init__(self, ivm_connection)
def macs_for_instance(self, instance):
"""Generates set of valid MAC addresses for an IVM instance."""
# NOTE(vish): We would prefer to use 0xfe here to ensure that linux
# bridge mac addresses don't change, but it appears to
# conflict with libvirt, so we use the next highest octet
# that has the unicast and locally administered bits set
# properly: 0xfa.
# Discussion: https://bugs.launchpad.net/nova/+bug/921838
# NOTE(mjfork): For IVM-based PowerVM, we cannot directly set a MAC
# address on an LPAR, but rather need to construct one
# that can be used. Retain the 0xfa as noted above,
# but ensure the final 2 hex values represent a value
# between 32 and 64 so we can assign as the slot id on
# the system. For future reference, the last octet
# should not exceed FF (255) since it would spill over
# into the higher-order octet.
#
# FA:xx:xx:xx:xx:[32-64]
macs = set()
mac_base = [0xfa,
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0x00)]
for n in range(32, 64):
mac_base[5] = n
macs.add(':'.join(map(lambda x: "%02x" % x, mac_base)))
return macs
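# A minimal sketch of how the MAC scheme above could be exercised, assuming a
# hypothetical caller; it only touches macs_for_instance(), which needs no
# live connection, and the None arguments are placeholders.
def _example_ivm_macs():
    operator = IVMOperator(None)      # connection data unused for MAC generation
    macs = operator.macs_for_instance(instance=None)
    # 32 addresses of the form fa:xx:xx:xx:xx:NN with NN in hex 20-3f
    # (slot ids 32-63)
    return sorted(macs)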
|
|
"""These routines are for doing typical extracellular (cell-attached) analyses.
The main data structures we use here are XSG dictionaries. These
contain keys by default that have a collection of meta-data, and one
key for each of the three ephus programs (ephys, acquierer and
stimulator).
We have been inspired by the spike_sort package, but re-implemented
routines to better fit the XSG data structure. In particular, we have
'detectSpikes' and 'extractSpikes', as well as routines to calculate
spike rate histograms and densities, and plotting a spike raster.
"""
import numpy as np
import matplotlib.pyplot as plt
import copy
from itertools import repeat
from scipy.stats import norm
from scipy.ndimage import convolve1d
__all__ = ['plotRaster', 'makeSTH', 'makeSpikeDensity', 'detectSpikes', 'extractSpikes']
def detectSpikes(orig_xsg, thresh, edge='falling', channel='chan0', filter_trace=False):
"""This function detects spikes in a merged or unmerged XSG
dictionary containing cell attached-recordings. It adds a key
'spikeTimes' to a new copy of the XSG, which is simply a numpy
array of event indices (in milliseconds), or a list of such
arrays in the case of a merged xsg. Note that this must be a list
of numpy arrays, instead of a 2d array, because the # of events is
different in each trial.
Note that this routine can take different types of optional
parameters. Central is 'thresh', which can be either a floating
point threshold value or an explicit wave that is exactly the same
size as a single trial. One can also specify a list of such
thresholds, one for each trial. This list must have one entry per
trial, but entries can mix and match explicit waves and single
numbers.
By default we're going to detect spikes in channel 0, and there is
room here to filter the waves before detection (need to implement
soon).
:param: - orig_xsg - merged or unmerged xsg containing cell-attached ephys data
:param: - thresh - a single threshold or list (as specified above)
:param: - edge - string, one of 'rising' or 'falling'
:param: - channel - string- specifying which channel to use (must be a
valid key in the 'ephys' sub-dictionary)
:param: - filter_trace - boolean, pre-filter traces before detection, not implemented
:returns: - xsg - a copy of orig_xsg, which adds a single array/list of arrays of spike times (in milliseconds)
"""
assert edge in ['falling', 'rising'], "Edge must be 'falling' or 'rising'!"
xsg = copy.deepcopy(orig_xsg)
# internal function to be used with a map
def detect(params):
trace, thresh, filter_trace, sample_rate = params
if filter_trace:
#trace = filterthetrace(trace)
pass
# thresh is now a single value or an explicit wave the same size and shape as trace
# let's just make it explicit
if type(thresh) is not np.ndarray:
thresh = np.ones_like(trace) * thresh
if edge == 'rising':
i, = np.where((trace[:-1] < thresh[:-1]) & (trace[1:] > thresh[1:]))
if edge == 'falling':
i, = np.where((trace[:-1] > thresh[:-1]) & (trace[1:] < thresh[1:]))
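# convert the threshold-crossing sample indices to spike times in milliseconds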
return i * 1000.0 / sample_rate
if 'merged' in xsg.keys():
# important type expectation here --- could be a list of floats or a list of explicit ndarrays
if type(thresh) is not list:
thresh = repeat(thresh)
if type(filter_trace) is not list:
filter_trace = repeat(filter_trace)
xsg['spikeTimes'] = map(detect, zip(np.rollaxis(xsg['ephys'][channel], 1, 0), thresh, filter_trace, repeat(xsg['sampleRate'][0])))
else:
xsg['spikeTimes'] = detect((xsg['ephys'][channel], thresh, filter_trace, xsg['sampleRate'])) # wrapping here to make it compatible with the zip for a single trial
return xsg
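# A minimal usage sketch for detectSpikes(), assuming a hypothetical unmerged
# XSG with a single 10 kHz cell-attached trace on 'chan0'; the synthetic trace
# and the threshold value are illustrative only.
def _example_detect_spikes():
    sample_rate = 10000.0
    trace = np.zeros(10000)            # one second of baseline
    trace[[1000, 5000, 9000]] = -5.0   # three downward deflections ("spikes")
    fake_xsg = {'ephys': {'chan0': trace}, 'sampleRate': sample_rate}
    detected = detectSpikes(fake_xsg, thresh=-2.5, edge='falling')
    return detected['spikeTimes']      # spike times near 100, 500 and 900 ms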
def extractSpikes(orig_xsg, width=100, energy=False, channel='chan0'):
"""Creates a list of extracted spikes from a merged or unmerged XSG dictionary.
Note that the dictionary has to have the key 'spikeTimes', which is
generated by detectSpikes(). The value of this key is a single numpy array
with spike locations in milliseconds (or a list of such arrays).
Creates a new key 'extractedSpikes' which is a 2d numpy array of
samples x spikes, centered on threshold crossing. The width
parameter is in samples, and with a sampling frequency of 10kHz,
defaults to 1 millisecond. The parameter 'energy' controls
whether each extracted trace is normalized by its total energy.
This routine could be improved by 1) accounting for spikes at the
very edges of the trace, 2) aligning the spikes to the peak instead
of the threshold crossing (maybe best done in the spike detection?),
and 3) baselining or otherwise normalizing in preparation for PCA.
:param: - xsg - a merged or unmerged XSG dictionary with a 'spikeTimes' entry
:param: - width - optional int, window size in samples, defaults to 1ms at 10kHz sampling rate
:param: - energy - optional boolean, controls normalizing by energy level
:param: - channel - which channel to extract spikes from, defaults to 'chan0'
:returns: - a xsg with the added field 'extractedSpikes', as explained above
"""
half_width = int(width / 2)
xsg = copy.deepcopy(orig_xsg)
def extract(params):
data, times = params
extracted_temp = np.zeros((width, len(times)))
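# spike times are stored in milliseconds; inside the loop each one is
# converted back to a sample index, assuming a 10 kHz sampling rate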
for i, spike in enumerate(times):
sample = int(spike*10)
if energy is True:
extracted_temp[:,i] = data[sample-half_width:sample+half_width] / (np.sqrt(np.sum(np.power(data[sample-half_width:sample+half_width], 2))))
else:
extracted_temp[:,i] = data[sample-half_width:sample+half_width]
return extracted_temp
if 'merged' in orig_xsg.keys():
xsg['extractedSpikes'] = map(extract, zip(np.rollaxis(xsg['ephys'][channel], 1, 0), [times for times in xsg['spikeTimes']]))
else:
xsg['extractedSpikes'] = extract((xsg['ephys'][channel], xsg['spikeTimes']))
return xsg
def plotRaster(xsg, ax=None, height=1.):
"""Creates raster plot from a merged or unmerged XSG dictionary.
Note that the dictionary has to have the key 'spikeTimes', which is
generated by detectSpikes(). The value of this key is a single numpy array
with spike locations in milliseconds (or a list of such arrays).
Note that we plot these based on the size of the traces themselves.
This works because we split up our acquisitions, but in general,
we might want to only plot regions of the raster. plt.xlim() can
be used post-hoc for this.
:param: - xsg - a merged or unmerged XSG dictionary with a 'spikeTimes' entry
:param: - ax - optional, a matplotlib axis to plot onto
:param: - height - optional, spacing for the rasters
"""
if ax is None:
ax = plt.gca() # otherwise it'll plot on the top figure
try:
if type(xsg['spikeTimes']) is list:
for trial, trace in enumerate(xsg['spikeTimes']):
plt.vlines(trace, trial, trial+height)
plt.ylim(len(xsg['spikeTimes']), 0)
plt.xlim(0,float(xsg['ephys']['chan0'].shape[0]) / xsg['sampleRate'][0] * 1000.0)
else:
plt.vlines(xsg['spikeTimes'], 0, height)
plt.ylim((0,1))
plt.xlim(0,float(xsg['ephys']['chan0'].shape[0]) / xsg['sampleRate'] * 1000.0)
plt.xlabel('time (ms)')
plt.ylabel('trials')
except KeyError:
print('No spike times found!')
def makeSTH(orig_xsg, bin_size=1):
"""Creates spike rate histograms from a merged or unmerged XSG dictionary.
Note that the dictionary has to have the key 'spikeTimes', which is
generated by detectSpikes(). The value of this key is a single numpy array
with spike locations in milliseconds (or a list of such arrays).
This routine creates a new key, 'spikeHist' which is a dictionary.
It contains 4 entries:
'binCenters'
'binEdges'
'counts'
'rates' - this is simply counts * 1000 / bin_size
If the xsg was unmerged, these values are 1d arrays. If it was merged, they are 2d (time x trials)
:param: xsg - a merged or unmerged XSG dictionary with a 'spikeTimes' entry
:param: bin_size - optional bin size in milliseconds. Default to 1ms.
:returns: xsg - a copy of the previous XSG, with the 'spikeHist' dictionary added as described above.
"""
assert 'spikeTimes' in orig_xsg, 'No spike times found!'
xsg = copy.deepcopy(orig_xsg)
xsg['spikeHist'] = {}
if 'merged' in xsg.keys():
sampleRate = float(xsg['sampleRate'][0])
else:
sampleRate = float(xsg['sampleRate'])
bins = np.arange(0,xsg['ephys']['chan0'].shape[0] / sampleRate * 1000.0, bin_size)
rate_factor = 1000.0 / bin_size
def makeHist(params):
spike_time, bins = params
counts, bin_edges = np.histogram(spike_time, bins)
bin_centers = 0.5*(bin_edges[1:]+bin_edges[:-1])
return bin_centers, counts, bin_edges
if 'merged' in xsg.keys():
temp_hist = map(makeHist, zip([st for st in xsg['spikeTimes']], repeat(bins)))
xsg['spikeHist']['binCenters'] = np.array([x[0] for x in temp_hist]).T
xsg['spikeHist']['counts'] = np.array([x[1] for x in temp_hist]).T
xsg['spikeHist']['binEdges'] = np.array([x[2] for x in temp_hist]).T
xsg['spikeHist']['rates'] = xsg['spikeHist']['counts'] * rate_factor
else:
temp_hist = makeHist((xsg['spikeTimes'], bins))
xsg['spikeHist']['binCenters'] = temp_hist[0]
xsg['spikeHist']['counts'] = temp_hist[1]
xsg['spikeHist']['binEdges'] = temp_hist[2]
xsg['spikeHist']['rates'] = xsg['spikeHist']['counts'] * rate_factor
xsg['spikeHist']['binSize'] = bin_size
return xsg
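# A minimal, hypothetical sketch of makeSTH() on synthetic data (not part of
# the original module): the dict only carries the keys the function touches,
# and the invented spike times are in ms, matching the bin edges built above.
def _example_make_sth():
    import numpy as np
    fake_xsg = {
        'ephys': {'chan0': np.zeros(100000)},  # 10 s of samples at 10 kHz
        'sampleRate': 10000.0,
        'spikeTimes': np.array([12.5, 480.0, 950.2, 951.4]),  # ms
    }
    out = makeSTH(fake_xsg, bin_size=10)
    # all four spikes fall inside the binned range, and rates = counts * 100
    assert out['spikeHist']['counts'].sum() == 4
    return out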
def makeSpikeDensity(orig_xsg, sigma=100):
"""Creates spike rate densities from from a merged or unmerged XSG dictionary.
Note that the dictionary has to have the key 'spikeHist', which is
generated by makeSTH(). The value of this key is a dictionary of binned
spiked times and associated metadata (see makeSTH() for details).
The essential thing that this routine does is smooth the rates
calculated in makeSTH() with a gaussian of a specified width.
Note that the resolution of the kernel is dependent on the bin
size, so the best use of this is to bin with a small value (~1ms
for instance) and then smooth with something larger (~100ms). The
    sigma must be equal to or larger than the bin size. Playing
    with different versions of this yields smoothed approximations of
    the rates you get if you bin with a bin size of 1 second.
This routine creates a new key, 'spikeDensity' which is a dictionary.
It contains 4 entries:
'binCenters' - centers of binned values
'rates' - smoothed rates
'kernel' - calculated kernel for smoothing
'sigma' - sigma value passed in, in ms.
    If the xsg was unmerged, these values are 1d arrays. If it was merged, they are 2d (time x trials).
    The exception is sigma, which is a single value.
    :param: xsg - a merged or unmerged XSG dictionary with a 'spikeHist' entry
:param: sigma - optional standard deviation of gaussian to smooth with, in milliseconds.
:returns: xsg - a copy of the previous XSG, with the 'spikeDensity' dictionary added as described above.
"""
    assert sigma >= orig_xsg['spikeHist']['binSize']  # the resolution of our gaussian depends on the bin size
xsg = copy.deepcopy(orig_xsg)
edges = np.arange(-3*sigma, 3*sigma, orig_xsg['spikeHist']['binSize'])
kernel = norm.pdf(edges, 0, sigma)
kernel *= orig_xsg['spikeHist']['binSize']
xsg['spikeDensity'] = {}
xsg['spikeDensity']['binCenters'] = xsg['spikeHist']['binCenters'].copy()
xsg['spikeDensity']['kernel'] = kernel
xsg['spikeDensity']['sigma'] = sigma
    # actually smooth. note that we use ndimage's convolve1d, which by default reflects at the edges and returns an array of the same length
xsg['spikeDensity']['rates'] = convolve1d(xsg['spikeHist']['rates'].astype(float), kernel, axis=0)
return xsg
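# A hypothetical continuation of _example_make_sth() above (not part of the
# original module): smooth the 10 ms histogram with a 100 ms gaussian, using
# the scipy norm/convolve1d names that makeSpikeDensity() itself relies on.
def _example_make_spike_density():
    xsg_hist = _example_make_sth()
    xsg_density = makeSpikeDensity(xsg_hist, sigma=100)
    # convolve1d keeps the trace length, so binned and smoothed rates align
    assert (xsg_density['spikeDensity']['rates'].shape ==
            xsg_hist['spikeHist']['rates'].shape)
    return xsg_density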
|
|
# -*- coding: utf-8 -*-
from contextlib import contextmanager
from functools import update_wrapper
import atexit
import os
import warnings
import numpy as np
@contextmanager
def ignore_invalid():
err = np.seterr(invalid='ignore')
try:
yield
finally:
np.seterr(**err)
def check_array_like(a, *ndims, **kwargs):
if not hasattr(a, 'ndim'):
cls = kwargs.pop('default', np.asarray)
a = cls(a, **kwargs)
if a.ndim not in ndims:
raise ValueError('invalid number of dimensions: %s' % a.ndim)
def asarray_ndim(a, *ndims, **kwargs):
"""Ensure numpy array.
Parameters
----------
a : array_like
*ndims : int, optional
Allowed values for number of dimensions.
**kwargs
Passed through to :func:`numpy.array`.
Returns
-------
a : numpy.ndarray
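    Examples
    --------
    A minimal illustration (not from the original module's documentation)::
        >>> asarray_ndim([[1, 2], [3, 4]], 2).shape
        (2, 2)
        >>> asarray_ndim([1, 2, 3], 2)
        Traceback (most recent call last):
        ...
        TypeError: bad number of dimensions: expected 2; found 1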
"""
allow_none = kwargs.pop('allow_none', False)
kwargs.setdefault('copy', False)
if a is None and allow_none:
return None
a = np.array(a, **kwargs)
if a.ndim not in ndims:
if len(ndims) > 1:
expect_str = 'one of %s' % str(ndims)
else:
# noinspection PyUnresolvedReferences
expect_str = '%s' % ndims[0]
raise TypeError('bad number of dimensions: expected %s; found %s' %
(expect_str, a.ndim))
return a
def check_ndim(a, ndim):
if a.ndim != ndim:
raise TypeError('bad number of dimensions: expected %s; found %s' % (ndim, a.ndim))
def check_shape(a, shape):
if a.shape != shape:
raise TypeError('bad shape: expected %s; found %s' % (shape, a.shape))
def check_dtype(a, *dtypes):
dtypes = [np.dtype(t) for t in dtypes]
if a.dtype not in dtypes:
        raise TypeError('bad dtype: expected one of %s; found %s' % (dtypes, a.dtype))
def check_dtype_kind(a, *kinds):
if a.dtype.kind not in kinds:
        raise TypeError('bad dtype kind: expected one of %s; found %s' % (kinds, a.dtype.kind))
def check_integer_dtype(a):
check_dtype_kind(a, 'u', 'i')
def check_dim0_aligned(*arrays):
check_dim_aligned(0, *arrays)
def check_dim1_aligned(*arrays):
check_dim_aligned(1, *arrays)
def check_dim_aligned(dim, *arrays):
a = arrays[0]
for b in arrays[1:]:
if b.shape[dim] != a.shape[dim]:
raise ValueError(
'arrays do not have matching length for dimension %s' % dim
)
def check_same_ndim(*arrays):
a = arrays[0]
for b in arrays[1:]:
if len(b.shape) != len(a.shape):
raise ValueError(
'arrays do not have same number of dimensions'
)
def check_equal_length(a, *others):
expected_length = len(a)
for b in others:
if len(b) != expected_length:
raise ValueError('sequences do not have matching length')
def resize_dim1(a, s, fill=0):
if a.shape[1] < s:
newshape = a.shape[0], s
b = np.zeros(newshape, dtype=a.dtype)
if fill != 0:
b.fill(fill)
b[:, :a.shape[1]] = a
return b
else:
return a
def ensure_dim1_aligned(*arrays, **kwargs):
fill = kwargs.get('fill', 0)
dim1_length = max(a.shape[1] for a in arrays)
arrays = [resize_dim1(a, dim1_length, fill=fill) for a in arrays]
return arrays
def ensure_square(dist):
from scipy.spatial.distance import squareform
dist = asarray_ndim(dist, 1, 2)
if dist.ndim == 1:
dist = squareform(dist)
else:
if dist.shape[0] != dist.shape[1]:
raise ValueError('distance matrix is not square')
return dist
def mask_inaccessible(is_accessible, pos, *arrays):
"""
This function returns a tuple (positions, *arrays) in which
positions that are not accessible are removed from the positions
and the *arrays.
Parameters
----------
is_accessible : array_like, bool, shape (len(contig),)
Boolean array indicating accessibility status for all positions in the
chromosome/contig.
pos : array_like, int, shape (n_variants,)
Variant positions, using 1-based coordinates, in ascending order.
array1, array2, ... : array_like
        N-dimensional array objects with n_variants elements along the first dimension.
Returns
-------
pos : array_like, int, shape (n_items,)
Positions array consisting exclusively of accessible sites in the
original positions array.
    array1, array2, ... : array_like
        N-dimensional array objects with n_variants elements along the first
        dimension, but now consisting exclusively of accessible sites from
        the original arrays.
"""
if is_accessible is not None:
# sanity check
if np.max(pos) > len(is_accessible):
raise ValueError(
'Not all positions are covered by is_accessible.'
)
# check array shapes
check_dim0_aligned(pos, *arrays)
loc_accessible = is_accessible[pos-1]
if np.any(np.logical_not(loc_accessible)):
warnings.warn("Some variants were inaccessible and hence masked.")
arrays = tuple(a[loc_accessible] for a in arrays)
pos = pos[loc_accessible]
return (pos,) + arrays
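# A minimal, self-contained sketch of mask_inaccessible() on invented data
# (not part of the original module); the accessibility mask and allele-count
# array below are purely illustrative.
def _example_mask_inaccessible():
    is_accessible = np.array([True, False, True, True, False])
    pos = np.array([1, 2, 4])  # 1-based variant positions
    ac = np.array([[2, 0], [1, 1], [0, 2]])  # one row per variant
    pos_out, ac_out = mask_inaccessible(is_accessible, pos, ac)
    # position 2 is inaccessible, so it is dropped from both outputs
    assert list(pos_out) == [1, 4]
    assert ac_out.shape == (2, 2)
    return pos_out, ac_out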
class _HashedSeq(list):
__slots__ = 'hashvalue'
# noinspection PyShadowingBuiltins,PyMissingConstructor
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
# noinspection PyShadowingBuiltins
def _make_key(args, kwds, typed,
kwd_mark=('__kwargs__',),
fasttypes=(int, str, frozenset, type(None)),
sorted=sorted, tuple=tuple, type=type, len=len):
key = args
kwd_items = sorted(kwds.items())
if kwds:
key += kwd_mark
for item in kwd_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for _, v in kwd_items)
else:
key = args
if len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def _hdf5_cache_act(filepath, parent, container, key, names, no_cache,
user_function, args, kwargs, h5dcreate_kwargs):
import h5py
# open the HDF5 file
with h5py.File(filepath, mode='a') as h5f:
# find parent group
if parent is None:
# use root group
h5g_parent = h5f
else:
h5g_parent = h5f.require_group(parent)
# find cache container group
h5g_container = h5g_parent.require_group(container)
# find cache group
h5g = h5g_container.require_group(key)
# call user function and (re)build cache
if no_cache or '__success__' not in h5g.attrs:
# reset success mark if present
if '__success__' in h5g.attrs:
del h5g.attrs['__success__']
# compute result
result = user_function(*args, **kwargs)
# handle tuple of return values
if isinstance(result, tuple):
# determine dataset names
if names is None:
names = ['f%02d' % i for i in range(len(result))]
elif len(names) < len(result):
names = list(names) + ['f%02d' % i
for i in range(len(names),
len(result))]
# save data
for n, r in zip(names, result):
if n in h5g:
del h5g[n]
if np.isscalar(r):
h5g.create_dataset(n, data=r)
else:
h5g.create_dataset(n, data=r, **h5dcreate_kwargs)
# handle single return value
else:
# determine dataset name
if names is None:
n = 'data'
elif isinstance(names, str):
n = names
elif len(names) > 0:
n = names[0]
else:
n = 'data'
# save data
if n in h5g:
del h5g[n]
if np.isscalar(result):
h5g.create_dataset(n, data=result)
else:
h5g.create_dataset(n, data=result,
**h5dcreate_kwargs)
# mark success
h5g.attrs['__success__'] = True
# load from cache
else:
# determine dataset names
if names is None:
names = sorted(h5g.keys())
elif isinstance(names, str):
names = (names,)
# load result from cache
if len(names) == 1:
result = h5g[names[0]]
result = result[:] if len(result.shape) > 0 else result[()]
else:
result = tuple(h5g[n] for n in names)
result = tuple(r[:] if len(r.shape) > 0 else r[()]
for r in result)
return result
def hdf5_cache(filepath=None, parent=None, group=None, names=None, typed=False,
hashed_key=False, **h5dcreate_kwargs):
"""HDF5 cache decorator.
Parameters
----------
filepath : string, optional
Path to HDF5 file. If None a temporary file name will be used.
parent : string, optional
Path to group within HDF5 file to use as parent. If None the root
group will be used.
group : string, optional
Path to group within HDF5 file, relative to parent, to use as
container for cached data. If None the name of the wrapped function
will be used.
names : sequence of strings, optional
Name(s) of dataset(s). If None, default names will be 'f00', 'f01',
etc.
typed : bool, optional
If True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
hashed_key : bool, optional
If False (default) the key will not be hashed, which makes for
readable cache group names. If True the key will be hashed, however
note that on Python >= 3.3 the hash value will not be the same between
sessions unless the environment variable PYTHONHASHSEED has been set
to the same value.
Returns
-------
decorator : function
Examples
--------
Without any arguments, will cache using a temporary HDF5 file::
>>> import allel
>>> @allel.util.hdf5_cache()
... def foo(n):
... print('executing foo')
... return np.arange(n)
...
>>> foo(3)
executing foo
array([0, 1, 2])
>>> foo(3)
array([0, 1, 2])
>>> foo.cache_filepath # doctest: +SKIP
'/tmp/tmp_jwtwgjz'
Supports multiple return values, including scalars, e.g.::
>>> @allel.util.hdf5_cache()
... def bar(n):
... print('executing bar')
... a = np.arange(n)
... return a, a**2, n**2
...
>>> bar(3)
executing bar
(array([0, 1, 2]), array([0, 1, 4]), 9)
>>> bar(3)
(array([0, 1, 2]), array([0, 1, 4]), 9)
Names can also be specified for the datasets, e.g.::
>>> @allel.util.hdf5_cache(names=['z', 'x', 'y'])
... def baz(n):
... print('executing baz')
... a = np.arange(n)
... return a, a**2, n**2
...
>>> baz(3)
executing baz
(array([0, 1, 2]), array([0, 1, 4]), 9)
>>> baz(3)
(array([0, 1, 2]), array([0, 1, 4]), 9)
"""
# initialise HDF5 file path
if filepath is None:
import tempfile
filepath = tempfile.mktemp(prefix='scikit_allel_', suffix='.h5')
atexit.register(os.remove, filepath)
# initialise defaults for dataset creation
h5dcreate_kwargs.setdefault('chunks', True)
def decorator(user_function):
# setup the name for the cache container group
if group is None:
container = user_function.__name__
else:
container = group
def wrapper(*args, **kwargs):
# load from cache or not
no_cache = kwargs.pop('no_cache', False)
# compute a key from the function arguments
key = _make_key(args, kwargs, typed)
if hashed_key:
key = str(hash(key))
else:
key = str(key).replace('/', '__slash__')
return _hdf5_cache_act(filepath, parent, container, key, names,
no_cache, user_function, args, kwargs,
h5dcreate_kwargs)
wrapper.cache_filepath = filepath
return update_wrapper(wrapper, user_function)
return decorator
def contains_newaxis(item):
if item is None:
return True
elif item is np.newaxis:
return True
elif isinstance(item, tuple):
return any((i is None or i is np.newaxis) for i in item)
return False
def check_ploidy(actual, expect):
if expect != actual:
raise ValueError(
'expected ploidy %s, found %s' % (expect, actual)
)
def check_min_samples(actual, expect):
if actual < expect:
raise ValueError(
'expected at least %s samples, found %s' % (expect, actual)
)
def check_type(obj, expected):
if not isinstance(obj, expected):
raise TypeError('bad argument type, expected %s, found %s' % (expected, type(obj)))
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import errno
import glob
import ntpath
import os
import subprocess
import sys
import tempfile
from .envfuncs import Env, add_path_entry
from .manifest import ContextGenerator
from .platform import HostType, is_windows
try:
import typing # noqa: F401
except ImportError:
pass
def containing_repo_type(path):
while True:
if os.path.exists(os.path.join(path, ".git")):
return ("git", path)
if os.path.exists(os.path.join(path, ".hg")):
return ("hg", path)
parent = os.path.dirname(path)
if parent == path:
return None, None
path = parent
def detect_project(path):
repo_type, repo_root = containing_repo_type(path)
if repo_type is None:
return None, None
# Look for a .projectid file. If it exists, read the project name from it.
project_id_path = os.path.join(repo_root, ".projectid")
try:
with open(project_id_path, "r") as f:
project_name = f.read().strip()
return repo_root, project_name
except EnvironmentError as ex:
if ex.errno != errno.ENOENT:
raise
return repo_root, None
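# Hypothetical usage sketch (not part of the original module): locate the
# enclosing repository for the current directory and fall back to the
# directory name when no .projectid file is present.
def _example_detect_project():
    repo_root, project = detect_project(os.getcwd())
    if repo_root is None:
        return None
    if project is None:
        project = os.path.basename(repo_root)
    return repo_root, project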
class BuildOptions(object):
def __init__(
self,
fbcode_builder_dir,
scratch_dir,
host_type,
install_dir=None,
num_jobs=0,
use_shipit=False,
vcvars_path=None,
):
""" fbcode_builder_dir - the path to either the in-fbsource fbcode_builder dir,
or for shipit-transformed repos, the build dir that
has been mapped into that dir.
scratch_dir - a place where we can store repos and build bits.
This path should be stable across runs and ideally
should not be in the repo of the project being built,
but that is ultimately where we generally fall back
for builds outside of FB
install_dir - where the project will ultimately be installed
num_jobs - the level of concurrency to use while building
use_shipit - use real shipit instead of the simple shipit transformer
        vcvars_path - Path to external VS toolchain's vcvarsall.bat
"""
if not num_jobs:
import multiprocessing
num_jobs = multiprocessing.cpu_count()
if not install_dir:
install_dir = os.path.join(scratch_dir, "installed")
self.project_hashes = None
for p in ["../deps/github_hashes", "../project_hashes"]:
hashes = os.path.join(fbcode_builder_dir, p)
if os.path.exists(hashes):
self.project_hashes = hashes
break
# Detect what repository and project we are being run from.
self.repo_root, self.repo_project = detect_project(os.getcwd())
# If we are running from an fbsource repository, set self.fbsource_dir
# to allow the ShipIt-based fetchers to use it.
if self.repo_project == "fbsource":
self.fbsource_dir = self.repo_root
else:
self.fbsource_dir = None
self.num_jobs = num_jobs
self.scratch_dir = scratch_dir
self.install_dir = install_dir
self.fbcode_builder_dir = fbcode_builder_dir
self.host_type = host_type
self.use_shipit = use_shipit
if vcvars_path is None and is_windows():
# On Windows, the compiler is not available in the PATH by
# default so we need to run the vcvarsall script to populate the
# environment. We use a glob to find some version of this script
# as deployed with Visual Studio 2017. This logic can also
# locate Visual Studio 2019 but note that at the time of writing
# the version of boost in our manifest cannot be built with
# VS 2019, so we're effectively tied to VS 2017 until we upgrade
# the boost dependency.
vcvarsall = []
for year in ["2017", "2019"]:
vcvarsall += glob.glob(
os.path.join(
os.environ["ProgramFiles(x86)"],
"Microsoft Visual Studio",
year,
"*",
"VC",
"Auxiliary",
"Build",
"vcvarsall.bat",
)
)
vcvars_path = vcvarsall[0]
self.vcvars_path = vcvars_path
@property
def manifests_dir(self):
return os.path.join(self.fbcode_builder_dir, "manifests")
def is_darwin(self):
return self.host_type.is_darwin()
def is_windows(self):
return self.host_type.is_windows()
def get_vcvars_path(self):
return self.vcvars_path
def is_linux(self):
return self.host_type.is_linux()
def get_context_generator(self, host_tuple=None, facebook_internal=None):
""" Create a manifest ContextGenerator for the specified target platform. """
if host_tuple is None:
host_type = self.host_type
elif isinstance(host_tuple, HostType):
host_type = host_tuple
else:
host_type = HostType.from_tuple_string(host_tuple)
# facebook_internal is an Optional[bool]
# If it is None, default to assuming this is a Facebook-internal build if
# we are running in an fbsource repository.
if facebook_internal is None:
facebook_internal = self.fbsource_dir is not None
return ContextGenerator(
{
"os": host_type.ostype,
"distro": host_type.distro,
"distro_vers": host_type.distrovers,
"fb": "on" if facebook_internal else "off",
"test": "off",
}
)
def compute_env_for_install_dirs(self, install_dirs, env=None):
if env is not None:
env = env.copy()
else:
env = Env()
if self.fbsource_dir:
env["YARN_YARN_OFFLINE_MIRROR"] = os.path.join(
self.fbsource_dir, "xplat/third-party/yarn/offline-mirror"
)
yarn_exe = "yarn.bat" if self.is_windows() else "yarn"
env["YARN_PATH"] = os.path.join(
self.fbsource_dir, "xplat/third-party/yarn/", yarn_exe
)
node_exe = "node-win-x64.exe" if self.is_windows() else "node"
env["NODE_BIN"] = os.path.join(
self.fbsource_dir, "xplat/third-party/node/bin/", node_exe
)
lib_path = None
if self.is_darwin():
lib_path = "DYLD_LIBRARY_PATH"
elif self.is_linux():
lib_path = "LD_LIBRARY_PATH"
elif self.is_windows():
lib_path = "PATH"
else:
lib_path = None
for d in install_dirs:
add_path_entry(env, "CMAKE_PREFIX_PATH", d)
pkgconfig = os.path.join(d, "lib/pkgconfig")
if os.path.exists(pkgconfig):
add_path_entry(env, "PKG_CONFIG_PATH", pkgconfig)
pkgconfig = os.path.join(d, "lib64/pkgconfig")
if os.path.exists(pkgconfig):
add_path_entry(env, "PKG_CONFIG_PATH", pkgconfig)
# Allow resolving shared objects built earlier (eg: zstd
# doesn't include the full path to the dylib in its linkage
# so we need to give it an assist)
if lib_path:
for lib in ["lib", "lib64"]:
libdir = os.path.join(d, lib)
if os.path.exists(libdir):
add_path_entry(env, lib_path, libdir)
# Allow resolving binaries (eg: cmake, ninja) and dlls
# built by earlier steps
bindir = os.path.join(d, "bin")
if os.path.exists(bindir):
add_path_entry(env, "PATH", bindir, append=False)
            # If rustc is present in the `bin` directory, set RUSTC to prevent
            # cargo from using the rustc installed on the system.
if self.is_windows():
rustc_path = os.path.join(bindir, "rustc.bat")
rustdoc_path = os.path.join(bindir, "rustdoc.bat")
else:
rustc_path = os.path.join(bindir, "rustc")
rustdoc_path = os.path.join(bindir, "rustdoc")
if os.path.isfile(rustc_path):
env["RUSTC"] = rustc_path
env["RUSTDOC"] = rustdoc_path
return env
def list_win32_subst_letters():
output = subprocess.check_output(["subst"]).decode("utf-8")
# The output is a set of lines like: `F:\: => C:\open\some\where`
lines = output.strip().split("\r\n")
mapping = {}
for line in lines:
fields = line.split(": => ")
if len(fields) != 2:
continue
letter = fields[0]
path = fields[1]
mapping[letter] = path
return mapping
def find_existing_win32_subst_for_path(
path, # type: str
subst_mapping, # type: typing.Mapping[str, str]
):
# type: (...) -> typing.Optional[str]
path = ntpath.normcase(ntpath.normpath(path))
for letter, target in subst_mapping.items():
if ntpath.normcase(target) == path:
return letter
return None
def find_unused_drive_letter():
import ctypes
buffer_len = 256
blen = ctypes.c_uint(buffer_len)
rv = ctypes.c_uint()
bufs = ctypes.create_string_buffer(buffer_len)
rv = ctypes.windll.kernel32.GetLogicalDriveStringsA(blen, bufs)
if rv > buffer_len:
raise Exception("GetLogicalDriveStringsA result too large for buffer")
nul = "\x00".encode("ascii")
used = [drive.decode("ascii")[0] for drive in bufs.raw.strip(nul).split(nul)]
possible = [c for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
available = sorted(list(set(possible) - set(used)))
if len(available) == 0:
return None
# Prefer to assign later letters rather than earlier letters
return available[-1]
def create_subst_path(path):
for _attempt in range(0, 24):
drive = find_existing_win32_subst_for_path(
path, subst_mapping=list_win32_subst_letters()
)
if drive:
return drive
available = find_unused_drive_letter()
if available is None:
raise Exception(
(
"unable to make shorter subst mapping for %s; "
"no available drive letters"
)
% path
)
# Try to set up a subst mapping; note that we may be racing with
# other processes on the same host, so this may not succeed.
try:
subprocess.check_call(["subst", "%s:" % available, path])
return "%s:\\" % available
except Exception:
print("Failed to map %s -> %s" % (available, path))
raise Exception("failed to set up a subst path for %s" % path)
def _check_host_type(args, host_type):
if host_type is None:
host_tuple_string = getattr(args, "host_type", None)
if host_tuple_string:
host_type = HostType.from_tuple_string(host_tuple_string)
else:
host_type = HostType()
assert isinstance(host_type, HostType)
return host_type
def setup_build_options(args, host_type=None):
""" Create a BuildOptions object based on the arguments """
fbcode_builder_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
scratch_dir = args.scratch_path
if not scratch_dir:
# TODO: `mkscratch` doesn't currently know how best to place things on
# sandcastle, so whip up something reasonable-ish
if "SANDCASTLE" in os.environ:
if "DISK_TEMP" not in os.environ:
raise Exception(
(
"I need DISK_TEMP to be set in the sandcastle environment "
"so that I can store build products somewhere sane"
)
)
scratch_dir = os.path.join(
os.environ["DISK_TEMP"], "fbcode_builder_getdeps"
)
if not scratch_dir:
try:
scratch_dir = (
subprocess.check_output(
["mkscratch", "path", "--subdir", "fbcode_builder_getdeps"]
)
.strip()
.decode("utf-8")
)
except OSError as exc:
if exc.errno != errno.ENOENT:
# A legit failure; don't fall back, surface the error
raise
# This system doesn't have mkscratch so we fall back to
# something local.
munged = fbcode_builder_dir.replace("Z", "zZ")
for s in ["/", "\\", ":"]:
munged = munged.replace(s, "Z")
scratch_dir = os.path.join(
tempfile.gettempdir(), "fbcode_builder_getdeps-%s" % munged
)
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
if is_windows():
subst = create_subst_path(scratch_dir)
print(
"Mapping scratch dir %s -> %s" % (scratch_dir, subst), file=sys.stderr
)
scratch_dir = subst
else:
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
# Make sure we normalize the scratch path. This path is used as part of the hash
# computation for detecting if projects have been updated, so we need to always
# use the exact same string to refer to a given directory.
scratch_dir = os.path.realpath(scratch_dir)
host_type = _check_host_type(args, host_type)
return BuildOptions(
fbcode_builder_dir,
scratch_dir,
host_type,
install_dir=args.install_prefix,
num_jobs=args.num_jobs,
use_shipit=args.use_shipit,
vcvars_path=args.vcvars_path,
)
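# Hypothetical usage sketch (not part of the original module): the Namespace
# below only mirrors the attributes setup_build_options() reads; the real
# getdeps CLI builds these with argparse, and the scratch path is made up.
def _example_setup_build_options():
    import argparse
    args = argparse.Namespace(
        scratch_path=os.path.join(tempfile.gettempdir(), "getdeps_example"),
        install_prefix=None,
        num_jobs=0,
        use_shipit=False,
        vcvars_path=None,
        host_type=None,
    )
    return setup_build_options(args)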
|
|
import os
import signal
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from .models import Reporter
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
available_apps = ['transactions']
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(
Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
)
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(
Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
)
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
# atomic block shouldn't rollback, but force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
with connection.cursor() as cursor:
cursor.execute(
"SELECT no_such_col FROM transactions_reporter")
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipIf(sys.platform.startswith('win'), "Windows doesn't have signals.")
def test_rollback_on_keyboardinterrupt(self):
try:
with transaction.atomic():
Reporter.objects.create(first_name='Tintin')
# Send SIGINT (simulate Ctrl-C). One call isn't enough.
os.kill(os.getpid(), signal.SIGINT)
os.kill(os.getpid(), signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertEqual(Reporter.objects.all().count(), 0)
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
@skipIf(
connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit."
)
class AtomicWithoutAutocommitTests(AtomicTests):
"""All basic tests for atomic should also pass when autocommit is turned off."""
def setUp(self):
transaction.set_autocommit(False)
def tearDown(self):
        # The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ['transactions']
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
                # connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
            # connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
                # connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
available_apps = ['transactions']
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.commit()
with self.assertRaises(transaction.TransactionManagementError):
transaction.rollback()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
with self.assertRaises(transaction.TransactionManagementError):
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature('atomic_transactions')
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
available_apps = ['transactions']
@skipIf(threading is None, "Test requires threading")
def test_implicit_savepoint_rollback(self):
"""MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
Reporter.objects.create(id=1)
Reporter.objects.create(id=2)
main_thread_ready = threading.Event()
def other_thread():
try:
with transaction.atomic():
Reporter.objects.select_for_update().get(id=1)
main_thread_ready.wait()
# 1) This line locks... (see below for 2)
Reporter.objects.exclude(id=1).update(id=2)
finally:
# This is the thread-local connection, not the main connection.
connection.close()
other_thread = threading.Thread(target=other_thread)
other_thread.start()
with self.assertRaisesMessage(OperationalError, 'Deadlock found'):
# Double atomic to enter a transaction and create a savepoint.
with transaction.atomic():
with transaction.atomic():
Reporter.objects.select_for_update().get(id=2)
main_thread_ready.set()
# The two threads can't be synchronized with an event here
# because the other thread locks. Sleep for a little while.
time.sleep(1)
# 2) ... and this line deadlocks. (see above for 1)
Reporter.objects.exclude(id=2).update(id=1)
other_thread.join()
class AtomicMiscTests(TransactionTestCase):
available_apps = []
def test_wrap_callable_instance(self):
"""#20028 -- Atomic must support wrapping callable instances."""
class Callable:
def __call__(self):
pass
# Must not raise an exception
transaction.atomic(Callable())
@skipUnlessDBFeature('can_release_savepoints')
def test_atomic_does_not_leak_savepoints_on_failure(self):
"""#23074 -- Savepoints must be released after rollback."""
# Expect an error when rolling back a savepoint that doesn't exist.
# Done outside of the transaction block to ensure proper recovery.
with self.assertRaises(Error):
# Start a plain transaction.
with transaction.atomic():
# Swallow the intentional error raised in the sub-transaction.
with self.assertRaisesMessage(Exception, "Oops"):
# Start a sub-transaction with a savepoint.
with transaction.atomic():
sid = connection.savepoint_ids[-1]
raise Exception("Oops")
# This is expected to fail because the savepoint no longer exists.
connection.savepoint_rollback(sid)
@skipIf(
connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit."
)
class NonAutocommitTests(TransactionTestCase):
available_apps = []
def test_orm_query_after_error_and_rollback(self):
"""
ORM queries are allowed after an error and a rollback in non-autocommit
mode (#27504).
"""
transaction.set_autocommit(False)
r1 = Reporter.objects.create(first_name='Archibald', last_name='Haddock')
r2 = Reporter(first_name='Cuthbert', last_name='Calculus', id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
transaction.rollback()
Reporter.objects.last()
def test_orm_query_without_autocommit(self):
"""#24921 -- ORM queries must be possible after set_autocommit(False)."""
transaction.set_autocommit(False)
try:
Reporter.objects.create(first_name="Tintin")
finally:
transaction.rollback()
transaction.set_autocommit(True)
|
|
# Copyright 2015 Hitachi Data Systems inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ddt
import mock
from manila.common import constants
from manila import context
from manila.data import helper as data_copy_helper
from manila import db
from manila import exception
from manila.share import rpcapi as share_rpc
from manila import test
from manila.tests import db_utils
from manila import utils
@ddt.ddt
class DataServiceHelperTestCase(test.TestCase):
"""Tests DataServiceHelper."""
def setUp(self):
super(DataServiceHelperTestCase, self).setUp()
self.share = db_utils.create_share()
self.share_instance = db_utils.create_share_instance(
share_id=self.share['id'],
status=constants.STATUS_AVAILABLE)
self.context = context.get_admin_context()
self.share_instance = db.share_instance_get(
self.context, self.share_instance['id'], with_share_data=True)
self.access = db_utils.create_access(share_id=self.share['id'])
self.helper = data_copy_helper.DataServiceHelper(
self.context, db, self.share)
@ddt.data(True, False)
def test_allow_access_to_data_service(self, allow_dest_instance):
access = db_utils.create_access(share_id=self.share['id'])
info_src = {
'access_mapping': {
'ip': ['nfs'],
'user': ['cifs', 'nfs'],
}
}
info_dest = {
'access_mapping': {
'ip': ['nfs', 'cifs'],
'user': ['cifs'],
}
}
if allow_dest_instance:
mapping = {'ip': ['nfs'], 'user': ['cifs']}
else:
mapping = info_src['access_mapping']
fake_access = {
'access_to': 'fake_ip',
'access_level': constants.ACCESS_LEVEL_RW,
'access_type': 'ip',
}
access_values = fake_access
access_values['share_id'] = self.share['id']
self.mock_object(
self.helper, '_get_access_entries_according_to_mapping',
mock.Mock(return_value=[fake_access]))
self.mock_object(
self.helper.db, 'share_access_get_all_by_type_and_access',
mock.Mock(return_value=[access]))
change_data_access_call = self.mock_object(
self.helper, '_change_data_access_to_instance')
self.mock_object(self.helper.db, 'share_instance_access_create',
mock.Mock(return_value=access))
if allow_dest_instance:
result = self.helper.allow_access_to_data_service(
self.share_instance, info_src, self.share_instance, info_dest)
else:
result = self.helper.allow_access_to_data_service(
self.share_instance, info_src)
self.assertEqual([access], result)
(self.helper._get_access_entries_according_to_mapping.
assert_called_once_with(mapping))
(self.helper.db.share_access_get_all_by_type_and_access.
assert_called_once_with(
self.context, self.share['id'], fake_access['access_type'],
fake_access['access_to']))
access_create_calls = [
mock.call(self.context, access_values, self.share_instance['id'])
]
if allow_dest_instance:
access_create_calls.append(mock.call(
self.context, access_values, self.share_instance['id']))
self.helper.db.share_instance_access_create.assert_has_calls(
access_create_calls)
change_access_calls = [
mock.call(self.share_instance, [access], deny=True),
mock.call(self.share_instance),
]
if allow_dest_instance:
change_access_calls.append(
mock.call(self.share_instance))
self.assertEqual(len(change_access_calls),
change_data_access_call.call_count)
change_data_access_call.assert_has_calls(change_access_calls)
@ddt.data({'ip': []}, {'cert': []}, {'user': []}, {'cephx': []}, {'x': []})
def test__get_access_entries_according_to_mapping(self, mapping):
data_copy_helper.CONF.data_node_access_cert = 'fake'
data_copy_helper.CONF.data_node_access_ip = 'fake'
data_copy_helper.CONF.data_node_access_admin_user = 'fake'
expected = [{
'access_type': list(mapping.keys())[0],
'access_level': constants.ACCESS_LEVEL_RW,
'access_to': 'fake',
}]
exists = [x for x in mapping if x in ('ip', 'user', 'cert')]
if exists:
result = self.helper._get_access_entries_according_to_mapping(
mapping)
self.assertEqual(expected, result)
else:
self.assertRaises(
exception.ShareDataCopyFailed,
self.helper._get_access_entries_according_to_mapping, mapping)
def test__get_access_entries_according_to_mapping_exception_not_set(self):
data_copy_helper.CONF.data_node_access_ip = None
self.assertRaises(
exception.ShareDataCopyFailed,
self.helper._get_access_entries_according_to_mapping, {'ip': []})
def test__get_access_entries_according_to_mapping_ip_list(self):
ips = ['fake1', 'fake2']
data_copy_helper.CONF.data_node_access_ips = ips
data_copy_helper.CONF.data_node_access_ip = None
expected = [{
'access_type': 'ip',
'access_level': constants.ACCESS_LEVEL_RW,
'access_to': x,
} for x in ips]
result = self.helper._get_access_entries_according_to_mapping(
{'ip': []})
self.assertEqual(expected, result)
def test_deny_access_to_data_service(self):
# mocks
self.mock_object(self.helper, '_change_data_access_to_instance')
# run
self.helper.deny_access_to_data_service(
[self.access], self.share_instance['id'])
# asserts
self.helper._change_data_access_to_instance.assert_called_once_with(
self.share_instance['id'], [self.access], deny=True)
@ddt.data(None, Exception('fake'))
def test_cleanup_data_access(self, exc):
# mocks
self.mock_object(self.helper, 'deny_access_to_data_service',
mock.Mock(side_effect=exc))
self.mock_object(data_copy_helper.LOG, 'warning')
# run
self.helper.cleanup_data_access([self.access],
self.share_instance['id'])
# asserts
self.helper.deny_access_to_data_service.assert_called_once_with(
[self.access], self.share_instance['id'])
if exc:
self.assertTrue(data_copy_helper.LOG.warning.called)
@ddt.data(False, True)
def test_cleanup_temp_folder(self, exc):
fake_path = ''.join(('/fake_path/', self.share_instance['id']))
# mocks
self.mock_object(os.path, 'exists',
mock.Mock(side_effect=[True, True, exc]))
self.mock_object(os, 'rmdir')
self.mock_object(data_copy_helper.LOG, 'warning')
# run
self.helper.cleanup_temp_folder(
self.share_instance['id'], '/fake_path/')
# asserts
os.rmdir.assert_called_once_with(fake_path)
os.path.exists.assert_has_calls([
mock.call(fake_path),
mock.call(fake_path),
mock.call(fake_path)
])
if exc:
self.assertTrue(data_copy_helper.LOG.warning.called)
@ddt.data(None, Exception('fake'))
def test_cleanup_unmount_temp_folder(self, exc):
# mocks
self.mock_object(self.helper, 'unmount_share_instance',
mock.Mock(side_effect=exc))
self.mock_object(data_copy_helper.LOG, 'warning')
# run
self.helper.cleanup_unmount_temp_folder(
'unmount_template', 'fake_path', self.share_instance['id'])
# asserts
self.helper.unmount_share_instance.assert_called_once_with(
'unmount_template', 'fake_path', self.share_instance['id'])
if exc:
self.assertTrue(data_copy_helper.LOG.warning.called)
@ddt.data(True, False)
def test__change_data_access_to_instance(self, deny):
access_rule = db_utils.create_access(share_id=self.share['id'])
access_rule = db.share_instance_access_get(
self.context, access_rule['id'], self.share_instance['id'])
# mocks
self.mock_object(share_rpc.ShareAPI, 'update_access')
self.mock_object(utils, 'wait_for_access_update')
mock_access_rules_status_update = self.mock_object(
self.helper.access_helper,
'get_and_update_share_instance_access_rules_status')
mock_rules_update = self.mock_object(
self.helper.access_helper,
'get_and_update_share_instance_access_rules')
# run
self.helper._change_data_access_to_instance(
self.share_instance, access_rule, deny=deny)
# asserts
if deny:
mock_rules_update.assert_called_once_with(
self.context, share_instance_id=self.share_instance['id'],
filters={'access_id': [access_rule['id']]},
updates={'state': constants.ACCESS_STATE_QUEUED_TO_DENY})
else:
self.assertFalse(mock_rules_update.called)
share_rpc.ShareAPI.update_access.assert_called_once_with(
self.context, self.share_instance)
mock_access_rules_status_update.assert_called_once_with(
self.context, status=constants.SHARE_INSTANCE_RULES_SYNCING,
share_instance_id=self.share_instance['id'])
utils.wait_for_access_update.assert_called_once_with(
self.context, self.helper.db, self.share_instance,
data_copy_helper.CONF.data_access_wait_access_rules_timeout)
def test_mount_share_instance(self):
fake_path = ''.join(('/fake_path/', self.share_instance['id']))
# mocks
self.mock_object(utils, 'execute')
self.mock_object(os.path, 'exists', mock.Mock(
side_effect=[False, False, True]))
self.mock_object(os, 'makedirs')
# run
self.helper.mount_share_instance(
'mount %(path)s', '/fake_path', self.share_instance)
# asserts
utils.execute.assert_called_once_with('mount', fake_path,
run_as_root=True)
os.makedirs.assert_called_once_with(fake_path)
os.path.exists.assert_has_calls([
mock.call(fake_path),
mock.call(fake_path),
mock.call(fake_path)
])
@ddt.data([True, True, False], [True, True, Exception('fake')])
def test_unmount_share_instance(self, side_effect):
fake_path = ''.join(('/fake_path/', self.share_instance['id']))
# mocks
self.mock_object(utils, 'execute')
self.mock_object(os.path, 'exists', mock.Mock(
side_effect=side_effect))
self.mock_object(os, 'rmdir')
self.mock_object(data_copy_helper.LOG, 'warning')
# run
self.helper.unmount_share_instance(
'unmount %(path)s', '/fake_path', self.share_instance['id'])
# asserts
utils.execute.assert_called_once_with('unmount', fake_path,
run_as_root=True)
os.rmdir.assert_called_once_with(fake_path)
os.path.exists.assert_has_calls([
mock.call(fake_path),
mock.call(fake_path),
mock.call(fake_path)
])
if any(isinstance(x, Exception) for x in side_effect):
self.assertTrue(data_copy_helper.LOG.warning.called)
|
|
from django import forms
from django.forms import ModelForm
import six
import json
from ..constants import START_YEAR
from .models import TaxSaveInputs
from .helpers import (is_safe, INPUTS_META, bool_like)
from .param_displayers import defaults_all
from .param_formatters import (get_default_policy_param,
ParameterLookUpException)
import taxcalc
class PolicyBrainForm:
def add_fields(self, args):
if not args:
return args
parsed_data = {}
args_data = args[0]
raw_fields = {}
for k, v in list(args_data.items()):
if k not in INPUTS_META:
raw_fields[k] = v
elif k in ('first_year', 'data_source'):
parsed_data[k] = v
else:
pass
parsed_data["raw_gui_field_inputs"] = json.dumps(raw_fields)
parsed_data["gui_field_inputs"] = json.dumps("")
return (parsed_data,)
def add_errors_on_extra_inputs(self):
ALLOWED_EXTRAS = {'has_errors', 'start_year', 'csrfmiddlewaretoken',
'data_source'}
all_inputs = set(self.data.keys())
allowed_inputs = set(self.fields.keys())
extra_inputs = all_inputs - allowed_inputs - ALLOWED_EXTRAS
for _input in extra_inputs:
self.add_error(None,
"Extra input '{0}' not allowed".format(_input))
all_fields = self.cleaned_data['raw_gui_field_inputs']
default_params = getattr(self.Meta, 'default_params', None)
allowed_fields = getattr(self.Meta, 'allowed_fields', None)
for _field in all_fields:
if default_params:
try:
get_default_policy_param(_field, default_params)
except ParameterLookUpException as exn:
self.add_error(None, str(exn))
elif _field not in allowed_fields:
msg = "Received unexpected parameter: {}".format(_field)
self.add_error(None, msg)
def do_taxcalc_validations(self):
"""
Do minimal type checking to make sure that we did not get any
malicious input
"""
fields = self.cleaned_data['raw_gui_field_inputs']
for param_name, value in fields.items():
# make sure the text parses OK
if param_name == 'data_source':
assert value in ('CPS', 'PUF')
elif isinstance(value, six.string_types) and len(value) > 0:
if not is_safe(value):
# Parse Error - we don't recognize what they gave us
self.add_error(param_name,
"Unrecognized value: {}".format(value))
try:
                    # the '<' operator may only appear at the beginning of the value
assert value.find('<') <= 0
except AssertionError:
self.add_error(
param_name,
("Operator '<' can only be used "
"at the beginning")
)
else:
assert isinstance(value, bool) or len(value) == 0
@staticmethod
def set_form(defaults):
"""
        Setup all of the form fields and widgets with the 2016 default data
"""
widgets = {}
labels = {}
update_fields = {}
boolean_fields = []
for param in list(defaults.values()):
for field in param.col_fields:
attrs = {
'class': 'form-control',
'placeholder': field.default_value,
}
if param.coming_soon:
attrs['disabled'] = True
if param.tc_id in boolean_fields:
checkbox = forms.CheckboxInput(
attrs=attrs, check_test=bool_like)
widgets[field.id] = checkbox
update_fields[field.id] = forms.BooleanField(
label=field.label,
widget=widgets[field.id],
required=False,
disabled=param.gray_out
)
else:
widgets[field.id] = forms.TextInput(attrs=attrs)
update_fields[field.id] = forms.fields.CharField(
label=field.label,
widget=widgets[field.id],
required=False,
disabled=param.gray_out
)
labels[field.id] = field.label
if getattr(param, "inflatable", False):
field = param.cpi_field
attrs = {
'class': 'form-control sr-only',
'placeholder': bool(field.default_value),
}
widgets[field.id] = forms.NullBooleanSelect(attrs=attrs)
update_fields[field.id] = forms.NullBooleanField(
label=field.label,
widget=widgets[field.id],
required=False,
disabled=param.gray_out
)
return widgets, labels, update_fields
TAXCALC_DEFAULTS = {
(int(START_YEAR), True): defaults_all(int(START_YEAR),
use_puf_not_cps=True)
}
class TaxBrainForm(PolicyBrainForm, ModelForm):
def __init__(self, first_year, use_puf_not_cps, *args, **kwargs):
        # Keep the start year and the data source on the form object for
        # later access. This should be refactored into `process_model`.
if first_year is None:
first_year = START_YEAR
self._first_year = int(first_year)
# reset form data; form data from the `Meta` class is not updated each
# time a new `TaxBrainForm` instance is created
self.set_form_data(self._first_year, use_puf_not_cps)
# move parameter fields into `raw_fields` JSON object
args = self.add_fields(args)
# Override `initial` with `instance`. The only relevant field
        # in `instance` is `raw_gui_field_inputs`, which contains all of the user
# input data from the stored run. By overriding the `initial` kw
# argument we are making all of the user input from the previous run
        # as stored in the `raw_gui_field_inputs` field of `instance` available
# to the fields attribute in django forms. This front-end data is
# derived from this fields attribute.
# Take a look at the source code for more info:
# https://github.com/django/django/blob/1.9/django/forms/models.py#L284-L285
if "instance" in kwargs:
kwargs["initial"] = kwargs["instance"].raw_gui_field_inputs
# Update CPI flags if either
# 1. initial is specified in `kwargs` (reform has warning/error msgs)
# 2. if `instance` is specified and `initial` is added above
# (edit parameters page)
if "initial" in kwargs:
for k, v in kwargs["initial"].items():
if k.endswith("cpi") and v:
# raw data is stored as choices 1, 2, 3 with the following
# mapping:
# '1': unknown
# '2': True
# '3': False
# value_from_datadict unpacks this data:
# https://github.com/django/django/blob/1.9/django/forms/widgets.py#L582-L589
if v == '1':
continue
django_val = self.widgets[k].value_from_datadict(
kwargs["initial"],
None,
k
)
self.widgets[k].attrs["placeholder"] = django_val
if not hasattr(self, 'cleaned_data'):
self.cleaned_data = {'raw_gui_field_inputs': kwargs['initial']}
super(TaxBrainForm, self).__init__(*args, **kwargs)
# update fields in a similar way as
# https://www.pydanny.com/overloading-form-fields.html
self.fields.update(self.update_fields.copy())
def clean(self):
"""
" This method should be used to provide custom model validation, and to
modify attributes on your model if desired. For instance, you could use
it to automatically provide a value for a field, or to do validation
that requires access to more than a single field."
per https://docs.djangoproject.com/en/1.8/ref/models/instances/
Note that this can be defined both on forms and on the model, but is
only automatically called on form submissions.
"""
self.do_taxcalc_validations()
self.add_errors_on_extra_inputs()
def add_error(self, field, error):
"""
Safely adds errors. There was an issue where the `cleaned_data`
attribute wasn't created after `is_valid` was called. This ensures
that the `cleaned_data` attribute is there.
"""
if getattr(
self,
"cleaned_data",
None) is None or self.cleaned_data is None:
self.cleaned_data = {}
ModelForm.add_error(self, field, error)
def set_form_data(self, start_year, use_puf_not_cps):
defaults_key = (start_year, use_puf_not_cps)
if defaults_key not in TAXCALC_DEFAULTS:
TAXCALC_DEFAULTS[defaults_key] = defaults_all(
start_year, use_puf_not_cps)
defaults = TAXCALC_DEFAULTS[defaults_key]
(self.widgets, self.labels,
self.update_fields) = PolicyBrainForm.set_form(defaults)
class Meta:
model = TaxSaveInputs
        # we are only updating the "first_year", "data_source",
        # "raw_gui_field_inputs", and "gui_field_inputs" fields
fields = ['first_year', 'data_source', 'raw_gui_field_inputs',
'gui_field_inputs']
start_year = int(START_YEAR)
default_policy = taxcalc.Policy.default_data(
start_year=int(START_YEAR),
metadata=True
)
default_behv = taxcalc.Behavior.default_data(
start_year=int(START_YEAR),
metadata=True
)
default_params = dict(default_policy, **default_behv)
defaults_key = (start_year, True)
if defaults_key not in TAXCALC_DEFAULTS:
TAXCALC_DEFAULTS[defaults_key] = defaults_all(
start_year,
use_puf_not_cps=True
)
(widgets, labels,
update_fields) = PolicyBrainForm.set_form(
TAXCALC_DEFAULTS[defaults_key]
)
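# Hedged usage sketch (not part of the original form machinery): it only
# illustrates the CPI-flag choice mapping described in `TaxBrainForm.__init__`
# above, where the raw values '1'/'2'/'3' stand for unknown/True/False and are
# unpacked by `NullBooleanSelect.value_from_datadict`. The field names below
# are hypothetical and used purely for illustration.
def _cpi_flag_mapping_example():
    from django import forms as django_forms
    widget = django_forms.NullBooleanSelect()
    raw = {"_STD_cpi": "2", "_II_em_cpi": "3", "_FICA_ss_trt_cpi": "1"}
    assert widget.value_from_datadict(raw, None, "_STD_cpi") is True
    assert widget.value_from_datadict(raw, None, "_II_em_cpi") is False
    # '1' (unknown) maps to None, which is why it is skipped in __init__.
    assert widget.value_from_datadict(raw, None, "_FICA_ss_trt_cpi") is None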
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 4 11:52:11 2017
@author: lracuna
"""
from vision.camera import *
from vision.plane import Plane
import autograd.numpy as np
from autograd import grad
from vision.error_functions import geometric_distance_points, get_matrix_conditioning_number, volker_metric,calculate_A_matrix
class Gradient:
def __init__(self):
self.dx1 = None
self.dy1 = None
self.dx2 = None
self.dy2 = None
self.dx3 = None
self.dy3 = None
self.dx4 = None
self.dy4 = None
self.dx5 = None
self.dy5 = None
self.dx6 = None
self.dy6 = None
self.dx1_eval = 0
self.dy1_eval = 0
self.dx2_eval = 0
self.dy2_eval = 0
self.dx3_eval = 0
self.dy3_eval = 0
self.dx4_eval = 0
self.dy4_eval = 0
self.dx5_eval = 0
self.dy5_eval = 0
self.dx6_eval = 0
self.dy6_eval = 0
self.dx1_eval_old = 0
self.dy1_eval_old = 0
self.dx2_eval_old = 0
self.dy2_eval_old = 0
self.dx3_eval_old = 0
self.dy3_eval_old = 0
self.dx4_eval_old = 0
self.dy4_eval_old = 0
self.dx5_eval_old = 0
self.dy5_eval_old = 0
self.dx6_eval_old = 0
self.dy6_eval_old = 0
self.n = 0.0001 #step in gradient descent
self.n_pos = 0.02*self.n # for SuperSAB
self.n_neg = 0.03*self.n # for SuperSAB
self.n_x1 = self.n
self.n_x2 = self.n
self.n_x3 = self.n
self.n_x4 = self.n
self.n_x5 = self.n
self.n_x6 = self.n
self.n_y1 = self.n
self.n_y2 = self.n
self.n_y3 = self.n
self.n_y4 = self.n
self.n_y5 = self.n
self.n_y6 = self.n
def set_n(self,n):
self.n = n
self.n_pos = 2*n # for SuperSAB
self.n_neg = 0.1*n # for SuperSAB
self.n_x1 = n
self.n_x2 = n
self.n_x3 = n
self.n_x4 = n
self.n_x5 = n
self.n_x6 = n
self.n_y1 = n
self.n_y2 = n
self.n_y3 = n
self.n_y4 = n
self.n_y5 = n
self.n_y6 = n
def calculate_A_matrix_autograd(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6,P,normalize=False):
""" Calculate the A matrix for the DLT algorithm: A.H = 0
all coordinates are in object plane
"""
X1 = np.array([[x1],[y1],[0.],[1.]]).reshape(4,1)
X2 = np.array([[x2],[y2],[0.],[1.]]).reshape(4,1)
X3 = np.array([[x3],[y3],[0.],[1.]]).reshape(4,1)
X4 = np.array([[x4],[y4],[0.],[1.]]).reshape(4,1)
X5 = np.array([[x5],[y5],[0.],[1.]]).reshape(4,1)
X6 = np.array([[x6],[y6],[0.],[1.]]).reshape(4,1)
U1 = np.array(np.dot(P,X1)).reshape(3,1)
U2 = np.array(np.dot(P,X2)).reshape(3,1)
U3 = np.array(np.dot(P,X3)).reshape(3,1)
U4 = np.array(np.dot(P,X4)).reshape(3,1)
U5 = np.array(np.dot(P,X5)).reshape(3,1)
U6 = np.array(np.dot(P,X6)).reshape(3,1)
object_pts = np.hstack([X1,X2,X3,X4,X5,X6])
image_pts = np.hstack([U1,U2,U3,U4,U5,U6])
if normalize:
object_pts_norm,T1 = normalise_points(object_pts)
image_pts_norm,T2 = normalise_points(image_pts)
else:
object_pts_norm = object_pts[[0,1,3],:]
image_pts_norm = image_pts
x1 = object_pts_norm[0,0]/object_pts_norm[2,0]
y1 = object_pts_norm[1,0]/object_pts_norm[2,0]
x2 = object_pts_norm[0,1]/object_pts_norm[2,1]
y2 = object_pts_norm[1,1]/object_pts_norm[2,1]
x3 = object_pts_norm[0,2]/object_pts_norm[2,2]
y3 = object_pts_norm[1,2]/object_pts_norm[2,2]
x4 = object_pts_norm[0,3]/object_pts_norm[2,3]
y4 = object_pts_norm[1,3]/object_pts_norm[2,3]
x5 = object_pts_norm[0,4]/object_pts_norm[2,4]
y5 = object_pts_norm[1,4]/object_pts_norm[2,4]
x6 = object_pts_norm[0,5]/object_pts_norm[2,5]
y6 = object_pts_norm[1,5]/object_pts_norm[2,5]
u1 = image_pts_norm[0,0]/image_pts_norm[2,0]
v1 = image_pts_norm[1,0]/image_pts_norm[2,0]
u2 = image_pts_norm[0,1]/image_pts_norm[2,1]
v2 = image_pts_norm[1,1]/image_pts_norm[2,1]
u3 = image_pts_norm[0,2]/image_pts_norm[2,2]
v3 = image_pts_norm[1,2]/image_pts_norm[2,2]
u4 = image_pts_norm[0,3]/image_pts_norm[2,3]
v4 = image_pts_norm[1,3]/image_pts_norm[2,3]
u5 = image_pts_norm[0,4]/image_pts_norm[2,4]
v5 = image_pts_norm[1,4]/image_pts_norm[2,4]
u6 = image_pts_norm[0,5]/image_pts_norm[2,5]
v6 = image_pts_norm[1,5]/image_pts_norm[2,5]
A = np.array([ [ 0, 0, 0, -x1, -y1, -1, v1*x1, v1*y1, v1],
[x1, y1, 1, 0, 0, 0, -u1*x1, -u1*y1, -u1],
[ 0, 0, 0, -x2, -y2, -1, v2*x2, v2*y2, v2],
[x2, y2, 1, 0, 0, 0, -u2*x2, -u2*y2, -u2],
[ 0, 0, 0, -x3, -y3, -1, v3*x3, v3*y3, v3],
[x3, y3, 1, 0, 0, 0, -u3*x3, -u3*y3, -u3],
[0, 0, 0, -x4, -y4, -1, v4*x4, v4*y4, v4],
[x4, y4, 1, 0, 0, 0, -u4*x4, -u4*y4, -u4],
[0, 0, 0, -x5, -y5, -1, v5*x5, v5*y5, v5],
[x5, y5, 1, 0, 0, 0, -u5*x5, -u5*y5, -u5],
[0, 0, 0, -x6, -y6, -1, v6*x6, v6*y6, v6],
[x6, y6, 1, 0, 0, 0, -u6*x6, -u6*y6, -u6],
])
return A
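# Minimal sketch (a new helper, not referenced elsewhere in this module): once
# A has been assembled by calculate_A_matrix_autograd, the DLT system A.h = 0
# is typically solved by taking the right singular vector of A associated with
# the smallest singular value and reshaping it into the 3x3 homography H
# (defined up to scale; assumes H[2,2] != 0 for the final normalization).
def homography_from_A(A):
    U, s, Vt = np.linalg.svd(A)
    h = Vt[-1, :]        # right singular vector of the smallest singular value
    H = h.reshape(3, 3)
    return H / H[2, 2]   # fix the scale so that H[2,2] == 1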
# THIS FUNCTION DOESN'T WORK WITH NORMALIZATION YET
def volker_metric_autograd(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6,P):
A = calculate_A_matrix_autograd(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6,P)
    # normalize each row
#A = A/np.linalg.norm(A,axis=1, ord = 1, keepdims=True)
row_sums = list()
for i in range(A.shape[0]):
squared_sum = 0
for j in range(A.shape[1]):
squared_sum += np.sqrt(A[i,j]**2)
#A[i,:] = A[i,:] / squared_sum
row_sums.append(squared_sum)
row_sums = np.array(row_sums).reshape(1,12)
A = A/(row_sums.T)
# compute the dot product
As = np.dot(A,A.T)
    # we are interested only in the upper triangular coefficients
metric = 0
start = 1
for i in range(As.shape[0]):
for j in range(start,As.shape[0]):
metric = metric + As[i,j]**2
start = start +1
#An alternative would be to use only the coefficients which correspond
# to different points.
#metric = np.sqrt(np.sum(As[[0,2,4,6],[1,3,5,7]]**2))
#X vs X
#metric = np.sum(As[[0,0,0,2,2,4],[2,4,6,4,6,6]]**2)
#Y vs Y
#metric = metric + np.sum(As[[1,1,1,3,3,5],[3,5,7,5,7,7]]**2)
return metric
# DON'T USE PNORM
def matrix_pnorm_condition_number_autograd(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6,P):
A = calculate_A_matrix_autograd(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6,P)
A = np.conjugate(A)
U, s, Vt = np.linalg.svd(A,0)
m = U.shape[0]
n = Vt.shape[1]
rcond=1e-10
cutoff = rcond*np.max(s)
# for i in range(min(n, m)):
# if s[i] > cutoff:
# s[i] = 1./s[i]
# else:
# s[i] = 0.
new_s = list()
for i in range(min(n, m)):
if s[i] > cutoff:
new_s.append(1./s[i])
else:
new_s.append(0.)
new_s = np.array(new_s)
    pinv = np.dot(Vt.T, np.multiply(new_s[:, np.newaxis], U.T))  # use the thresholded singular values computed above
#https://de.mathworks.com/help/symbolic/cond.html?requestedDomain=www.mathworks.com
return np.linalg.norm(A)*np.linalg.norm(pinv)
def matrix_condition_number_autograd(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6,P,normalize = False):
A = calculate_A_matrix_autograd(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6,P,normalize)
U, s, V = np.linalg.svd(A,full_matrices=False)
greatest_singular_value = s[0]
# rcond=1e-5
# if s[-1] > rcond:
# smalles_singular_value = s[-1]
# else:
# smalles_singular_value = s[-2]
smallest_singular_value = s[-2]
return greatest_singular_value/smallest_singular_value
def hom_3d_to_2d(pts):
pts = pts[[0,1,3],:]
return pts
def hom_2d_to_3d(pts):
pts = np.insert(pts,2,np.zeros(pts.shape[1]),0)
return pts
def normalise_points(pts):
"""
Function translates and normalises a set of 2D or 3d homogeneous points
so that their centroid is at the origin and their mean distance from
the origin is sqrt(2). This process typically improves the
conditioning of any equations used to solve homographies, fundamental
matrices etc.
Inputs:
pts: 3xN array of 2D homogeneous coordinates
Returns:
newpts: 3xN array of transformed 2D homogeneous coordinates. The
scaling parameter is normalised to 1 unless the point is at
infinity.
T: The 3x3 transformation matrix, newpts = T*pts
"""
if pts.shape[0] == 4:
pts = hom_3d_to_2d(pts)
    if pts.shape[0] != 3 and pts.shape[0] != 4:
        print("Shape error")
finiteind = np.nonzero(abs(pts[2,:]) > np.spacing(1))
if len(finiteind[0]) != pts.shape[1]:
print('Some points are at infinity')
dist = []
pts = pts/pts[2,:]
for i in finiteind:
# pts[0,i] = pts[0,i]/pts[2,i]
# pts[1,i] = pts[1,i]/pts[2,i]
# pts[2,i] = 1;
c = np.mean(pts[0:2,i].T, axis=0).T
newp1 = pts[0,i]-c[0]
newp2 = pts[1,i]-c[1]
dist.append(np.sqrt(newp1**2 + newp2**2))
dist = np.array(dist)
meandist = np.mean(dist)
scale = np.sqrt(2)/meandist
T = np.array([[scale, 0, -scale*c[0]], [0, scale, -scale*c[1]], [0, 0, 1]])
newpts = np.dot(T,pts)
return newpts, T
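# Quick self-check sketch for normalise_points (illustrative only, with made-up
# points): after normalisation the mean distance of the finite points from the
# origin should be sqrt(2), and newpts should equal T applied to the inputs.
def _normalise_points_example():
    pts = np.array([[0., 1., 2., 3.],
                    [0., 2., 1., 3.],
                    [1., 1., 1., 1.]])
    newpts, T = normalise_points(pts)
    dists = np.sqrt(newpts[0, :]**2 + newpts[1, :]**2)
    assert abs(np.mean(dists) - np.sqrt(2)) < 1e-9
    assert np.allclose(np.dot(T, pts), newpts)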
def create_gradient(metric='condition_number', n = 0.000001):
""""
metric: 'condition_number' (default)
'volker_metric
"""
if metric == 'condition_number':
metric_function = matrix_condition_number_autograd
elif metric == 'pnorm_condition_number':
metric_function = matrix_pnorm_condition_number_autograd
elif metric == 'volker_metric':
metric_function = volker_metric_autograd
gradient = Gradient()
gradient.set_n(n)
gradient.dx1 = grad(metric_function,0)
gradient.dy1 = grad(metric_function,1)
gradient.dx2 = grad(metric_function,2)
gradient.dy2 = grad(metric_function,3)
gradient.dx3 = grad(metric_function,4)
gradient.dy3 = grad(metric_function,5)
gradient.dx4 = grad(metric_function,6)
gradient.dy4 = grad(metric_function,7)
gradient.dx5 = grad(metric_function,8)
gradient.dy5 = grad(metric_function,9)
gradient.dx6 = grad(metric_function,10)
gradient.dy6 = grad(metric_function,11)
return gradient
def extract_objectpoints_vars(objectPoints):
x1 = objectPoints[0,0]
y1 = objectPoints[1,0]
x2 = objectPoints[0,1]
y2 = objectPoints[1,1]
x3 = objectPoints[0,2]
y3 = objectPoints[1,2]
x4 = objectPoints[0,3]
y4 = objectPoints[1,3]
x5 = objectPoints[0,4]
y5 = objectPoints[1,4]
x6 = objectPoints[0,5]
y6 = objectPoints[1,5]
return [x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6]
def evaluate_gradient(gradient, objectPoints, P, normalize = False):
x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6 = extract_objectpoints_vars(objectPoints)
gradient.dx1_eval_old = gradient.dx1_eval
gradient.dy1_eval_old = gradient.dy1_eval
gradient.dx2_eval_old = gradient.dx2_eval
gradient.dy2_eval_old = gradient.dy2_eval
gradient.dx3_eval_old = gradient.dx3_eval
gradient.dy3_eval_old = gradient.dy3_eval
gradient.dx4_eval_old = gradient.dx4_eval
gradient.dy4_eval_old = gradient.dy4_eval
gradient.dx5_eval_old = gradient.dx5_eval
gradient.dy5_eval_old = gradient.dy5_eval
gradient.dx6_eval_old = gradient.dx6_eval
gradient.dy6_eval_old = gradient.dy6_eval
gradient.dx1_eval = gradient.dx1(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_x1
gradient.dy1_eval = gradient.dy1(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_y1
gradient.dx2_eval = gradient.dx2(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_x2
gradient.dy2_eval = gradient.dy2(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_y2
gradient.dx3_eval = gradient.dx3(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_x3
gradient.dy3_eval = gradient.dy3(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_y3
gradient.dx4_eval = gradient.dx4(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_x4
gradient.dy4_eval = gradient.dy4(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_y4
gradient.dx5_eval = gradient.dx5(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_x5
gradient.dy5_eval = gradient.dy5(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_y5
gradient.dx6_eval = gradient.dx6(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_x6
gradient.dy6_eval = gradient.dy6(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6, P, normalize)*gradient.n_y6
gradient.n_x1 = supersab(gradient.n_x1,gradient.dx1_eval,gradient.dx1_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_x2 = supersab(gradient.n_x2,gradient.dx2_eval,gradient.dx2_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_x3 = supersab(gradient.n_x3,gradient.dx3_eval,gradient.dx3_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_x4 = supersab(gradient.n_x4,gradient.dx4_eval,gradient.dx4_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_x5 = supersab(gradient.n_x5,gradient.dx5_eval,gradient.dx5_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_x6 = supersab(gradient.n_x6,gradient.dx6_eval,gradient.dx6_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_y1 = supersab(gradient.n_y1,gradient.dy1_eval,gradient.dy1_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_y2 = supersab(gradient.n_y2,gradient.dy2_eval,gradient.dy2_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_y3 = supersab(gradient.n_y3,gradient.dy3_eval,gradient.dy3_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_y4 = supersab(gradient.n_y4,gradient.dy4_eval,gradient.dy4_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_y5 = supersab(gradient.n_y5,gradient.dy5_eval,gradient.dy5_eval_old,gradient.n_pos,gradient.n_neg)
gradient.n_y6 = supersab(gradient.n_y6,gradient.dy6_eval,gradient.dy6_eval_old,gradient.n_pos,gradient.n_neg)
## Limit
limit = 0.05
gradient.dx1_eval = np.clip(gradient.dx1_eval, -limit, limit)
gradient.dy1_eval = np.clip(gradient.dy1_eval, -limit, limit)
gradient.dx2_eval = np.clip(gradient.dx2_eval, -limit, limit)
gradient.dy2_eval = np.clip(gradient.dy2_eval, -limit, limit)
gradient.dx3_eval = np.clip(gradient.dx3_eval, -limit, limit)
gradient.dy3_eval = np.clip(gradient.dy3_eval, -limit, limit)
gradient.dx4_eval = np.clip(gradient.dx4_eval, -limit, limit)
gradient.dy4_eval = np.clip(gradient.dy4_eval, -limit, limit)
gradient.dx5_eval = np.clip(gradient.dx5_eval, -limit, limit)
gradient.dy5_eval = np.clip(gradient.dy5_eval, -limit, limit)
gradient.dx6_eval = np.clip(gradient.dx6_eval, -limit, limit)
gradient.dy6_eval = np.clip(gradient.dy6_eval, -limit, limit)
return gradient
def supersab(n, gradient_eval_current, gradient_eval_old, n_pos,n_neg):
if np.sign(gradient_eval_current*gradient_eval_old) > 0:
n = n + n_pos
else:
n = n*n_neg
return n
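# Worked example of the SuperSAB-style step adaptation above (values are
# illustrative only): the step grows additively by n_pos while the gradient
# component keeps its sign, and is scaled down by the factor n_neg when the
# sign flips.
def _supersab_example():
    step, n_pos, n_neg = 0.0001, 0.0002, 0.00001
    step = supersab(step, 0.5, 0.4, n_pos, n_neg)    # same sign -> step + n_pos
    assert abs(step - 0.0003) < 1e-12
    step = supersab(step, -0.2, 0.5, n_pos, n_neg)   # sign flip -> step * n_neg
    assert step < 1e-8
    return step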
def update_points(gradient, objectPoints, limitx=0.15,limity=0.15):
op = np.copy(objectPoints)
op[0,0] += - gradient.dx1_eval
op[1,0] += - gradient.dy1_eval
op[0,1] += - gradient.dx2_eval
op[1,1] += - gradient.dy2_eval
op[0,2] += - gradient.dx3_eval
op[1,2] += - gradient.dy3_eval
op[0,3] += - gradient.dx4_eval
op[1,3] += - gradient.dy4_eval
op[0,4] += - gradient.dx5_eval
op[1,4] += - gradient.dy5_eval
op[0,5] += - gradient.dx6_eval
op[1,5] += - gradient.dy6_eval
circle = True
radius = 0.15
if (circle):
for i in range(op.shape[1]):
distance = np.sqrt(op[0,i]**2+op[1,i]**2)
if distance > radius:
op[:3,i] = op[:3,i]*radius/distance
else:
op[0,:] = np.clip(op[0,:], -limitx, limitx)
op[1,:] = np.clip(op[1,:], -limity, limity)
return op
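# End-to-end usage sketch (not executed on import): build the autograd
# gradient of a point-configuration metric and run a few descent steps.
# P is assumed to be a 3x4 camera projection matrix and objectPoints a 4x6
# array of homogeneous object points (e.g. from the vision.camera and
# vision.plane helpers imported at the top of this file).
def _gradient_descent_example(objectPoints, P, steps=100):
    gradient = create_gradient(metric='condition_number', n=0.0001)
    op = np.copy(objectPoints)
    for _ in range(steps):
        gradient = evaluate_gradient(gradient, op, P, normalize=False)
        op = update_points(gradient, op, limitx=0.15, limity=0.15)
    return op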
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of helper functions used by the CLI and its Plugins.
"""
import imp
import importlib
import ipaddress
import json
import os
import re
import textwrap
import urllib.parse
from kazoo.client import KazooClient
from cli.exceptions import CLIException
def import_modules(package_paths, module_type):
"""
Looks for python packages under `package_paths` and imports
them as modules. Returns a dictionary of the basename of the
`package_paths` to the imported modules.
"""
modules = {}
for package_path in package_paths:
# We put the imported module into the namespace of
# "mesos.<module_type>.<>" to keep it from cluttering up
# the import namespace elsewhere.
package_name = os.path.basename(package_path)
package_dir = os.path.dirname(package_path)
module_name = "cli." + module_type + "." + package_name
try:
module = importlib.import_module(module_name)
except Exception:
obj, filename, data = imp.find_module(package_name, [package_dir])
module = imp.load_module(module_name, obj, filename, data)
modules[package_name] = module
return modules
def get_module(modules, import_path):
"""
Given a modules dictionary returned by `import_modules()`,
return a reference to the module at `import_path` relative
to the base module. For example, get_module(modules, "example.stuff")
will return a reference to the "stuff" module inside the
imported "example" plugin.
"""
import_path = import_path.split('.')
try:
module = modules[import_path[0]]
if len(import_path) > 1:
module = getattr(module, ".".join(import_path[1:]))
except Exception as exception:
raise CLIException("Unable to get module: {error}"
.format(error=str(exception)))
return module
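# Hedged usage sketch: the plugin path and module type below are hypothetical
# and only illustrate how `import_modules` and `get_module` compose; a real
# caller would pass the plugin paths collected via `join_plugin_paths`.
def _plugin_lookup_example():
    modules = import_modules(["/path/to/plugins/example"], "plugins")
    # Returns a reference to the "stuff" submodule of the "example" plugin.
    return get_module(modules, "example.stuff")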
def completions(comp_words, current_word, argv):
"""
Helps autocomplete by returning the appropriate
completion words under three conditions.
1) Returns `comp_words` if the completion word is
potentially in that list.
2) Returns an empty list if there is no possible
completion.
3) Returns `None` if the autocomplete is already done.
"""
comp_words += ["-h", "--help", "--version"]
if not argv:
return comp_words
if len(argv) == 1:
if argv[0] not in comp_words and current_word:
return comp_words
if argv[0] in comp_words and current_word:
return comp_words
if argv[0] not in comp_words and not current_word:
return []
if argv[0] in comp_words and not current_word:
return None
if len(argv) > 1 and argv[0] not in comp_words:
return []
if len(argv) > 1 and argv[0] in comp_words:
return None
raise CLIException("Unreachable")
def format_commands_help(cmds):
"""
Helps format plugin commands for display.
"""
longest_cmd_name = max(list(cmds.keys()), key=len)
help_string = ""
for cmd in sorted(cmds.keys()):
# For the top-level entry point, `cmds` is a single-level
# dictionary with `short_help` as the values. For plugins,
# `cmds` is a two-level dictionary, where `short_help` is a
# field in each sub-dictionary.
short_help = cmds[cmd]
if isinstance(short_help, dict):
short_help = short_help["short_help"]
num_spaces = len(longest_cmd_name) - len(cmd) + 2
help_string += " %s%s%s\n" % (cmd, " " * num_spaces, short_help)
return help_string
def format_subcommands_help(cmd):
"""
Helps format plugin subcommands for display.
"""
arguments = " ".join(cmd["arguments"])
short_help = cmd["short_help"]
long_help = textwrap.dedent(cmd["long_help"].rstrip())
long_help = " " + "\n ".join(long_help.lstrip().split('\n'))
flags = cmd["flags"]
flags["-h --help"] = "Show this screen."
flag_string = ""
    if flags:
longest_flag_name = max(list(flags.keys()), key=len)
for flag in sorted(flags.keys()):
num_spaces = len(longest_flag_name) - len(flag) + 2
flag_string += " %s%s%s\n" % (flag, " " * num_spaces, flags[flag])
flag_string = flag_string.rstrip()
return (arguments, short_help, long_help, flag_string)
def join_plugin_paths(settings, config):
"""
Return all the plugin paths combined
from both settings and the config file.
"""
builtin_paths = settings.PLUGINS
try:
config_paths = config.plugins()
except Exception as exception:
raise CLIException("Error: {error}.".format(error=str(exception)))
return builtin_paths + config_paths
def sanitize_address(address):
"""
Sanitize an address, ensuring that it has a format recognizable by the CLI.
"""
# Try and parse the address to make sure it is parseable.
try:
parsed = urllib.parse.urlparse(address)
except Exception as exception:
raise CLIException("Unable to parse address: {error}"
.format(error=str(exception)))
# Since we allow addresses to be specified without an
# explicit scheme, some fields in the parsed address may
# be missing. Patch it up to force an implicit HTTP scheme.
if parsed.scheme == "" and parsed.netloc == "":
address = "http://{addr}".format(addr=address)
elif parsed.scheme == "" and parsed.netloc != "":
address = "http:{addr}".format(addr=address)
# Try and parse the address again to make sure it
# now has all the parts we expect and that they are valid.
try:
parsed = urllib.parse.urlparse(address)
except Exception as exception:
raise CLIException("Unable to parse address: {error}"
.format(error=str(exception)))
# We only support HTTP and HTTPS schemes.
if parsed.scheme != "http" and parsed.scheme != "https":
raise CLIException("Invalid scheme '{scheme}' in address"
.format(scheme=parsed.scheme))
# There must be a hostname present.
    if not parsed.hostname:
raise CLIException("Missing hostname in address")
# We do not support IPv6 in the hostname (yet).
    try:
        ipaddress.IPv6Address(parsed.hostname)
    except ValueError:
        pass
    else:
        raise CLIException("IPv6 addresses are unsupported")
valid_ip_v4_address = False
# We either accept IPv4 addresses, or DNS names as the hostname. In the
# check below we try and parse the hostname as an IPv4 address, if this
# does not succeed, then we assume the hostname is formatted as a DNS name.
try:
ipaddress.IPv4Address(parsed.hostname)
valid_ip_v4_address = True
except Exception as exception:
pass
# If we have an IPv4 address then we require a port to be specified.
if valid_ip_v4_address and parsed.port is None:
raise CLIException("Addresses formatted as IP must contain a port")
# We allow ports for both IPv4 addresses and DNS
# names, but they must be in a specific range.
if parsed.port and (parsed.port < 0 or parsed.port > 65535):
raise CLIException("Port '{port}' is out of range"
.format(port=parsed.port))
return address
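# Hedged examples of sanitize_address (host names are placeholders): a fully
# specified HTTP address passes through unchanged, while an IPv4 address
# without an explicit port is rejected.
def _sanitize_address_example():
    assert sanitize_address("http://example.com:5050") == \
        "http://example.com:5050"
    try:
        sanitize_address("http://127.0.0.1")   # raises: IP without a port
    except CLIException:
        pass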
def zookeeper_resolve_leader(addresses, path):
"""
Resolve the leader using a znode path. ZooKeeper imposes a total
order on the elements of the queue, guaranteeing that the
oldest element of the queue is the first one. We can
thus return the first address we get from ZooKeeper.
"""
hosts = ",".join(addresses)
try:
zk = KazooClient(hosts=hosts)
zk.start()
except Exception as exception:
raise CLIException("Unable to initialize Zookeeper Client: {error}"
.format(error=exception))
try:
children = zk.get_children(path)
except Exception as exception:
raise CLIException("Unable to get children of {zk_path}: {error}"
.format(zk_path=path, error=exception))
masters = sorted(
# 'json.info' is the prefix for master nodes.
child for child in children if child.startswith("json.info")
)
address = ""
for master in masters:
try:
node_path = "{path}/{node}".format(path=path, node=master)
json_data, _ = zk.get(node_path)
except Exception as exception:
raise CLIException("Unable to get the value of '{node}': {error}"
.format(node=node_path, error=exception))
try:
data = json.loads(json_data)
except Exception as exception:
raise CLIException("Could not load JSON from '{data}': {error}"
.format(data=data, error=str(exception)))
if ("address" in data and "ip" in data["address"] and
"port" in data["address"]):
address = "{ip}:{port}".format(ip=data["address"]["ip"],
port=data["address"]["port"])
break
try:
zk.stop()
except Exception as exception:
raise CLIException("Unable to stop Zookeeper Client: {error}"
.format(error=exception))
if not address:
raise CLIException("Unable to resolve the leading"
" master using ZooKeeper")
return address
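# Hedged usage sketch (hypothetical ZooKeeper ensemble and znode path): resolve
# the address of the leading master registered under the "/mesos" znode.
def _zookeeper_example():
    return zookeeper_resolve_leader(
        ["zk-1.example.com:2181", "zk-2.example.com:2181"], "/mesos")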
class Table():
"""
Defines a custom table structure for printing to the terminal.
"""
def __init__(self, columns):
"""
Initialize a table with a list of column names
to act as headers for each column in the table.
"""
if not isinstance(columns, list):
raise CLIException("Column headers must be supplied as a list")
for column in columns:
if re.search(r"(\s)\1{2,}", column):
raise CLIException("Column headers cannot have more"
" than one space between words")
self.table = [columns]
self.padding = [len(column) for column in columns]
def __getitem__(self, index):
return list(self.table[index])
def dimensions(self):
"""
Returns the dimensions of the table as (<num-rows>, <num-columns>).
"""
return (len(self.table), len(self.table[0]))
def add_row(self, row):
"""
Add a row to the table. Input must be a list where each entry
corresponds to its respective column in order.
"""
if len(row) != len(self.table[0]):
raise CLIException("Number of entries and columns do not match!")
# Adjust padding for each column.
for index, elem in enumerate(row):
if len(elem) > self.padding[index]:
self.padding[index] = len(elem)
self.table.append(row)
def __str__(self):
"""
Convert a table to string for printing.
"""
table_string = ""
for r_index, row in enumerate(self.table):
for index, entry in enumerate(row):
table_string += "%s%s" % \
(entry, " " * (self.padding[index] - len(entry) + 2))
if r_index != len(self.table) - 1:
table_string += "\n"
return table_string
@staticmethod
def parse(string):
"""
Parse a string previously printed as a `Table` back into a `Table`.
"""
lines = string.split("\n")
# Find the location and contents of column headers in the string.
# Assume only single spaces between words in column headers.
matches = re.finditer(r"([\w\d]+\s?[\w\d]+)+", lines[0])
columns = [(m.start(), m.group()) for m in matches]
# Build a table from the column header contents.
table = Table([c[1] for c in columns])
# Fill in the subsequent rows.
for line in lines[1:]:
row = []
start_indices = [c[0] for c in columns]
for i, start_index in enumerate(start_indices):
if i + 1 < len(start_indices):
column = line[start_index:start_indices[i + 1]]
else:
column = line[start_index:]
row.append(str(column.strip()))
table.add_row(row)
return table
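# Small round-trip sketch for the Table helper above: build a table, render it
# with str(), and parse it back (row contents are made up for illustration).
def _table_example():
    table = Table(["ID", "STATE"])
    table.add_row(["task-1", "RUNNING"])
    table.add_row(["task-2", "FINISHED"])
    parsed = Table.parse(str(table))
    assert parsed.dimensions() == table.dimensions()
    return str(parsed)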
|
|
# project/user/views.py
#################
#### imports ####
#################
import datetime
from flask import render_template, Blueprint, url_for, redirect, flash, request
from flask_login import login_user, logout_user, login_required, current_user
from sqlalchemy import desc
from project.token import generate_confirmation_token, confirm_token
from project.decorators import check_confirmed
from project.email import send_email
from project import db, bcrypt
from project.models import (
User,
Employment,
Education,
Publication,
Patent
)
from .forms import (
LoginForm,
RegisterForm,
ChangePasswordForm,
EmploymentForm,
EmploymentListForm,
EducationForm,
EducationFormListForm,
EditPersonalForm,
PublicationForm,
PublicationFormListForm,
PatentForm,
PatentFormListForm
)
################
#### config ####
################
user_blueprint = Blueprint('user', __name__,)
################
#### routes ####
################
@user_blueprint.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if form.validate_on_submit():
user = User(
email=form.email.data,
password=form.password.data,
confirmed=False
)
db.session.add(user)
db.session.commit()
token = generate_confirmation_token(user.email)
confirm_url = url_for('user.confirm_email', token=token, _external=True)
html = render_template('user/activate.html', confirm_url=confirm_url)
subject = "Please confirm your email"
send_email(user.email, subject, html)
login_user(user)
flash('You registered and are now logged in. Welcome!', 'success')
return redirect(url_for('user.unconfirmed'))
return render_template('user/register.html', form=form)
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(
user.password, request.form['password']):
login_user(user)
flash('Welcome.', 'success')
return redirect(url_for('user.profile', username=user.username))
else:
flash('Invalid email and/or password.', 'danger')
return render_template('user/login.html', form=form)
return render_template('user/login.html', form=form)
@user_blueprint.route('/logout')
@login_required
def logout():
logout_user()
flash('You were logged out.', 'success')
return redirect(url_for('user.login'))
@user_blueprint.route('/changepassword', methods=['GET', 'POST'])
@login_required
@check_confirmed
def changepassword():
user = User.query.filter_by(email=current_user.email).first()
if not user:
        flash('User not found. Please log in again.', 'danger')
return redirect(url_for('user.logout'))
form = ChangePasswordForm(request.form, prefix='pwd')
if form.validate_on_submit():
user = User.query.filter_by(email=current_user.email).first()
if user:
user.password = bcrypt.generate_password_hash(form.password.data)
db.session.commit()
flash('Password successfully changed.', 'success')
return redirect(url_for('user.profile', username=user.username))
else:
flash('Password change was unsuccessful.', 'danger')
return redirect(url_for('user.changepassword'))
return render_template('user/changepassword.html', form=form)
@user_blueprint.route('/confirm/<token>')
@login_required
def confirm_email(token):
try:
email = confirm_token(token)
    except Exception:
        flash('The confirmation link is invalid or has expired.', 'danger')
        return redirect(url_for('main.home'))
user = User.query.filter_by(email=email).first_or_404()
if user.confirmed:
flash('Account already confirmed. Please login.', 'success')
else:
user.confirmed = True
user.confirmed_on = datetime.datetime.now()
db.session.add(user)
db.session.commit()
flash('You have confirmed your account. Thanks!', 'success')
return redirect(url_for('main.home'))
@user_blueprint.route('/unconfirmed')
@login_required
def unconfirmed():
if current_user.confirmed:
        return redirect(url_for('main.home'))
flash('Please confirm your account!', 'warning')
return render_template('user/unconfirmed.html')
@user_blueprint.route('/resend')
@login_required
def resend_confirmation():
token = generate_confirmation_token(current_user.email)
confirm_url = url_for('user.confirm_email', token=token, _external=True)
html = render_template('user/activate.html', confirm_url=confirm_url)
subject = "Please confirm your email"
send_email(current_user.email, subject, html)
flash('A new confirmation email has been sent.', 'success')
return redirect(url_for('user.unconfirmed'))
@user_blueprint.route('/user/<username>')
@login_required
def profile(username):
user = User.query.filter_by(username=username).first()
if user == None:
flash('User %s not found.' % username, 'danger')
return redirect(url_for('main.home'))
form = EditPersonalForm(request.form)
return render_template('user/profile.html', user=user, form=form)
@user_blueprint.route('/user_edit_firstname', methods=['POST'])
@login_required
def user_edit_firstname():
user = User.query.filter_by(email=current_user.email).first()
user.firstname = request.form['value']
db.session.commit()
return redirect(url_for('user.profile', username=user.username))
@user_blueprint.route('/user_edit_lastname', methods=['POST'])
@login_required
def user_edit_lastname():
user = User.query.filter_by(email=current_user.email).first()
user.surname = request.form['value']
db.session.commit()
return redirect(url_for('user.profile', username=user.username))
@user_blueprint.route('/user_edit_dob', methods=['POST'])
@login_required
def user_edit_dob():
user = User.query.filter_by(email=current_user.email).first()
user.birthdate = request.form['value']
db.session.commit()
return redirect(url_for('user.profile', username=user.username))
@user_blueprint.route('/user_edit_gender', methods=['POST'])
@login_required
def user_edit_gender():
user = User.query.filter_by(email=current_user.email).first()
user.gender = request.form['value']
db.session.commit()
return redirect(url_for('user.profile', username=user.username))
@user_blueprint.route('/user/employment_add/<human_id>',
methods=['GET', 'POST'])
@login_required
def employment_add(human_id):
user = User.query.filter_by(id=human_id).first()
if user == None:
flash('User not found.', 'danger')
return redirect(url_for('main.home'))
form = EmploymentForm(request.form)
if form.validate_on_submit():
start_date = None
end_date = None
if request.form["start_date"] != '':
start_date = request.form["start_date"]
if request.form["end_date"] != '':
end_date = request.form["end_date"]
emp = Employment(
human_id=human_id,
employer=request.form["employer"],
position=request.form["position"],
            start_date=start_date,
end_date=end_date,
job_desc=request.form["job_desc"]
)
db.session.add(emp)
db.session.commit()
flash('New employer added.', 'success')
return redirect(url_for('user.employment_list', human_id=human_id))
if form.errors:
print(form.errors)
return render_template('user/employment_add.html', username=user.username,
email=user.email, form=form, human_id=human_id)
@user_blueprint.route('/employment_list/<human_id>', methods=['GET'])
@login_required
def employment_list(human_id):
user = User.query.filter_by(id=human_id).first()
if user == None:
flash('User not found', 'danger')
return redirect(url_for('main.home'))
emp = Employment.query.filter_by(human_id=user.id).order_by(
desc(Employment.start_date))
    if emp.first() is None:
flash('No employment details saved', 'danger')
data = {'employmentlist': emp}
form = EmploymentListForm(data=data)
return render_template('user/employment_list.html', form=form,
human_id=user.id)
@user_blueprint.route('/employment_edit/<emp_id>', methods=['GET'])
@login_required
def employment_edit(emp_id):
emp = Employment.query.filter_by(id=emp_id).first()
if emp == None:
flash('No Employment Details. Please add', 'danger')
return redirect(url_for('user.employment_add',
human_id=current_user.id))
return render_template('user/employment_edit.html', emp=emp)
@user_blueprint.route('/employment_edit_employer/<emp_id>', methods=['POST'])
@login_required
def employment_edit_employer(emp_id):
emp = Employment.query.filter_by(id=emp_id).first()
emp.employer = request.form['value']
db.session.commit()
return redirect(url_for('user.employment_list', human_id=emp.human_id))
@user_blueprint.route('/employment_edit_position/<emp_id>', methods=['POST'])
@login_required
def employment_edit_position(emp_id):
emp = Employment.query.filter_by(id=emp_id).first()
emp.position = request.form['value']
db.session.commit()
return redirect(url_for('user.employment_list', human_id=emp.human_id))
@user_blueprint.route('/employment_edit_description/<emp_id>',
methods=['POST'])
@login_required
def employment_edit_description(emp_id):
emp = Employment.query.filter_by(id=emp_id).first()
emp.job_desc = request.form['value']
db.session.commit()
return redirect(url_for('user.employment_list', human_id=emp.human_id))
@user_blueprint.route('/employment_edit_start_date/<emp_id>',
methods=['POST'])
@login_required
def employment_edit_start_date(emp_id):
emp = Employment.query.filter_by(id=emp_id).first()
emp.start_date = request.form['value']
db.session.commit()
return redirect(url_for('user.employment_list', human_id=emp.human_id))
@user_blueprint.route('/employment_edit_end_date/<emp_id>', methods=['POST'])
@login_required
def employment_edit_end_date(emp_id):
emp = Employment.query.filter_by(id=emp_id).first()
emp.end_date = request.form['value']
db.session.commit()
return redirect(url_for('user.employment_list', human_id=emp.human_id))
@user_blueprint.route('/user/education_add/<human_id>',
methods=['GET', 'POST'])
@login_required
def education_add(human_id):
user = User.query.filter_by(id=human_id).first()
if user == None:
flash('User not found.', 'danger')
return redirect(url_for('main.home'))
form = EducationForm(request.form)
if form.validate_on_submit():
start_date = None
end_date = None
if form.start_date.data != '':
start_date = form.start_date.data
if form.end_date.data != '':
end_date = form.end_date.data
ed = Education(
human_id=human_id,
educational_institution=form.educational_institution.data,
course_studied=form.course_studied.data,
start_date=start_date,
end_date=end_date,
accolades=form.accolades.data,
educational_institution_type=form.educational_institution_type_list.data
)
db.session.add(ed)
db.session.commit()
flash('New educational details added.', 'success')
return redirect(url_for('user.education_list', human_id=human_id))
return render_template('user/education_add.html', username=user.username,
email=user.email, form=form)
@user_blueprint.route('/education_list/<human_id>', methods=['GET'])
@login_required
def education_list(human_id):
user = User.query.filter_by(id=human_id).first()
if user == None:
flash('User not found', 'danger')
return redirect(url_for('main.home'))
ed = Education.query.filter_by(human_id=user.id).order_by(
desc(Education.start_date))
    if ed.first() is None:
flash('No educational details saved', 'danger')
ed_data = {'educationlist': ed}
ed_form = EducationFormListForm(data=ed_data)
return render_template('user/education_list.html', human_id=human_id,
ed_form=ed_form)
@user_blueprint.route('/education_edit/<id>', methods=['GET'])
@login_required
def education_edit(id):
ed = Education.query.filter_by(id=id).first()
if ed == None:
flash('No Education Details. Please add', 'danger')
return redirect(url_for('main.home'))
return render_template('user/education_edit.html', ed=ed)
@user_blueprint.route('/education_edit_educational_institution/<id>',
methods=['POST'])
@login_required
def education_edit_educational_institution(id):
ed = Education.query.filter_by(id=id).first()
ed.educational_institution = request.form['value']
db.session.commit()
return redirect(url_for('user.education_list', human_id=ed.human_id))
@user_blueprint.route('/education_edit_course_studied/<id>', methods=['POST'])
@login_required
def education_edit_course_studied(id):
ed = Education.query.filter_by(id=id).first()
ed.course_studied = request.form['value']
db.session.commit()
return redirect(url_for('user.education_list', human_id=ed.human_id))
@user_blueprint.route('/education_edit_start_date/<id>', methods=['POST'])
@login_required
def education_edit_start_date(id):
ed = Education.query.filter_by(id=id).first()
ed.start_date = request.form['value']
db.session.commit()
return redirect(url_for('user.education_list', human_id=ed.human_id))
@user_blueprint.route('/education_edit_end_date/<id>', methods=['POST'])
@login_required
def education_end_start_date(id):
ed = Education.query.filter_by(id=id).first()
ed.end_date = request.form['value']
db.session.commit()
return redirect(url_for('user.education_list', human_id=ed.human_id))
@user_blueprint.route('/user/publication_add/<human_id>',
methods=['GET', 'POST'])
@login_required
def publication_add(human_id):
user = User.query.filter_by(id=human_id).first()
if user == None:
flash('User not found.', 'danger')
return redirect(url_for('main.home'))
form = PublicationForm(request.form)
if form.validate_on_submit():
publication_date = None
if form.publication_date.data != '':
publication_date = form.publication_date.data
pub = Publication(
human_id=human_id,
title=form.title.data,
authors=form.authors.data,
publication_date=publication_date,
publisher=form.publisher.data,
publication_url=form.publication_url.data,
description=form.description.data,
publication_category=form.publication_category_list.data
)
db.session.add(pub)
db.session.commit()
flash('New publication details added.', 'success')
return redirect(url_for('user.publication_list', human_id=human_id))
return render_template('user/publication_add.html', human_id=human_id,
form=form)
@user_blueprint.route('/user/publication_list/<human_id>', methods=['GET'])
@login_required
def publication_list(human_id):
user = User.query.filter_by(id=human_id).first()
if user == None:
flash('User not found', 'danger')
return redirect(url_for('main.home'))
pub = Publication.query.filter_by(human_id=human_id).order_by(
desc(Publication.publication_date))
    if pub.first() is None:
flash('No publication details saved', 'danger')
pub_data = {'publicationlist': pub}
pub_form = PublicationFormListForm(data=pub_data)
return render_template('user/publication_list.html', human_id=human_id,
pub_form=pub_form)
@user_blueprint.route('/publication_edit/<id>', methods=['GET'])
@login_required
def publication_edit(id):
pub = Publication.query.filter_by(id=id).first()
if pub == None:
flash('No Publication Details. Please add', 'danger')
return redirect(url_for('main.home'))
form = PublicationFormListForm(pub=pub)
return render_template('user/publication_edit.html', pub=pub, form=form)
@user_blueprint.route('/publication_edit_title/<id>', methods=['POST'])
@login_required
def publication_edit_title(id):
pub = Publication.query.filter_by(id=id).first()
pub.title = request.form['value']
db.session.commit()
return redirect(url_for('user.publication_list', human_id=pub.human_id))
@user_blueprint.route('/publication_edit_authors/<id>', methods=['POST'])
@login_required
def publication_edit_authors(id):
pub = Publication.query.filter_by(id=id).first()
pub.authors = request.form['value']
db.session.commit()
return redirect(url_for('user.publication_list', human_id=pub.human_id))
@user_blueprint.route('/publication_edit_publisher/<id>', methods=['POST'])
@login_required
def publication_edit_publisher(id):
pub = Publication.query.filter_by(id=id).first()
pub.publisher = request.form['value']
db.session.commit()
return redirect(url_for('user.publication_list', human_id=pub.human_id))
@user_blueprint.route('/publication_edit_publication_date/<id>',
methods=['POST'])
@login_required
def publication_edit_publication_date(id):
pub = Publication.query.filter_by(id=id).first()
pub.publication_date = request.form['value']
db.session.commit()
return redirect(url_for('user.publication_list', human_id=pub.human_id))
@user_blueprint.route('/publication_edit_description/<id>', methods=['POST'])
@login_required
def publication_edit_description(id):
pub = Publication.query.filter_by(id=id).first()
pub.description = request.form['value']
db.session.commit()
return redirect(url_for('user.publication_list', human_id=pub.human_id))
@user_blueprint.route('/publication_edit_publication_url/<id>',
methods=['POST'])
@login_required
def publication_edit_publication_url(id):
pub = Publication.query.filter_by(id=id).first()
pub.publication_url = request.form['value']
db.session.commit()
return redirect(url_for('user.publication_list', human_id=pub.human_id))
@user_blueprint.route('/publication_edit_publication_category/<id>',
methods=['POST'])
@login_required
def publication_edit_publication_category(id):
pub = Publication.query.filter_by(id=id).first()
pub.publication_category = request.form['value']
db.session.commit()
return redirect(url_for('user.publication_list', human_id=pub.human_id))
@user_blueprint.route('/user/patent_add/<human_id>', methods=['GET', 'POST'])
@login_required
def patent_add(human_id):
user = User.query.filter_by(id=human_id).first()
if user == None:
flash('User not found.', 'danger')
return redirect(url_for('main.home'))
form = PatentForm(request.form)
if form.validate_on_submit():
issue_date = None
if form.issue_date.data != '':
issue_date = form.issue_date.data
pat = Patent(
human_id=human_id,
title=form.title.data,
description=form.description.data,
patent_number=form.patent_number.data,
inventors=form.inventors.data,
issue_date=issue_date,
patent_office=form.patent_office_list.data,
patent_status=form.patent_status_list.data,
patent_url=form.patent_url.data
)
db.session.add(pat)
db.session.commit()
flash('New patent details added.', 'success')
return redirect(url_for('user.patent_list', human_id=human_id))
return render_template('user/patent_add.html', human_id=human_id,
form=form)
@user_blueprint.route('/user/patent_list/<human_id>', methods=['GET'])
@login_required
def patent_list(human_id):
user = User.query.filter_by(id=human_id).first()
if user == None:
flash('User not found', 'danger')
return redirect(url_for('main.home'))
pat = Patent.query.filter_by(human_id=human_id).order_by(desc(Patent.issue_date))
    if pat.first() is None:
flash('No patent details saved')
pat_data = {'patentlist': pat}
pat_form = PatentFormListForm(data=pat_data)
return render_template('user/patent_list.html', human_id=human_id,
pat_form=pat_form)
@user_blueprint.route('/patent_edit/<id>', methods=['GET'])
@login_required
def patent_edit(id):
pat = Patent.query.filter_by(id=id).first()
if pat == None:
flash('No Patent Details. Please add', 'danger')
return redirect(url_for('main.home'))
return render_template('user/patent_edit.html', pat=pat)
@user_blueprint.route('/patent_edit_title/<id>', methods=['POST'])
@login_required
def patent_edit_title(id):
pat = Patent.query.filter_by(id=id).first()
pat.title = request.form['value']
db.session.commit()
return redirect(url_for('user.patent_list', human_id=pat.human_id))
@user_blueprint.route('/patent_edit_authors/<id>', methods=['POST'])
@login_required
def patent_edit_authors(id):
pat = Patent.query.filter_by(id=id).first()
    pat.inventors = request.form['value']
db.session.commit()
return redirect(url_for('user.patent_list', human_id=pat.human_id))
@user_blueprint.route('/patent_edit_publisher/<id>', methods=['POST'])
@login_required
def patent_edit_publisher(id):
pat = Patent.query.filter_by(id=id).first()
pat.publisher = request.form['value']
db.session.commit()
return redirect(url_for('user.patent_list', human_id=pat.human_id))
@user_blueprint.route('/patent_edit_patent_date/<id>', methods=['POST'])
@login_required
def patent_edit_patent_date(id):
pat = Patent.query.filter_by(id=id).first()
    pat.issue_date = request.form['value']
db.session.commit()
return redirect(url_for('user.patent_list', human_id=pat.human_id))
@user_blueprint.route('/patent_edit_description/<id>', methods=['POST'])
@login_required
def patent_edit_description(id):
pat = Patent.query.filter_by(id=id).first()
pat.description = request.form['value']
db.session.commit()
return redirect(url_for('user.patent_list', human_id=pat.human_id))
@user_blueprint.route('/patent_edit_patent_url/<id>', methods=['POST'])
@login_required
def patent_edit_patent_url(id):
pat = Patent.query.filter_by(id=id).first()
pat.patent_url = request.form['value']
db.session.commit()
return redirect(url_for('user.patent_list', human_id=pat.human_id))
@user_blueprint.route('/patent_edit_patent_category/<id>', methods=['POST'])
@login_required
def patent_edit_patent_category(id):
pat = Patent.query.filter_by(id=id).first()
pat.patent_category = request.form['value']
db.session.commit()
return redirect(url_for('user.patent_list', human_id=pat.human_id))
@user_blueprint.route('/user/academic_record/<human_id>', methods=['GET'])
@login_required
def academic_record(human_id):
ed = Education.query.filter_by(human_id=human_id).order_by(desc(Education.start_date))
ed_data = {'educationlist': ed}
ed_form = EducationFormListForm(data=ed_data)
pat = Patent.query.filter_by(human_id=human_id).order_by(desc(Patent.issue_date))
pat_data = {'patentlist': pat}
pat_form = PatentFormListForm(data=pat_data)
pub = Publication.query.filter_by(human_id=human_id).order_by(desc(Publication.publication_date))
pub_data = {'publicationlist': pub}
pub_form = PublicationFormListForm(data=pub_data)
return render_template('user/academic_record.html',
human_id=human_id,
ed_form=ed_form,
pat_form=pat_form,
pub_form=pub_form)
|
|
# -*- coding: utf-8 -*-
# Copyright IBM Corp. 2015, 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from gpclient import GPClient
from test import common
class TestGPClient(unittest.TestCase):
@classmethod
def setUpClass(self):
"""Setting up the globalization pipeline for testing"""
acc = common.get_admin_gpserviceaccount()
client = GPClient(acc)
try:
client.delete_bundle(common.bundleId1)
client.delete_bundle(common.bundleId2)
data = {}
data['sourceLanguage'] = "en"
#data['targetLanguages'] = ["fr","es-mx"]
data['targetLanguages'] =[]
data['notes']=["string"]
data['metadata']={}
data['partner']=''
data['segmentSeparatorPattern']='string'
data['noTranslationPattern']='string'
client.create_bundle(common.bundleId1, data=data)
bundle_entries = {}
bundle_entries['greet'] = "Hello"
bundle_entries['weather'] = "It is snowing"
client.upload_resource_entries(common.bundleId1, "en", data=bundle_entries)
bundle1_entries = {}
bundle1_entries['greet'] = "Salut"
bundle1_entries['weather'] = "Il neige"
client.upload_resource_entries(common.bundleId1, "fr", data=bundle1_entries)
bundle3_entries = {}
bundle3_entries['greet'] = "Salut"
bundle3_entries['weather'] = "Il neige"
client.upload_resource_entries(common.bundleId1, "es-mx", data=bundle3_entries)
client.create_bundle(common.bundleId2, data=data)
bundle0_entries = {}
bundle0_entries['exit'] = "Goodbye"
bundle0_entries['show'] = "The Wire"
client.upload_resource_entries(common.bundleId2, "en", data=bundle0_entries)
bundle2_entries = {}
bundle2_entries['exit']= u'Au revoir'
bundle2_entries['show']= u'Le Fil'
client.upload_resource_entries(common.bundleId2, "fr", data=bundle2_entries)
except:
pass
@classmethod
def tearDownClass(self):
pass
def setUp(self):
pass
def tearDown(self):
pass
#@unittest.skip("skipping")
def test_admin_basic_auth(self):
"""Verify basic auth fails with admin account"""
acc = common.get_admin_gpserviceaccount()
client = GPClient(acc, auth=GPClient.BASIC_AUTH)
ids = client.get_bundles()
self.assertEqual(0, len(ids), "Admin account can not use basic authentication")
#@unittest.skip("skipping")
def test_basic_auth_translation(self):
"""Test if translation works with basic auth"""
acc = common.get_gpserviceaccount()
client = GPClient(acc, auth=GPClient.BASIC_AUTH)
languages=['fr']
t = client.gp_translation(bundleId=common.bundleId2,
languages=languages)
_ = t.gettext
value = _('show')
common.my_assert_equal(self, u'Le Fil', value,
'incorrect translated value')
#@unittest.skip("skipping")
def test_create_bundle(self):
"""Test to create a new bundle"""
acc = common.get_admin_gpserviceaccount()
client = GPClient(acc)
tresp = client.create_bundle("test-bundle")
common.my_assert_equal(self, "SUCCESS", tresp["status"],
'bundle could not be created')
#@unittest.skip("skipping")
def test_delete_bundle_fail(self):
"""Test to delete a specific bundle which doesn't exist"""
acc = common.get_admin_gpserviceaccount()
client = GPClient(acc)
tresp = client.delete_bundle("test-bundle-notexists")
common.my_assert_equal(self, "SUCCESS", tresp["status"],
'a bundle which does not exist can not be deleted')
#@unittest.skip("skipping")
def test_delete_bundle_success(self):
"""Test to delete a specific bundle which exists"""
acc = common.get_admin_gpserviceaccount()
client = GPClient(acc)
tresp = client.delete_bundle("test-bundle")
common.my_assert_equal(self, "SUCCESS", tresp["status"],
'bundle could not be deleted')
#@unittest.skip("skipping")
def test_english_values(self):
"""Verify English values are returned when asked for"""
acc = common.get_gpserviceaccount()
client = GPClient(acc)
languages=['en']
t = client.gp_translation(bundleId=common.bundleId1,
languages=languages)
_ = t.gettext
value = _('greet')
common.my_assert_equal(self, 'Hello', value,
'incorrect value')
#@unittest.skip("skipping")
def test_example_1(self):
"""Test example 1 used in the docs"""
#common.set_vcap_env_vars()
acc = common.get_gpserviceaccount()
client = GPClient(acc)
languages=['fr'] # languages=[locale.getdefaultlocale()[0]]
t = client.gp_translation(bundleId=common.bundleId1,
languages=languages)
_ = t.gettext
value = _('greet') # 'greet' key will be localized/translated to French
common.my_assert_equal(self, 'Salut', value,
'incorrect translated value')
#@unittest.skip("skipping")
def test_example_2(self):
"""Test example 2 used in the docs"""
acc = common.get_gpserviceaccount()
client = GPClient(acc)
languages=['fr'] # languages=[locale.getdefaultlocale()[0]]
t = client.gp_translation(bundleId=common.bundleId2,
languages=languages)
_ = t.gettext
value = _('exit') # 'exit' key will be localized/translated to French
common.my_assert_equal(self, u'Au revoir', value,
'incorrect translated value')
#@unittest.skip("skipping")
def test_get_gaas_hmac_headers(self):
"""Test if the GaaS HMAC header generation is correct """
method = 'POST'
url = 'https://example.com/gaas'
date = 'Mon, 30 Jun 2014 00:00:00 GMT'
body = '{"param":"value"}'
userId = 'MyUser'
secret = 'MySecret'
expectedHeaders = {'GP-Date': 'Mon, 30 Jun 2014 00:00:00 GMT',
'Authorization': 'GP-HMAC MyUser:ONBJapYEveDZfsPFdqZHQ64GDgc='}
acc = common.get_gpserviceaccount()
client = GPClient(acc)
headers = client._GPClient__get_gaas_hmac_headers( method=method,
url=url, date=date, body=body, secret=secret, userId=userId)
common.my_assert_equal(self, expectedHeaders, headers,
'incorrect GaaS HMAC headers')
#@unittest.skip("skipping")
def test_get_language_match(self):
"""Test the matching of langauge codes to supported langauges"""
# supported languages in GP
supportedLangs = ['en','de','es','fr','it', 'ja','ko', 'pt-BR',
'zh-Hans', 'zh-Hant']
acc = common.get_gpserviceaccount()
client = GPClient(acc)
get_language_match = client._GPClient__get_language_match
expectedMatches = {
'en': 'en', 'en_US': 'en', 'en-US': 'en',
'de': 'de', 'de_at': 'de', 'de-at': 'de',
'es': 'es', 'es_mx': 'es', 'es-mx': 'es',
'fr': 'fr', 'fr_FR': 'fr', 'fr-Fr': 'fr', 'fr_CA': 'fr',
'it': 'it', 'it_ch': 'it', 'it-ch': 'it', 'it-IT': 'it',
'ja': 'ja', 'ja_JA': 'ja', 'ja-JA': 'ja',
'ko': 'ko', 'ko_KO': 'ko', 'ko-KO': 'ko',
'pt-BR': 'pt-BR', 'pt': None,
'zh': 'zh-Hans', 'zh-tw': 'zh-Hant', 'zh-cn': 'zh-Hans',
'zh-hk': 'zh-Hant', 'zh-sg': 'zh-Hans',
}
for langCode in expectedMatches:
match = get_language_match(langCode, supportedLangs)
expectedMatch = expectedMatches[langCode]
common.my_assert_equal(self, expectedMatch, match,
                'incorrect language match (Input= %s)' % (langCode,))
#@unittest.skip("skipping")
def test_gp_fallback(self):
"""Test the fallback feature, i.e. when a translated value is not found
for a language, the fallback language should be used - if there is no
fallback language then the source value should be returned.
If the key is not found, the key should be returned.
"""
acc = common.get_gpserviceaccount()
client = GPClient(acc)
        # should fall back to 'fr'; 'ur' is not supported
languages=['ur', 'fr']
t = client.gp_translation(bundleId=common.bundleId2,
languages=languages)
_ = t.gettext
value = _('exit')
common.my_assert_equal(self, u'Au revoir', value,
'incorrect translated value - should have used fr fallback')
# should return key back, key doesn't exist
languages=['es-mx']
t = client.gp_translation(bundleId=common.bundleId2,
languages=languages)
_ = t.gettext
key = 'badKey'
value = _(key)
common.my_assert_equal(self, key, value,
'incorrect translated value - key doesn\'t exist')
#@unittest.skip("skipping")
def test_local_fallback(self):
"""Verify local translations are used with expected"""
acc = common.get_gpserviceaccount()
client = GPClient(acc)
languages=['fo', 'fr']
t = client.translation(bundleId=common.bundleId1,
languages=languages, priority='local', domain='messages',
localedir='test/data/translations', class_=None, codeset=None)
_ = t.gettext
value = _('greet')
common.my_assert_equal(self, 'greet in French (local)', value,
'incorrect value; should have returned local translation')
#@unittest.skip("skipping")
def test_local_translations(self):
"""Verify local translations are used with expected"""
acc = common.get_gpserviceaccount()
client = GPClient(acc)
languages=['fr']
t = client.translation(bundleId=common.bundleId1,
languages=languages, priority='local', domain='messages',
localedir='test/data/translations', class_=None, codeset=None)
_ = t.gettext
value = _('greet')
common.my_assert_equal(self, 'greet in French (local)', value,
'incorrect value; should have returned local translation')
#@unittest.skipIf(common.isReaderCredNotAvailable(),"Reader credentials not available")
def test_reader_get_bundles(self):
"""Verify bundles can not be obtained with reader acc"""
acc = common.get_gpserviceaccount()
client = GPClient(acc)
expectedBundles = []
actualBundles = client.get_bundles()
common.my_assert_equal(self, expectedBundles, actualBundles,
'reader acc can not get bundles list')
#@unittest.skip("skipping")
def test_translation_priority(self):
"""Verify that the priority option in GPClient.translation works"""
acc = common.get_gpserviceaccount()
client = GPClient(acc)
languages=['fr']
# prioritize local
t = client.translation(bundleId=common.bundleId1,
languages=languages, priority='local', domain='messages',
localedir='test/data/translations', class_=None, codeset=None)
_ = t.gettext
value = _('greet')
common.my_assert_equal(self, 'greet in French (local)', value,
'incorrect value; should have returned local translation')
# prioritize gp
t = client.translation(bundleId=common.bundleId1,
languages=languages, priority='gp', domain='messages',
localedir='test/data/translations', class_=None, codeset=None)
_ = t.gettext
value = _('greet')
common.my_assert_equal(self, 'Salut', value,
'incorrect value; should have returned gp translation')
#@unittest.skip("skipping")
def test_update_resource_entry(self):
"""Test to update a resource entry"""
acc = common.get_admin_gpserviceaccount()
client = GPClient(acc)
data = {}
data['value'] = "weather in spanish"
tresp = client.update_resource_entry(common.bundleId1,"es-mx","weather", data=data)
common.my_assert_equal(self, "SUCCESS", tresp["status"],
'bundle resource entry for the language could not be updated')
#@unittest.skip("skipping")
def test_update_resource_entries(self):
"""Test to update resource entries"""
acc = common.get_admin_gpserviceaccount()
client = GPClient(acc)
data = {}
data["welcome"]="Welcome"
tresp = client.update_resource_entries(common.bundleId1,"en", data=data)
common.my_assert_equal(self, "SUCCESS", tresp["status"],
'bundle resource entries for the language could not be updated')
#@unittest.skip("skipping")
def test_upload_resource_entries(self):
"""Test to upload resource entries"""
acc = common.get_admin_gpserviceaccount()
client = GPClient(acc)
data = {}
data["welcome"]="Hello"
tresp = client.upload_resource_entries(common.bundleId1,"en", data=data)
common.my_assert_equal(self, "SUCCESS", tresp["status"],
'bundle resource entries could not be uploaded')
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
"""
----
Authors:
Uri Nieto ([email protected])
Eric J. Humphrey ([email protected])
----
License:
This code is distributed under the GNU LESSER PUBLIC LICENSE
(LGPL, see www.gnu.org).
Copyright (c) 2012-2013 MARL@NYU.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of MARL, NYU nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import os
import sys
import cPickle
import pickle
import numpy as np
import argparse
from scipy.spatial import distance
from multiprocessing import Pool
import time
import glob
# local stuff
import hdf5_getters as GETTERS
import dan_tools
import utils
import scipy.cluster.vq as vq
import pylab as plt
from transforms import load_transform
import analyze_stats as anst
# params, for ICMR paper: 75 and 1.96
WIN = 75
PATCH_LEN = WIN*12
# Set up logger
logger = utils.configure_logger()
# Global models
lda = None
pca = None
def compute_codes_orig_it(track_ids, maindir, clique_ids, start_idx, end_idx):
"""Computes the original features, based on Thierry and Ellis, 2012.
Dimensionality reduction using PCA of 50, 100, and 200 components."""
res = []
trainedpca = utils.load_pickle("models/pca_250Kexamples_900dim_nocovers.pkl")
pca_components = [50,100,200]
# Init codes
codes = []
for n_comp in pca_components:
codes.append(np.ones((end_idx-start_idx,n_comp)) * np.nan)
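# Illustration (assuming a block of 10,000 tracks): codes ends up holding three
# NaN-initialized arrays of shape (10000, 50), (10000, 100) and (10000, 200),
# one per PCA size; each row is filled in below as its track is processed.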
for i, tid in enumerate(track_ids[start_idx:end_idx]):
path = utils.path_from_tid(maindir, tid)
feats = utils.extract_feats(path)
if feats is None:
continue
med = np.median(feats, axis=0)
for pca_idx, n_comp in enumerate(pca_components):
tmp = dan_tools.chromnorm(med.reshape(med.shape[0],
1)).squeeze()
codes[pca_idx][i] = trainedpca.apply_newdata(tmp, ndims=n_comp)
if i % 1000 == 0:
logger.info("Computed %d of %d track(s)" % (i, end_idx-start_idx))
res = (codes, track_ids[start_idx:end_idx], clique_ids[start_idx:end_idx])
return res
def compute_codes_it(track_ids, maindir, d, clique_ids,
start_idx, end_idx, origcodes=None, norm=False):
"""Computes the features based on Humphrey, Nieto and Bello, 2013.
Dimensionality reduction using LDA of 50, 100, and 200 components."""
fx = load_transform(d)
res = []
K = int(d.split("_")[1].split("E")[1])
# Init codes
codes = []
if lda is not None:
lda_components = [50,100,200]
for n_comp in lda_components:
codes.append(np.ones((end_idx-start_idx,n_comp)) * np.nan)
else:
codes.append(np.ones((end_idx-start_idx, K)) * np.nan)
for i, tid in enumerate(track_ids[start_idx:end_idx]):
if origcodes is None:
path = utils.path_from_tid(maindir, tid)
feats = utils.extract_feats(path)
if feats is None:
continue
code = np.median(fx(feats), axis=0)
else:
code = origcodes[i]
if norm:
code = dan_tools.chromnorm(code.reshape(code.shape[0],
1)).squeeze()
if pca is not None:
code = pca.transform(code)
if lda is not None:
for lda_idx, n_comp in enumerate(lda_components):
tmp = lda[lda_idx].transform(code)
codes[lda_idx][i] = dan_tools.chromnorm(tmp.reshape(tmp.shape[0],
1)).squeeze()
else:
codes[0][i] = code
if i % 1000 == 0:
logger.info("Computed %d of %d track(s)" % (i, end_idx-start_idx))
res = (codes, track_ids[start_idx:end_idx], clique_ids[start_idx:end_idx])
return res
def compute_codes(args):
"""Computes maximum 10,000 x 10 tracks. N is the index in the MSD:
e.g.
if N = 1: tracks computed: from 100,000 to 199,999
if N = 5: tracks computed: from 500,000 to 599,999
"""
track_ids = args["track_ids"]
maindir = args["maindir"]
d = args["d"]
N = args["N"]
clique_ids = args["clique_ids"]
outdir = args["outdir"]
origcodesdir = args["origcodesdir"]
pca_n = args["pca_n"]
norm = args["norm"]
MAX = 1e5 / 1
ITER = 1e4 / 1
for it in xrange(10):
logger.info("Computing %d of 10 iteration" % it)
start_idx = int(N*MAX + it*ITER)
end_idx = int(start_idx + ITER)
codes = []
strN = str(N)
if N < 10:
strN = "0" + str(N)
out_file = os.path.join(outdir, strN) + str(it) + "-msd-codes.pk"
if origcodesdir is None:
origcodes = None
else:
origcodes_file = os.path.join(origcodesdir, strN) + str(it) + \
"-msd-codes.pk"
origcodes = utils.load_pickle(origcodes_file)[0][0]
#origcodes = utils.load_pickle(origcodes_file)[0]
if d == "":
codes = compute_codes_orig_it(track_ids, maindir, clique_ids,
start_idx, end_idx)
else:
codes = compute_codes_it(track_ids, maindir, d, clique_ids,
start_idx, end_idx, origcodes=origcodes, norm=norm)
utils.save_pickle(codes, out_file)
def score(feats, clique_ids, N=5236, lda_idx=0):
stats = [np.inf] * N
# For each track id that has a clique id
logger.info("Computing scores for the MSD...")
q = 0
for i, clique_id in enumerate(clique_ids):
if clique_id == -1:
continue
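# Rank computation: Euclidean distances from the query track to every track,
# argsort gives the retrieval order, and the positions (excluding index 0,
# the query itself) where the same clique_id appears are the ranks of its covers.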
D = distance.cdist(feats[i][np.newaxis,:], feats, metric="euclidean")
s = np.argsort(D)[0]
sorted_cliques = clique_ids[s]
r = np.argwhere( sorted_cliques == clique_id )[1:]
if len(r) > 0:
stats[q] = r
q += 1
if q % 400 == 0:
logger.info('After %d queries: average rank per track: %.2f'
', clique: %.2f, MAP: %.2f%%' \
% (q, anst.average_rank_per_track(stats),
anst.average_rank_per_clique(stats),
anst.mean_average_precision(stats) * 100))
return stats
def load_codes(codesdir, lda_idx, max_files=None):
code_files = glob.glob(os.path.join(codesdir, "*.pk"))
if lda_idx == 0:
n_comp = 50
elif lda_idx == 1:
n_comp = 100
elif lda_idx == 2:
n_comp = 200
elif lda_idx == -1:
n_comp = 2045
feats = np.empty((0,n_comp))
track_ids = []
clique_ids = []
if max_files is not None:
code_files = code_files[:max_files]
for code_file in code_files:
codes = utils.load_pickle(code_file)
feats = np.append(feats, codes[0][lda_idx], axis=0)
track_ids += codes[1]
clique_ids += list(codes[2])
track_ids = np.asarray(track_ids)
clique_ids = np.asarray(clique_ids)
return feats, track_ids, clique_ids
def main():
# Args parser
parser = argparse.ArgumentParser(description=
"Evaluates the average rank and mean AP for the test SHS " \
"over the entire MSD",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("msd_dir", action="store",
help="Million Song Dataset main directory")
parser.add_argument("-dictfile", action="store", default="",
help="Pickle to the learned dictionary")
parser.add_argument("-outdir", action="store", default="msd_codes",
help="Output directory for the features")
parser.add_argument("-N", action="store", default=10, type=int,
help="Number of processors to use when computing " \
"the codes for 1M tracks,")
parser.add_argument("-lda", action="store", default=None,
help="LDA file")
parser.add_argument("-pca", nargs=2, metavar=('f.pkl', 'n'),
default=(None, 0),
help="pca model saved in a pickle file, " \
"use n dimensions")
parser.add_argument("-codes", action="store", nargs=2, default=[None,0],
dest="codesdir", metavar=("msd_codes/", "n"),
help="Path to the folder with all the codes and "
"version to evaluate")
parser.add_argument("-orig_codes", action="store", default=None,
dest="origcodesdir",
help="Path to the folder with all the codes without "
"dimensionality reduction")
parser.add_argument("-norm", action="store_true", dest="norm", default=False,
help="Normalize before LDA/PCA or not")
args = parser.parse_args()
start_time = time.time()
maindir = args.msd_dir
shsf = "SHS/shs_dataset_test.txt"
global lda
global pca
# sanity checks
utils.assert_file(maindir)
utils.assert_file(shsf)
utils.create_dir(args.outdir)
# read cliques and all tracks
cliques, all_tracks = utils.read_shs_file(shsf)
track_ids = utils.load_pickle("SHS/track_ids_test.pk")
clique_ids = utils.load_pickle("SHS/clique_ids_test.pk")
# read codes file
codesdir = args.codesdir[0]
if codesdir is not None:
if os.path.isfile(codesdir):
c = utils.load_pickle(codesdir)
feats = c[0]
track_ids = c[1]
clique_ids = c[2]
else:
feats, track_ids, clique_ids = load_codes(codesdir,
lda_idx=int(args.codesdir[1]))
logger.info("Codes files read")
print feats.shape
else:
# Read PCA file
if args.pca[0] is not None:
pca = utils.load_pickle(args.pca[0])[int(args.pca[1])]
# read LDA file
lda_file = args.lda
if lda_file is not None:
lda = utils.load_pickle(lda_file)
utils.assert_file(args.dictfile)
# Prepare Multiprocessing computation
input = []
pool = Pool(processes=args.N)
for n in xrange(args.N):
arg = {}
arg["track_ids"] = track_ids
arg["maindir"] = maindir
arg["d"] = args.dictfile
arg["N"] = n
arg["clique_ids"] = clique_ids
arg["outdir"] = args.outdir
arg["origcodesdir"] = args.origcodesdir
arg["pca_n"] = int(args.pca[1])
arg["norm"] = args.norm
input.append(arg)
# Start computing the codes
pool.map(compute_codes, input)
# Done!
logger.info("Codes computation done!")
logger.info("Took %.2f seconds" % (time.time() - start_time))
sys.exit()
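# NOTE: the sys.exit() above stops the script here, so the scoring block below
# is not reached as written; remove the exit to also compute and save the stats.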
# Scores
feats, clique_ids, track_ids = utils.clean_feats(feats, clique_ids, track_ids)
stats = score(feats, clique_ids, N=len(all_tracks))
# TODO: change file name
utils.save_pickle(stats, "stats.pk")
# done
logger.info('Average rank per track: %.2f, clique: %.2f, MAP: %.2f%%' \
% (anst.average_rank_per_track(stats),
anst.average_rank_per_clique(stats),
anst.mean_average_precision(stats) * 100))
logger.info("Done! Took %.2f seconds" % (time.time() - start_time))
if __name__ == '__main__':
main()
|
|
#sbaas
from SBaaS_base.postgresql_orm_base import *
class data_stage02_isotopomer_models(Base):
__tablename__ = 'data_stage02_isotopomer_models'
id = Column(Integer, Sequence('data_stage02_isotopomer_models_id_seq'), primary_key=True)
model_id = Column(String(50), primary_key=True)
model_name = Column(String(100))
model_description = Column(String(100))
model_file = Column(Text)
file_type = Column(String(50))
date = Column(DateTime)
def __init__(self,model_id_I,
model_name_I,
model_description_I,
model_file_I,
file_type_I,
date_I):
self.model_id=model_id_I
self.model_name=model_name_I
self.model_description=model_description_I
self.model_file=model_file_I
self.file_type=file_type_I
self.date=date_I
def __repr__dict__(self):
return {'id':self.id,
'model_id':self.model_id,
'model_name':self.model_name,
'model_description':self.model_description,
'model_file':self.model_file,
'file_type':self.file_type,
'date':self.date}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_isotopomer_modelReactions(Base):
__tablename__ = 'data_stage02_isotopomer_modelReactions'
id = Column(Integer, Sequence('data_stage02_isotopomer_modelReactions_id_seq'), primary_key=True)
model_id = Column(String(50), primary_key=True)
rxn_id = Column(String(50), primary_key=True)
rxn_name = Column(String(100))
equation = Column(String(4000));
subsystem = Column(String(255));
gpr = Column(Text);
genes = Column(postgresql.ARRAY(String(50)));
reactants_stoichiometry = Column(postgresql.ARRAY(Float)) # stoichiometry of metabolites
products_stoichiometry = Column(postgresql.ARRAY(Float))
reactants_ids = Column(postgresql.ARRAY(String(100))) # list of met_ids that are in the reaction
products_ids = Column(postgresql.ARRAY(String(100)))
lower_bound = Column(Float) #derived from experimentally measured values or estimations from simulations
upper_bound = Column(Float) #derived from experimentally measured values or estimations from simulations
objective_coefficient = Column(Float)
flux_units = Column(String(50))
fixed = Column(Boolean)
free = Column(Boolean)
reversibility = Column(Boolean)
weight = Column(Float) #weighting given in the optimization problem
used_ = Column(Boolean)
comment_ = Column(Text);
def __init__(self,model_id_I,
rxn_id_I,
equation_I,
subsystem_I,
gpr_I,
genes_I,
reactants_stoichiometry_I,
products_stoichiometry_I,
reactants_ids_I,
products_ids_I,
lower_bound_I,
upper_bound_I,
objective_coefficient_I,
flux_units_I,
fixed_I,
free_I,
reversibility_I,
weight_I,
used__I,
comment__I):
self.model_id=model_id_I
self.rxn_id=rxn_id_I
self.equation=equation_I
self.subsystem=subsystem_I
self.gpr=gpr_I
self.genes=genes_I
self.reactants_stoichiometry=reactants_stoichiometry_I
self.products_stoichiometry=products_stoichiometry_I
self.reactants_ids=reactants_ids_I
self.products_ids=products_ids_I
self.lower_bound=lower_bound_I
self.upper_bound=upper_bound_I
self.objective_coefficient=objective_coefficient_I
self.flux_units=flux_units_I
self.fixed=fixed_I
self.free=free_I
self.reversibility=reversibility_I
self.weight=weight_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'model_id':self.model_id,
'rxn_id':self.rxn_id,
'equation':self.equation,
'subsystem':self.subsystem,
'gpr':self.gpr,
'genes':self.genes,
'reactants_stoichiometry':self.reactants_stoichiometry,
'products_stoichiometry':self.products_stoichiometry,
'reactants_ids':self.reactants_ids,
'products_ids':self.products_ids,
'lower_bound':self.lower_bound,
'upper_bound':self.upper_bound,
'objective_coefficient':self.objective_coefficient,
'flux_units':self.flux_units,
'fixed':self.fixed,
'free':self.free,
'reversibility':self.reversibility,
'weight':self.weight,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_isotopomer_modelMetabolites(Base):
__tablename__ = 'data_stage02_isotopomer_modelMetabolites'
id = Column(Integer, Sequence('data_stage02_isotopomer_modelMetabolites_id_seq'), primary_key=True)
model_id = Column(String(50), primary_key=True)
met_name = Column(String(500))
met_id = Column(String(100), primary_key=True)
formula = Column(String(100))
charge = Column(Integer)
compartment = Column(String(50))
bound = Column(Float)
constraint_sense = Column(String(5))
used_ = Column(Boolean)
comment_ = Column(Text);
lower_bound = Column(Float) #derived from experimentally measured values or estimations from simulations
upper_bound = Column(Float) #derived from experimentally measured values or estimations from simulations
balanced = Column(Boolean)
fixed = Column(Boolean)
def __init__(self,model_id_I,
met_name_I,
met_id_I,
formula_I,
charge_I,
compartment_I,
bound_I,
constraint_sense_I,
lower_bound_I,
upper_bound_I,
balanced_I,
fixed_I,
used__I,
comment__I):
self.model_id=model_id_I
self.met_name=met_name_I
self.met_id=met_id_I
self.formula=formula_I
self.charge=charge_I
self.compartment=compartment_I
self.bound=bound_I
self.constraint_sense=constraint_sense_I
self.lower_bound=lower_bound_I
self.upper_bound=upper_bound_I
self.balanced=balanced_I
self.fixed=fixed_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'model_id':self.model_id,
'met_name':self.met_name,
'met_id':self.met_id,
'formula':self.formula,
'charge':self.charge,
'bound':self.bound,
'constraint_sense':self.constraint_sense,
'compartment':self.compartment,
'lower_bound':self.lower_bound,
'upper_bound':self.upper_bound,
'balanced':self.balanced,
'fixed':self.fixed,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_isotopomer_atomMappingReactions(Base):
__tablename__ = 'data_stage02_isotopomer_atomMappingReactions'
id = Column(Integer, Sequence('data_stage02_isotopomer_atomMappingReactions_id_seq'), primary_key=True)
mapping_id = Column(String(100), primary_key=True)
rxn_id = Column(String(50), primary_key=True)
rxn_description = Column(String(500))
reactants_stoichiometry_tracked = Column(postgresql.ARRAY(Float)) # stoichiometry of metabolites (e.g. ['-1','-1'])
products_stoichiometry_tracked = Column(postgresql.ARRAY(Float))
reactants_ids_tracked = Column(postgresql.ARRAY(String(100))) # list of met_ids that are tracked (e.g. ['pyr_c','accoa_c'])
products_ids_tracked = Column(postgresql.ARRAY(String(100)))
reactants_elements_tracked = Column(postgresql.JSON) # list of elements that are tracked (e.g. ['C','C'])
products_elements_tracked = Column(postgresql.JSON)
reactants_positions_tracked = Column(postgresql.JSON) # list of elements that are tracked (e.g. ['C','C'])
products_positions_tracked = Column(postgresql.JSON)
reactants_mapping = Column(postgresql.ARRAY(String(20000))) # mappings of each atom for each met_id that are tracked (e.g. ['abc','de'])
products_mapping = Column(postgresql.ARRAY(String(20000)))
rxn_equation = Column(String(5000)) #formatted version of rxn_formula and rxn_mapping depending on the fluxomics software
used_ = Column(Boolean);
comment_ = Column(Text);
def __init__(self,
#id_I,
mapping_id_I,
rxn_id_I,
rxn_description_I,
reactants_stoichiometry_tracked_I,
products_stoichiometry_tracked_I,
reactants_ids_tracked_I,
products_ids_tracked_I,
reactants_elements_tracked_I,
products_elements_tracked_I,
reactants_positions_tracked_I,
products_positions_tracked_I,
reactants_mapping_I,
products_mapping_I,
rxn_equation_I,
used__I,
comment__I):
#self.id=id_I
self.mapping_id=mapping_id_I
self.rxn_id=rxn_id_I
self.rxn_description=rxn_description_I
self.reactants_stoichiometry_tracked=reactants_stoichiometry_tracked_I
self.products_stoichiometry_tracked=products_stoichiometry_tracked_I
self.reactants_ids_tracked=reactants_ids_tracked_I
self.products_ids_tracked=products_ids_tracked_I
self.reactants_elements_tracked=reactants_elements_tracked_I
self.products_elements_tracked=products_elements_tracked_I
self.reactants_positions_tracked=reactants_positions_tracked_I
self.products_positions_tracked=products_positions_tracked_I
self.reactants_mapping=reactants_mapping_I
self.products_mapping=products_mapping_I
self.rxn_equation=rxn_equation_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'mapping_id':self.mapping_id,
'rxn_id':self.rxn_id,
'rxn_description':self.rxn_description,
'reactants_stoichiometry_tracked':self.reactants_stoichiometry_tracked,
'products_stoichiometry_tracked':self.products_stoichiometry_tracked,
'reactants_ids_tracked':self.reactants_ids_tracked,
'products_ids_tracked':self.products_ids_tracked,
'reactants_elements_tracked':self.reactants_elements_tracked,
'products_elements_tracked':self.products_elements_tracked,
'reactants_positions_tracked':self.reactants_positions_tracked,
'products_positions_tracked':self.products_positions_tracked,
'reactants_mapping':self.reactants_mapping,
'products_mapping':self.products_mapping,
'rxn_equation':self.rxn_equation,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__());
class data_stage02_isotopomer_atomMappingMetabolites(Base):
__tablename__ = 'data_stage02_isotopomer_atomMappingMetabolites'
id = Column(Integer, Sequence('data_stage02_isotopomer_atomMappingMetabolites_id_seq'), primary_key=True)
mapping_id = Column(String(100), primary_key=True)
#met_name = Column(String(500))
met_id = Column(String(100), primary_key=True)
#formula = Column(String(100))
met_elements = Column(postgresql.ARRAY(String(3))) # the elements that are tracked (e.g. C,C,C)
met_atompositions = Column(postgresql.ARRAY(Integer)) #the atoms positions that are tracked (e.g. 1,2,3)
met_symmetry_elements = Column(postgresql.ARRAY(String(3))) #symmetric molecules can alternatively be indicated in the reaction mapping
met_symmetry_atompositions = Column(postgresql.ARRAY(Integer)) #maps the symmetric atom positions
used_ = Column(Boolean)
comment_ = Column(Text);
met_mapping=Column(postgresql.JSON())
#met_mapping=Column(postgresql.ARRAY(String(5000)))
base_met_ids=Column(postgresql.ARRAY(String(100)))
base_met_elements=Column(postgresql.JSON())
#base_met_elements=Column(postgresql.ARRAY(String(3)))
base_met_atompositions=Column(postgresql.JSON())
#base_met_atompositions=Column(postgresql.ARRAY(Integer))
base_met_symmetry_elements=Column(postgresql.JSON())
#base_met_symmetry_elements=Column(postgresql.ARRAY(String(3)))
base_met_symmetry_atompositions=Column(postgresql.JSON())
#base_met_symmetry_atompositions=Column(postgresql.ARRAY(Integer))
base_met_indices=Column(postgresql.ARRAY(Integer))
def __init__(self,
mapping_id_I,
#met_name_I,
met_id_I,
#formula_I,
met_elements_I,
met_atompositions_I,
met_symmetry_elements_I,
met_symmetry_atompositions_I,
used__I,
comment__I,
met_mapping_I=None,
base_met_ids_I=None,
base_met_elements_I=None,
base_met_atompositions_I=None,
base_met_symmetry_elements_I=None,
base_met_symmetry_atompositions_I=None,
base_met_indices_I=None):
self.mapping_id=mapping_id_I
#self.met_name=met_name_I
self.met_id=met_id_I
#self.formula=formula_I
self.met_elements=met_elements_I
self.met_atompositions=met_atompositions_I
self.met_symmetry_elements=met_symmetry_elements_I
self.met_symmetry_atompositions=met_symmetry_atompositions_I
self.used_=used__I
self.comment_=comment__I
self.met_mapping=met_mapping_I;
self.base_met_ids=base_met_ids_I;
self.base_met_elements=base_met_elements_I;
self.base_met_atompositions=base_met_atompositions_I;
self.base_met_symmetry_elements=base_met_symmetry_elements_I;
self.base_met_symmetry_atompositions=base_met_symmetry_atompositions_I;
self.base_met_indices = base_met_indices_I;
def __repr__dict__(self):
return {'id':self.id,
'mapping_id':self.mapping_id,
#'met_name':self.met_name,
'met_id':self.met_id,
#'formula':self.formula,
'met_elements':self.met_elements,
'met_atompositions':self.met_atompositions,
'met_symmetry_elements':self.met_symmetry_elements,
'met_symmetry_atompositions':self.met_symmetry_atompositions,
'used_':self.used_,
'comment_':self.comment_,
'met_mapping':self.met_mapping,
'base_met_ids':self.base_met_ids,
'base_met_elements':self.base_met_elements,
'base_met_atompositions':self.base_met_atompositions,
'base_met_symmetry_elements':self.base_met_symmetry_elements,
'base_met_symmetry_atompositions':self.base_met_symmetry_atompositions,
'base_met_indices':self.base_met_indices}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
|
|
def get_idx(in_files, stop_idx=None, start_idx=None):
"""
Method to get the first and the last volume for
the functional run. It verifies the user-specified
first and last volume. If the values are not valid, it
calculates and returns the very first and the last volume
Parameters
----------
in_files : string (nifti file)
Path to input functional run
stop_idx : int
Last volume to be considered, specified by user
in the configuration file
start_idx : int
First volume to be considered, specified by user
in the configuration file
Returns
-------
stopidx : int
Value of the last volume to consider for the functional run
startidx : int
Value of the first volume to consider for the functional run
"""
#stopidx = None
#startidx = None
from nibabel import load
nvols = load(in_files).shape[3]
if (start_idx == None) or (start_idx < 0) or (start_idx > (nvols - 1)):
startidx = 0
else:
startidx = start_idx
if (stop_idx == None) or (stop_idx > (nvols - 1)):
stopidx = nvols - 1
else:
stopidx = stop_idx
return stopidx, startidx
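# Minimal usage sketch (file name and volume count are assumed): for a 4-D run
# 'func.nii.gz' with 200 volumes,
#   get_idx('func.nii.gz', stop_idx=None, start_idx=-3) -> (199, 0)
#   get_idx('func.nii.gz', stop_idx=150, start_idx=10)  -> (150, 10)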
def func_motion_correct_workflow(workflow, resource_pool, config):
# resource pool should have:
# functional_scan
import os
import sys
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.fsl.maths as fsl
from nipype.interfaces.afni import preprocess
from workflow_utils import check_input_resources, \
check_config_settings
check_input_resources(resource_pool, "functional_scan")
check_config_settings(config, "start_idx")
check_config_settings(config, "stop_idx")
check_config_settings(config, "slice_timing_correction")
func_get_idx = pe.Node(util.Function(input_names=['in_files',
'stop_idx',
'start_idx'],
output_names=['stopidx',
'startidx'],
function=get_idx),
name='func_get_idx')
func_get_idx.inputs.in_files = resource_pool["functional_scan"]
func_get_idx.inputs.start_idx = config["start_idx"]
func_get_idx.inputs.stop_idx = config["stop_idx"]
func_drop_trs = pe.Node(interface=preprocess.Calc(),
name='func_drop_trs')
func_drop_trs.inputs.in_file_a = resource_pool["functional_scan"]
func_drop_trs.inputs.expr = 'a'
func_drop_trs.inputs.outputtype = 'NIFTI_GZ'
workflow.connect(func_get_idx, 'startidx',
func_drop_trs, 'start_idx')
workflow.connect(func_get_idx, 'stopidx',
func_drop_trs, 'stop_idx')
#workflow.connect(func_drop_trs, 'out_file',
# outputNode, 'drop_tr')
func_slice_timing_correction = pe.Node(interface=preprocess.TShift(),
name='func_slice_time_correction')
func_slice_timing_correction.inputs.outputtype = 'NIFTI_GZ'
func_deoblique = pe.Node(interface=preprocess.Refit(),
name='func_deoblique')
func_deoblique.inputs.deoblique = True
if config["slice_timing_correction"] == True:
workflow.connect(func_drop_trs, 'out_file',
func_slice_timing_correction,'in_file')
workflow.connect(func_slice_timing_correction, 'out_file',
func_deoblique, 'in_file')
else:
workflow.connect(func_drop_trs, 'out_file',
func_deoblique, 'in_file')
func_reorient = pe.Node(interface=preprocess.Resample(),
name='func_reorient')
func_reorient.inputs.orientation = 'RPI'
func_reorient.inputs.outputtype = 'NIFTI_GZ'
workflow.connect(func_deoblique, 'out_file',
func_reorient, 'in_file')
func_get_mean_RPI = pe.Node(interface=preprocess.TStat(),
name='func_get_mean_RPI')
func_get_mean_RPI.inputs.options = '-mean'
func_get_mean_RPI.inputs.outputtype = 'NIFTI_GZ'
workflow.connect(func_reorient, 'out_file',
func_get_mean_RPI, 'in_file')
# calculate motion parameters
func_motion_correct = pe.Node(interface=preprocess.Volreg(),
name='func_motion_correct')
func_motion_correct.inputs.args = '-Fourier -twopass'
func_motion_correct.inputs.zpad = 4
func_motion_correct.inputs.outputtype = 'NIFTI_GZ'
workflow.connect(func_reorient, 'out_file',
func_motion_correct, 'in_file')
workflow.connect(func_get_mean_RPI, 'out_file',
func_motion_correct, 'basefile')
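# Second pass: take the mean of the first-pass motion-corrected series and use
# it as the registration base for a refined Volreg run (func_motion_correct_A).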
func_get_mean_motion = func_get_mean_RPI.clone('func_get_mean_motion')
workflow.connect(func_motion_correct, 'out_file',
func_get_mean_motion, 'in_file')
func_motion_correct_A = func_motion_correct.clone('func_motion_correct_A')
func_motion_correct_A.inputs.md1d_file = 'max_displacement.1D'
workflow.connect(func_reorient, 'out_file',
func_motion_correct_A, 'in_file')
workflow.connect(func_get_mean_motion, 'out_file',
func_motion_correct_A, 'basefile')
resource_pool["func_motion_correct"] = (func_motion_correct_A, 'out_file')
resource_pool["coordinate_transformation"] = \
(func_motion_correct_A, 'oned_matrix_save')
return workflow, resource_pool
def run_func_motion_correct(functional_scan, start_idx, stop_idx,
slice_timing_correction=False, run=True):
# stand-alone runner for functional motion correct workflow
import os
import sys
import glob
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
workflow = pe.Workflow(name='func_motion_correct_workflow')
current_dir = os.getcwd()
workflow_dir = os.path.join(current_dir, "func_motion_correct")
workflow.base_dir = workflow_dir
resource_pool = {}
config = {}
num_cores_per_subject = 1
resource_pool["functional_scan"] = functional_scan
config["start_idx"] = start_idx
config["stop_idx"] = stop_idx
config["slice_timing_correction"] = slice_timing_correction
workflow, resource_pool = \
func_motion_correct_workflow(workflow, resource_pool, config)
ds = pe.Node(nio.DataSink(), name='datasink_func_motion_correct')
ds.inputs.base_directory = workflow_dir
node, out_file = resource_pool["func_motion_correct"]
workflow.connect(node, out_file, ds, 'func_motion_correct')
ds = pe.Node(nio.DataSink(), name='datasink_coordinate_transformation')
ds.inputs.base_directory = workflow_dir
node, out_file = resource_pool["coordinate_transformation"]
workflow.connect(node, out_file, ds, 'coordinate_transformation')
if run == True:
workflow.run(plugin='MultiProc', plugin_args= \
{'n_procs': num_cores_per_subject})
outpath = glob.glob(os.path.join(workflow_dir, "func_motion_correct",\
"*"))[0]
return outpath
else:
return workflow, workflow.base_dir
def functional_brain_mask_workflow(workflow, resource_pool, config):
# resource pool should have:
# func_motion_correct
import os
import sys
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.fsl as fsl
from nipype.interfaces.afni import preprocess
#check_input_resources(resource_pool, "func_motion_correct")
if "use_bet" not in config.keys():
config["use_bet"] = False
if "func_motion_correct" not in resource_pool.keys():
from functional_preproc import func_motion_correct_workflow
workflow, resource_pool = \
func_motion_correct_workflow(workflow, resource_pool, config)
if config["use_bet"] == False:
func_get_brain_mask = pe.Node(interface=preprocess.Automask(),
name='func_get_brain_mask')
func_get_brain_mask.inputs.outputtype = 'NIFTI_GZ'
else:
func_get_brain_mask = pe.Node(interface=fsl.BET(),
name='func_get_brain_mask_BET')
func_get_brain_mask.inputs.mask = True
func_get_brain_mask.inputs.functional = True
erode_one_voxel = pe.Node(interface=fsl.ErodeImage(),
name='erode_one_voxel')
erode_one_voxel.inputs.kernel_shape = 'box'
erode_one_voxel.inputs.kernel_size = 1.0
#if isinstance(tuple, resource_pool["func_motion_correct"]):
if len(resource_pool["func_motion_correct"]) == 2:
node, out_file = resource_pool["func_motion_correct"]
workflow.connect(node, out_file, func_get_brain_mask, 'in_file')
else:
func_get_brain_mask.inputs.in_file = \
resource_pool["func_motion_correct"]
if config["use_bet"] == False:
resource_pool["functional_brain_mask"] = (func_get_brain_mask, \
'out_file')
else:
workflow.connect(func_get_brain_mask, 'mask_file',
erode_one_voxel, 'in_file')
resource_pool["functional_brain_mask"] = (erode_one_voxel, 'out_file')
return workflow, resource_pool
def run_functional_brain_mask(func_motion_correct, use_bet=False, run=True):
# stand-alone runner for functional brain mask workflow
import os
import sys
import glob
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
output = "functional_brain_mask"
workflow = pe.Workflow(name='%s_workflow' % output)
current_dir = os.getcwd()
workflow_dir = os.path.join(current_dir, output)
workflow.base_dir = workflow_dir
resource_pool = {}
config = {}
num_cores_per_subject = 1
resource_pool["func_motion_correct"] = func_motion_correct
config["use_bet"] = use_bet
workflow, resource_pool = \
functional_brain_mask_workflow(workflow, resource_pool, config)
ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
ds.inputs.base_directory = workflow_dir
node, out_file = resource_pool[output]
workflow.connect(node, out_file, ds, output)
if run == True:
workflow.run(plugin='MultiProc', plugin_args= \
{'n_procs': num_cores_per_subject})
outpath = glob.glob(os.path.join(workflow_dir, "functional_brain" \
"_mask", "*"))[0]
return outpath
else:
return workflow, workflow.base_dir
def mean_functional_workflow(workflow, resource_pool, config):
# resource pool should have:
# func_motion_correct
''' this version does NOT remove background noise '''
import os
import sys
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.fsl.maths as fsl
from nipype.interfaces.afni import preprocess
from workflow_utils import check_input_resources
#check_input_resources(resource_pool, "func_motion_correct")
#check_input_resources(resource_pool, "functional_brain_mask")
if "func_motion_correct" not in resource_pool.keys():
from functional_preproc import func_motion_correct_workflow
workflow, resource_pool = \
func_motion_correct_workflow(workflow, resource_pool, config)
func_mean_skullstrip = pe.Node(interface=preprocess.TStat(),
name='func_mean_skullstrip')
func_mean_skullstrip.inputs.options = '-mean'
func_mean_skullstrip.inputs.outputtype = 'NIFTI_GZ'
if len(resource_pool["func_motion_correct"]) == 2:
node, out_file = resource_pool["func_motion_correct"]
workflow.connect(node, out_file, func_mean_skullstrip, 'in_file')#func_edge_detect, 'in_file_a')
else:
func_mean_skullstrip.inputs.in_file = \
resource_pool["func_motion_correct"]
resource_pool["mean_functional"] = (func_mean_skullstrip, 'out_file')
return workflow, resource_pool
def run_mean_functional(func_motion_correct, run=True):
# stand-alone runner for mean functional workflow
''' this version does NOT remove background noise '''
import os
import sys
import glob
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
output = "mean_functional"
workflow = pe.Workflow(name='%s_workflow' % output)
current_dir = os.getcwd()
workflow_dir = os.path.join(current_dir, output)
workflow.base_dir = workflow_dir
resource_pool = {}
config = {}
num_cores_per_subject = 1
resource_pool["func_motion_correct"] = func_motion_correct
workflow, resource_pool = \
mean_functional_workflow(workflow, resource_pool, config)
ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
ds.inputs.base_directory = workflow_dir
node, out_file = resource_pool[output]
workflow.connect(node, out_file, ds, output)
if run == True:
workflow.run(plugin='MultiProc', plugin_args= \
{'n_procs': num_cores_per_subject})
outpath = glob.glob(os.path.join(workflow_dir, "mean_functional", \
"*"))[0]
return outpath
else:
return workflow, workflow.base_dir
|
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from ggrc.login import get_current_user
from ggrc.models import all_models
from ggrc.models.object_person import ObjectPerson
from ggrc.models.object_owner import ObjectOwner
from ggrc.models.relationship import Relationship
from ggrc_basic_permissions.models import UserRole
from ggrc_basic_permissions import objects_via_assignable_query
from ggrc_basic_permissions import program_relationship_query
from ggrc.rbac import permissions, context_query_filter
from sqlalchemy import \
event, and_, or_, literal, union, alias, case, func, distinct
from sqlalchemy.sql import false
from sqlalchemy.schema import DDL
from sqlalchemy.ext.declarative import declared_attr
from .sql import SqlIndexer
class MysqlRecordProperty(db.Model):
__tablename__ = 'fulltext_record_properties'
key = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(64), primary_key=True)
context_id = db.Column(db.Integer)
tags = db.Column(db.String)
property = db.Column(db.String(64), primary_key=True)
content = db.Column(db.Text)
@declared_attr
def __table_args__(self):
return (
# NOTE
# This is here to prevent Alembic from wanting to drop the index, but
# the DDL below or a similar Alembic migration should be used to create
# the index.
db.Index('{}_text_idx'.format(self.__tablename__), 'content'),
# These are real indexes
db.Index('ix_{}_key'.format(self.__tablename__), 'key'),
db.Index('ix_{}_type'.format(self.__tablename__), 'type'),
db.Index('ix_{}_tags'.format(self.__tablename__), 'tags'),
db.Index('ix_{}_context_id'.format(self.__tablename__), 'context_id'),
# Only MyISAM supports fulltext indexes until newer MySQL/MariaDB
{'mysql_engine': 'myisam'},
)
event.listen(
MysqlRecordProperty.__table__,
'after_create',
DDL('ALTER TABLE {tablename} ADD FULLTEXT INDEX {tablename}_text_idx '
'(content)'.format(tablename=MysqlRecordProperty.__tablename__))
)
class MysqlIndexer(SqlIndexer):
record_type = MysqlRecordProperty
def _get_type_query(self, model_names, permission_type='read',
permission_model=None):
type_queries = []
for model_name in model_names:
type_query = None
if permission_type == 'read':
contexts = permissions.read_contexts_for(
permission_model or model_name)
resources = permissions.read_resources_for(
permission_model or model_name)
elif permission_type == 'create':
contexts = permissions.create_contexts_for(
permission_model or model_name)
resources = permissions.create_resources_for(
permission_model or model_name)
elif permission_type == 'update':
contexts = permissions.update_contexts_for(
permission_model or model_name)
resources = permissions.update_resources_for(
permission_model or model_name)
elif permission_type == 'delete':
contexts = permissions.delete_contexts_for(
permission_model or model_name)
resources = permissions.delete_resources_for(
permission_model or model_name)
if permission_model and contexts:
contexts = set(contexts) & set(
permissions.read_contexts_for(model_name))
if contexts is not None:
# Don't filter out None contexts here
if None not in contexts and permission_type == "read":
contexts.append(None)
if resources:
resource_sql = and_(
MysqlRecordProperty.type == model_name,
MysqlRecordProperty.key.in_(resources))
else:
resource_sql = false()
type_query = or_(
and_(
MysqlRecordProperty.type == model_name,
context_query_filter(MysqlRecordProperty.context_id, contexts)
),
resource_sql)
type_queries.append(type_query)
else:
type_queries.append(MysqlRecordProperty.type == model_name)
return and_(
MysqlRecordProperty.type.in_(model_names),
or_(*type_queries))
def _get_filter_query(self, terms):
whitelist = or_(
# Because property values for custom attributes are
# `attribute_value_<id>`
MysqlRecordProperty.property.contains('attribute_value'),
MysqlRecordProperty.property.in_(
['title', 'name', 'email', 'notes', 'description', 'slug'])
)
if not terms:
return whitelist
elif terms:
return and_(whitelist, MysqlRecordProperty.content.contains(terms))
# FIXME: Temporary (slow) fix for words shorter than MySQL default limit
# elif len(terms) < 4:
# return MysqlRecordProperty.content.contains(terms)
# else:
# return MysqlRecordProperty.content.match(terms)
def _get_type_select_column(self, model):
mapper = model._sa_class_manager.mapper
if mapper.polymorphic_on is None:
type_column = literal(mapper.class_.__name__)
else:
# Handle polymorphic types with CASE
type_column = case(
value=mapper.polymorphic_on,
whens={
val: m.class_.__name__
for val, m in mapper.polymorphic_map.items()
})
return type_column
def _types_to_type_models(self, types):
if types is None:
return all_models.all_models
return [m for m in all_models.all_models if m.__name__ in types]
# filters by "myview" for a given person
def _add_owner_query(self, query, types=None, contact_id=None): # noqa
'''
Finds all objects which might appear on a user's Profile or Dashboard
pages, including:
Objects mapped via ObjectPerson
Objects owned via ObjectOwner
Objects in private contexts via UserRole (e.g. for Private Programs)
Objects for which the user is the "contact"
Objects for which the user is the "primary_assessor" or
"secondary_assessor"
Objects to which the user is mapped via a custom attribute
Assignable objects for which the user is an assignee
This method only *limits* the result set -- Contexts and Roles will still
filter out forbidden objects.
'''
# Check if the user has Creator role
current_user = get_current_user()
my_objects = contact_id is not None
if current_user.system_wide_role == "Creator":
contact_id = current_user.id
if not contact_id:
return query
type_models = self._types_to_type_models(types)
model_names = [model.__name__ for model in type_models]
models = []
for model in type_models:
base_model = model._sa_class_manager.mapper.primary_base_mapper.class_
if base_model not in models:
models.append(base_model)
models = [(model, self._get_type_select_column(model)) for model in models]
type_union_queries = []
all_people = db.session.query(
all_models.Person.id.label('id'),
literal(all_models.Person.__name__).label('type'),
literal(None).label('context_id')
)
type_union_queries.append(all_people)
# Objects to which the user is "mapped"
# We don't return mapped objects for the Creator because being mapped
# does not give the Creator necessary permissions to view the object.
if current_user.system_wide_role != "Creator":
object_people_query = db.session.query(
ObjectPerson.personable_id.label('id'),
ObjectPerson.personable_type.label('type'),
literal(None).label('context_id')
).filter(
and_(
ObjectPerson.person_id == contact_id,
ObjectPerson.personable_type.in_(model_names)
)
)
type_union_queries.append(object_people_query)
# Objects for which the user is an "owner"
object_owners_query = db.session.query(
ObjectOwner.ownable_id.label('id'),
ObjectOwner.ownable_type.label('type'),
literal(None).label('context_id')
).filter(
and_(
ObjectOwner.person_id == contact_id,
ObjectOwner.ownable_type.in_(model_names),
)
)
type_union_queries.append(object_owners_query)
# Objects to which the user is mapped via a custom attribute
ca_mapped_objects_query = db.session.query(
all_models.CustomAttributeValue.attributable_id.label('id'),
all_models.CustomAttributeValue.attributable_type.label('type'),
literal(None).label('context_id')
).filter(
and_(
all_models.CustomAttributeValue.attribute_value == "Person",
all_models.CustomAttributeValue.attribute_object_id == contact_id
)
)
type_union_queries.append(ca_mapped_objects_query)
# Objects for which the user is assigned
model_assignee_query = db.session.query(
Relationship.destination_id.label('id'),
Relationship.destination_type.label('type'),
literal(None).label('context_id'),
).filter(
and_(
Relationship.source_type == "Person",
Relationship.source_id == contact_id,
),
)
type_union_queries.append(model_assignee_query)
model_assignee_query = db.session.query(
Relationship.source_id.label('id'),
Relationship.source_type.label('type'),
literal(None).label('context_id'),
).filter(
and_(
Relationship.destination_type == "Person",
Relationship.destination_id == contact_id,
),
)
type_union_queries.append(model_assignee_query)
if not my_objects:
type_union_queries.append(
program_relationship_query(contact_id, True))
type_union_queries.append(
objects_via_assignable_query(contact_id)
)
# FIXME The following line crashes if the Workflow extension is not enabled
for model in [all_models.Program, all_models.Audit, all_models.Workflow]:
context_query = db.session.query(
model.id.label('id'),
literal(model.__name__).label('type'),
literal(None).label('context_id'),
).join(
UserRole,
and_(
UserRole.context_id == model.context_id,
UserRole.person_id == contact_id,
)
)
type_union_queries.append(context_query)
for model, type_column in models:
# Objects for which the user is the "contact" or "secondary contact"
if hasattr(model, 'contact_id'):
model_type_query = db.session.query(
model.id.label('id'),
type_column.label('type'),
literal(None).label('context_id')
).filter(
model.contact_id == contact_id
).distinct()
type_union_queries.append(model_type_query)
# Objects for which the user is the "contact"
if hasattr(model, 'secondary_contact_id'):
model_type_query = db.session.query(
model.id.label('id'),
type_column.label('type'),
literal(None).label('context_id')
).filter(
model.secondary_contact_id == contact_id
).distinct()
type_union_queries.append(model_type_query)
if model is all_models.Control:
# Control also has `principal_assessor` and `secondary_assessor`
assessor_queries = []
if hasattr(model, 'principal_assessor_id'):
assessor_queries.append(or_(
model.principal_assessor_id == contact_id))
if hasattr(model, 'secondary_assessor_id'):
assessor_queries.append(or_(
model.secondary_assessor_id == contact_id))
model_type_query = db.session.query(
model.id.label('id'),
type_column.label('type'),
literal(None).label('context_id')
).filter(
or_(*assessor_queries)
).distinct()
type_union_queries.append(model_type_query)
# Construct and JOIN to the UNIONed result set
type_union_query = alias(union(*type_union_queries))
query = query.join(
type_union_query,
and_(
type_union_query.c.id == MysqlRecordProperty.key,
type_union_query.c.type == MysqlRecordProperty.type),
)
return query
def _add_extra_params_query(self, query, type, extra_param):
if not extra_param:
return query
models = [m for m in all_models.all_models if m.__name__ == type]
if len(models) == 0:
return query
model = models[0]
return query.filter(self.record_type.key.in_(
db.session.query(
model.id.label('id')
).filter_by(**extra_param)
))
def _get_grouped_types(self, types, extra_params=None):
model_names = [model.__name__ for model in all_models.all_models]
if types is not None:
model_names = [m for m in model_names if m in types]
if extra_params is not None:
model_names = [m for m in model_names if m not in extra_params]
return model_names
def search(self, terms, types=None, permission_type='read',
permission_model=None, contact_id=None, extra_params={}):
model_names = self._get_grouped_types(types, extra_params)
query = db.session.query(
self.record_type.key, self.record_type.type,
self.record_type.property, self.record_type.content)
query = query.filter(
self._get_type_query(model_names, permission_type, permission_model))
query = query.filter(self._get_filter_query(terms))
query = self._add_owner_query(query, types, contact_id)
model_names = [model.__name__ for model in all_models.all_models]
if types is not None:
model_names = [m for m in model_names if m in types]
unions = []
# Add extra_params and extra_colums:
for k, v in extra_params.iteritems():
if k not in model_names:
continue
q = db.session.query(
self.record_type.key, self.record_type.type,
self.record_type.property, self.record_type.content)
q = q.filter(
self._get_type_query([k], permission_type, permission_model))
q = q.filter(self._get_filter_query(terms))
q = self._add_owner_query(q, [k], contact_id)
q = self._add_extra_params_query(q, k, v)
unions.append(q)
# Sort by title:
# FIXME: This only orders by `title` if title was the matching property
query = query.union(*unions)
query = query.order_by(case(
[(self.record_type.property == "title", self.record_type.content)],
else_=literal("ZZZZZ")))
return query
def counts(self, terms, group_by_type=True, types=None, contact_id=None,
extra_params={}, extra_columns={}):
model_names = self._get_grouped_types(types, extra_params)
query = db.session.query(
self.record_type.type, func.count(distinct(
self.record_type.key)), literal(""))
query = query.filter(self._get_type_query(model_names))
query = query.filter(self._get_filter_query(terms))
query = self._add_owner_query(query, types, contact_id)
query = query.group_by(self.record_type.type)
all_extra_columns = dict(extra_columns.items() +
[(p, p) for p in extra_params
if p not in extra_columns])
if not all_extra_columns:
return query.all()
# Add extra_params and extra_colums:
for k, v in all_extra_columns.iteritems():
q = db.session.query(
self.record_type.type, func.count(
distinct(self.record_type.key)), literal(k))
q = q.filter(self._get_type_query([v]))
q = q.filter(self._get_filter_query(terms))
q = self._add_owner_query(q, [v], contact_id)
q = self._add_extra_params_query(q, v, extra_params.get(k, None))
q = q.group_by(self.record_type.type)
query = query.union(q)
return query.all()
Indexer = MysqlIndexer
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging utilities."""
# pylint: disable=unused-import
# pylint: disable=g-bad-import-order
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
import threading
import six
from tensorflow.python.util.tf_export import tf_export
# Don't use this directly. Use _get_logger() instead.
_logger = None
_logger_lock = threading.Lock()
def _get_logger():
global _logger
# Use double-checked locking to avoid taking lock unnecessarily.
if _logger:
return _logger
_logger_lock.acquire()
try:
if _logger:
return _logger
# Scope the TensorFlow logger to not conflict with users' loggers.
logger = _logging.getLogger('tensorflow')
# Don't further configure the TensorFlow logger if the root logger is
# already configured. This prevents double logging in those cases.
if not _logging.getLogger().handlers:
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells.
if _sys.ps1: _interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# If we are in an interactive environment (like Jupyter), set loglevel
# to INFO and pipe the output to stdout.
if _interactive:
logger.setLevel(INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
# Add the output handler.
_handler = _logging.StreamHandler(_logging_target)
_handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT, None))
logger.addHandler(_handler)
_logger = logger
return _logger
finally:
_logger_lock.release()
@tf_export('logging.log')
def log(level, msg, *args, **kwargs):
_get_logger().log(level, msg, *args, **kwargs)
@tf_export('logging.debug')
def debug(msg, *args, **kwargs):
_get_logger().debug(msg, *args, **kwargs)
@tf_export('logging.error')
def error(msg, *args, **kwargs):
_get_logger().error(msg, *args, **kwargs)
@tf_export('logging.fatal')
def fatal(msg, *args, **kwargs):
_get_logger().fatal(msg, *args, **kwargs)
@tf_export('logging.info')
def info(msg, *args, **kwargs):
_get_logger().info(msg, *args, **kwargs)
@tf_export('logging.warn')
def warn(msg, *args, **kwargs):
_get_logger().warn(msg, *args, **kwargs)
@tf_export('logging.warning')
def warning(msg, *args, **kwargs):
_get_logger().warning(msg, *args, **kwargs)
_level_names = {
FATAL: 'FATAL',
ERROR: 'ERROR',
WARN: 'WARN',
INFO: 'INFO',
DEBUG: 'DEBUG',
}
# Mask to convert integer thread ids to unsigned quantities for logging
# purposes
_THREAD_ID_MASK = 2 * _sys.maxsize + 1
_log_prefix = None # later set to google2_log_prefix
# Counter to keep track of number of log entries per token.
_log_counter_per_token = {}
@tf_export('logging.TaskLevelStatusMessage')
def TaskLevelStatusMessage(msg):
error(msg)
@tf_export('logging.flush')
def flush():
raise NotImplementedError()
# Code below is taken from pyglib/logging
@tf_export('logging.vlog')
def vlog(level, msg, *args, **kwargs):
_get_logger().log(level, msg, *args, **kwargs)
def _GetNextLogCountPerToken(token):
"""Wrapper for _log_counter_per_token.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0)
"""
global _log_counter_per_token # pylint: disable=global-variable-not-assigned
_log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)
return _log_counter_per_token[token]
@tf_export('logging.log_every_n')
def log_every_n(level, msg, n, *args):
"""Log 'msg % args' at level 'level' once per 'n' times.
Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
Not threadsafe.
Args:
level: The level at which to log.
msg: The message to be logged.
n: The number of times this should be called before it is logged.
*args: The args to be substituted into the msg.
"""
count = _GetNextLogCountPerToken(_GetFileAndLine())
log_if(level, msg, not (count % n), *args)
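# Usage sketch (illustrative): inside a tight loop, emit only every 100th message.
#   for step in range(1000):
#     log_every_n(INFO, 'processed step %d', 100, step)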
@tf_export('logging.log_first_n')
def log_first_n(level, msg, n, *args): # pylint: disable=g-bad-name
"""Log 'msg % args' at level 'level' only first 'n' times.
Not threadsafe.
Args:
level: The level at which to log.
msg: The message to be logged.
n: The number of times this should be called before it is logged.
*args: The args to be substituted into the msg.
"""
count = _GetNextLogCountPerToken(_GetFileAndLine())
log_if(level, msg, count < n, *args)
@tf_export('logging.log_if')
def log_if(level, msg, condition, *args):
"""Log 'msg % args' at level 'level' only if condition is fulfilled."""
if condition:
vlog(level, msg, *args)
def _GetFileAndLine():
"""Returns (filename, linenumber) for the stack frame."""
# Use sys._getframe(). This avoids creating a traceback object.
# pylint: disable=protected-access
f = _sys._getframe()
# pylint: enable=protected-access
our_file = f.f_code.co_filename
f = f.f_back
while f:
code = f.f_code
if code.co_filename != our_file:
return (code.co_filename, f.f_lineno)
f = f.f_back
return ('<unknown>', 0)
def google2_log_prefix(level, timestamp=None, file_and_line=None):
"""Assemble a logline prefix using the google2 format."""
# pylint: disable=global-variable-not-assigned
global _level_names
# pylint: enable=global-variable-not-assigned
# Record current time
now = timestamp or _time.time()
now_tuple = _time.localtime(now)
now_microsecond = int(1e6 * (now % 1.0))
(filename, line) = file_and_line or _GetFileAndLine()
basename = _os.path.basename(filename)
# Severity string
severity = 'I'
if level in _level_names:
severity = _level_names[level][0]
s = '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] ' % (
severity,
now_tuple[1], # month
now_tuple[2], # day
now_tuple[3], # hour
now_tuple[4], # min
now_tuple[5], # sec
now_microsecond,
_get_thread_id(),
basename,
line)
return s
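# Example prefix produced above (illustrative values):
#   'I0312 14:05:07.123456  4321 my_module.py:42] '
# i.e. severity letter, month+day, wall-clock time, thread id, file:line.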
@tf_export('logging.get_verbosity')
def get_verbosity():
"""Return how much logging output will be produced."""
return _get_logger().getEffectiveLevel()
@tf_export('logging.set_verbosity')
def set_verbosity(v):
"""Sets the threshold for what messages will be logged."""
_get_logger().setLevel(v)
def _get_thread_id():
"""Get id of current thread, suitable for logging as an unsigned quantity."""
# pylint: disable=protected-access
thread_id = six.moves._thread.get_ident()
# pylint:enable=protected-access
return thread_id & _THREAD_ID_MASK
_log_prefix = google2_log_prefix
tf_export('logging.DEBUG').export_constant(__name__, 'DEBUG')
tf_export('logging.ERROR').export_constant(__name__, 'ERROR')
tf_export('logging.FATAL').export_constant(__name__, 'FATAL')
tf_export('logging.INFO').export_constant(__name__, 'INFO')
tf_export('logging.WARN').export_constant(__name__, 'WARN')
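if __name__ == '__main__':
  # Minimal usage sketch (illustrative only; not part of the original module).
  # It relies on the INFO/WARN level constants imported earlier in this module.
  set_verbosity(INFO)
  for _i in range(10):
    # With n=3, this logs on iterations 0, 3, 6 and 9 of this call site.
    log_every_n(INFO, 'processed %d batches', 3, _i)
  log_first_n(WARN, 'deprecated flag used', 1)  # logged at most once here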
|
|
from flask import Flask, request, redirect, session
import lob
import twilio.twiml
from parse_rest.connection import register
from parse_rest.datatypes import Object
from parse_rest.query import QueryResourceDoesNotExist
import time
import re
from flask.ext.cors import CORS
app = Flask(__name__)
app.config.from_object(__name__)
CORS(app)
userClassName = "user"
User = Object.factory(userClassName)
citationClassName = "citations"
Citation = Object.factory(citationClassName)
violationClassName = "violations"
Violation = Object.factory(violationClassName)
lob.api_key = 'test_00f79ccdc57159f0a24923537e716623ebb'
register("vvMc0yrmqU1kbU2nOieYTQGV0QzzfVQg4kHhQWWL", "waZK2MtE4TMszpU0mYSbkB9VmgLdLxfYf8XCuN7D", master_key="YPyRj37OFlUjHmmpE8YY3pfbZs7FqnBngxX4tezk")
@app.route("/payment", methods=['POST'])
def handle_payment():
#TODO
pass
@app.route("/", methods=['POST'])
def hello():
"""Main method to handle incoming SMS."""
from_number = request.values.get('From')
body = request.values.get('Body')
print request.values
print from_number
print body
try:
user = User.Query.get(phone_number=from_number)
except QueryResourceDoesNotExist:
user = User(first_name=None, last_name=None, phone_number=None, birthdate=None)
body = from_number
if user and user.first_name and user.last_name and user.phone_number and user.birthdate:
resp = handle_inquery(user, body)
else:
resp = signup_new_user(user, body)
return str(resp)
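# Local testing sketch (illustrative; the phone number is an assumption and 5000
# is the Flask development-server default port). Twilio delivers 'From' and
# 'Body' as form parameters, which request.values reads above:
#   curl -X POST http://localhost:5000/ -d 'From=+13145550123' -d 'Body=START'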
def signup_new_user(user, body=None):
"""Registers new user."""
resp = twilio.twiml.Response()
if not user.phone_number:
user.phone_number = body
user.save()
message = "Welcome to Proactive Law! Tell us your first name to get started and to subscribe to court proceedings."
elif not user.first_name:
user.first_name = body
user.save()
message = "Hello %s, what's your last name?" % user.first_name
elif not user.last_name:
user.last_name = body
user.save()
message = "Welcome %s %s, we just need your birthdate (MM/DD/YYYY) to verify your identity" % (user.first_name, user.last_name)
elif not user.birthdate:
user.birthdate = body
user.save()
message = "%s, Thanks for signing up for Proactive Law! How can we help you? (You can type START for a list of options)" % user.first_name
else:
"Welcome %s %s, how can we help you today? (You can type START for a list of options)" % (user.first_name, user.last_name)
resp.sms(message)
print message
return resp
def handle_inquery(user, body=None):
"""Handles incoming requests."""
resp = twilio.twiml.Response()
if body == "START":
message = """Proactive Law is here to help you with your legal questions, 24/7.\n
Type CITATIONS to view outstanding citations.\n
VIOLATIONS to view outstanding violations.\n
WARRANTS to view outstanding warrants.\n
PAY to pay outstanding bills."""
elif body == "CITATIONS":
try:
citations = Citation.Query.filter(
first_name=user.first_name, last_name=user.last_name,
date_of_birth=user.birthdate).order_by("citation_number")
except QueryResourceDoesNotExist:
message = "Congratulations! You currently have no oustanding citations"
resp.sms(message)
return resp
citation_ids = []
for citation in citations:
citation_ids.append(citation.citation_number)
"""
try:
violations = Violation.Query.filter(
citation_number__in=citation_ids, status__nin=["CLOSED", "DISMISS WITHOUT COSTS"])
except QueryResourceDoesNotExist:
message = "Congratulations! You currently have no outstanding citations"
resp.sms(message)
return resp
outstanding_citations = []
for violation in violations:
if violation.citation_number not in outstanding_citations:
outstanding_citations.append(violation.citation_number)
"""
message = "You have %s outstanding citations:\n" % len(citations)
index = 1
for citation in citations:
#if citation.citation_number in outstanding_citations:
message = message + "%s) Citation number: %s with court proceeding date: %s at: %s, %s\n\n" % (
index, int(citation.citation_number), citation.court_date.split(" ")[0], citation.court_address, citation.court_location.title())
index = index + 1
message = message + "Reply with the citation number to view a specific citation or enter START to view the main menu\n"
# Match a citation
    elif re.match('^[0-9]{8,9}$', body):
pass
#TODO
elif body == "VIOLATIONS":
try:
citations = Citation.Query.filter(
first_name=user.first_name, last_name=user.last_name,
date_of_birth=user.birthdate).order_by("citation_number")
except QueryResourceDoesNotExist:
message = "Congratulations! You currently have no outstanding violations"
resp.sms(message)
return resp
citation_ids = []
for citation in citations:
citation_ids.append(citation.citation_number)
try:
violations = Violation.Query.filter(
citation_number__in=citation_ids, status__nin=["CLOSED", "DISMISS WITHOUT COSTS"]).order_by("violation_number")
except QueryResourceDoesNotExist:
message = "Congratulations! You currently have no outstanding violations"
resp.sms(message)
return resp
message = "You have %s outstanding violations:\n" % len(violations)
total_amount = 0
for i, violation in enumerate(violations):
message = message + "%s) Violation number: %s for: %s with fines: $%s" % (
i+1, violation.violation_number, violation.violation_description, violation.court_cost + violation.fine_amount)
if violation.warrant_status:
message = message + " and warrant: %s issued\n\n" % violation.warrant_number
else:
message = message + "\n\n"
total_amount = total_amount + violation.court_cost + violation.fine_amount
message = message + "Your total amount owning is: $%s\n" % total_amount
message = message + "Reply PAY violation number to pay a specific violation or enter START to view the main menu\n"
elif body == "WARRANTS":
try:
citations = Citation.Query.filter(
first_name=user.first_name, last_name=user.last_name,
date_of_birth=user.birthdate).order_by("-citation_number")
except QueryResourceDoesNotExist:
message = "Congratulations! You currently have no outstanding warrants"
resp.sms(message)
return resp
citation_ids = []
for citation in citations:
citation_ids.append(citation.citation_number)
try:
violations = Violation.Query.filter(
citation_number__in=citation_ids, status__nin=["CLOSED", "DISMISS WITHOUT COSTS"], warrant_status=True)
except QueryResourceDoesNotExist:
message = "Congratulations! You currently have no outstanding warrants"
resp.sms(message)
return resp
message = "You have %s outstanding warrants:\n" % len(violations)
for i, violation in enumerate(violations):
message = message + "%s) Warrant number: %s for: %s with violation number: %s and fines: $%s\n\n" % (
i+1, violation.warrant_number, violation.violation_description, violation.violation_number, violation.court_cost + violation.fine_amount)
message = message + "Reply PAY violation number to pay a specific violation or enter START to view the main menu\n"
elif body.startswith("PAY"):
if body == "PAY":
message = """Please reply PAY violation number to pay a specific violation.\n
To view your outstanding violations, reply VIOLATIONS."""
resp.sms(message)
return resp
violation_id = body.strip().split(" ")[1]
try:
violation = Violation.Query.get(violation_number=violation_id)
except QueryResourceDoesNotExist:
message = "Sorry, the violation number you entered was not found. Please try again or reply START to view the main menu."
resp.sms(message)
return resp
# message = "You are about to pay $%s for Violation number: %s for: %s\n" % (violation.court_cost + violation.fine_amount, violation.violation_number, violation.violation_description)
# message = message + """Which payment method would you liked to use?\n
# SMS %s to pay by phone.\n
# Reply CHECK and attach a picture of your check via MMS to pay by cheque\n
# MONEYORDER and attach a picture of your money order via MMS to pay by money order.\n""" % violation.violation_number
# elif body.startswith("SMS"):
# if body == "SMS":
# message = """Please reply SMS violation number to pay a specific violation.\n
# To view your outstanding violations, reply VIOLATIONS."""
# resp.sms(message)
# return resp
# violation_id = body.strip().split(" ")[1]
# try:
# violation = Violation.Query.get(violation_number=violation_id)
# except QueryResourceDoesNotExist:
# message = "Sorry, the violation number you entered was not found. Please try again or reply START to view the main menu."
# resp.sms(message)
# return resp
try:
citation = Citation.Query.get(citation_number=violation.citation_number)
except QueryResourceDoesNotExist:
message = "Sorry, the violation number you entered was not found. Please try again or reply START to view the main menu."
resp.sms(message)
return resp
cheque = lob.Check.create(
description = violation.violation_number,
to_address = {
'name': citation.court_location + ' MUNICIPALITY COURT',
'address_line1': citation.court_address,
'address_city': citation.court_location.title(),
'address_state': 'MO',
'address_zip': '63301',
'address_country': 'US'
},
bank_account = 'bank_ad79e048fe93310',
amount = violation.court_cost + violation.fine_amount,
memo = ("%s %s %s" % (user.first_name, user.last_name, violation.violation_number))[0:39],
logo = 'https://s3-us-west-2.amazonaws.com/lob-assets/lob_check_logo.png',
file = '<h2 style=\'padding-top:4in;\'>Check mailed on your behalf to {{court}} for violation {{violation}}</h2>',
data = {
'court': citation.court_location + ' MUNICIPALITY COURT',
'violation': violation.violation_number
}
)
print cheque
time.sleep(3)
#message = "Please text PAYCOURT to shortcode 77345 and we will reply with a confirmation once your payment is processed"
message = twilio.twiml.Message("Thanks for paying your violation! Here is the cheque that we mailed out on your behalf.\n")
message.media(cheque.thumbnails[0].large)
resp.append(message)
#resp.media(cheque.thumbnails[0].large)
#with resp.message() as message:
#message.body = "Thanks for paying your violation! Here is the cheque that we will mail out on your behalf\n"
#message.media = cheque.thumbnails[0].large
print message
return resp
else:
message = "Sorry, we did not understand your command, please enter START to view the main menu.\n"
resp.sms(message)
print message
return resp
def run():
app.run(debug=True)
if __name__ == "__main__":
app.run(debug=True)
|
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
- Use sentry for error logging
- Use opbeat for error reporting
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',)
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + \
RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES
# opbeat integration
# See https://opbeat.com/languages/django/
INSTALLED_APPS += ('opbeat.contrib.django',)
OPBEAT = {
'ORGANIZATION_ID': env('DJANGO_OPBEAT_ORGANIZATION_ID'),
'APP_ID': env('DJANGO_OPBEAT_APP_ID'),
'SECRET_TOKEN': env('DJANGO_OPBEAT_SECRET_TOKEN')
}
MIDDLEWARE_CLASSES = (
'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',
) + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['venkat.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE
STATIC_URL = MEDIA_URL
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='icecream_project <[email protected]>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[icecream_project] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
NEW_RELIC_LICENSE_KEY = env('NEW_RELIC_LICENSE_KEY')
NEW_RELIC_APP_NAME = 'icecream_project'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
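# Illustrative result (with the default REDIS_URL above): LOCATION becomes
# "redis://127.0.0.1:6379/0", i.e. the Redis URL with database number 0 appended.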
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry'],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe module to ensure a checkout is consistent on a bot."""
from recipe_engine import recipe_api
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
class BotUpdateApi(recipe_api.RecipeApi):
def __init__(self, properties, deps_revision_overrides, fail_patch, *args,
**kwargs):
self._deps_revision_overrides = deps_revision_overrides
self._fail_patch = fail_patch
self._last_returned_properties = {}
super(BotUpdateApi, self).__init__(*args, **kwargs)
def __call__(self, name, cmd, **kwargs):
"""Wrapper for easy calling of bot_update."""
assert isinstance(cmd, (list, tuple))
bot_update_path = self.resource('bot_update.py')
kwargs.setdefault('infra_step', True)
    # If a Git HTTP request stays below GIT_HTTP_LOW_SPEED_LIMIT bytes/second
    # for GIT_HTTP_LOW_SPEED_TIME seconds, the request is aborted. Otherwise,
    # it would wait for the global timeout to be reached.
env = {
'GIT_HTTP_LOW_SPEED_LIMIT': '102400', # in bytes
'GIT_HTTP_LOW_SPEED_TIME': 1800, # in seconds
}
if self.m.buildbucket.build.id == 0:
env['DEPOT_TOOLS_COLLECT_METRICS'] = '0'
else:
env['DEPOT_TOOLS_REPORT_BUILD'] = '%s/%s/%s/%s' % (
self.m.buildbucket.build.builder.project,
self.m.buildbucket.build.builder.bucket,
self.m.buildbucket.build.builder.builder,
self.m.buildbucket.build.id)
with self.m.context(env=env):
with self.m.depot_tools.on_path():
return self.m.step(name,
['python3', '-u', bot_update_path] + cmd,
**kwargs)
@property
def last_returned_properties(self):
return self._last_returned_properties
def _get_commit_repo_path(self, commit, gclient_config):
"""Returns local path to the repo that the commit is associated with.
The commit must be a self.m.buildbucket.common_pb2.GitilesCommit.
If commit does not specify any repo, returns name of the first solution.
Raises an InfraFailure if the commit specifies a repo unexpected by gclient.
"""
assert gclient_config.solutions, 'gclient_config.solutions is empty'
# if repo is not specified, choose the first solution.
if not (commit.host and commit.project):
return gclient_config.solutions[0].name
assert commit.host and commit.project
repo_url = self.m.gitiles.unparse_repo_url(commit.host, commit.project)
repo_path = self.m.gclient.get_repo_path(
repo_url, gclient_config=gclient_config)
if not repo_path:
raise self.m.step.InfraFailure(
'invalid (host, project) pair in '
'buildbucket.build.input.gitiles_commit: '
'(%s, %s) does not match any of configured gclient solutions '
'and not present in gclient_config.repo_path_map' % (
commit.host, commit.project))
return repo_path
def ensure_checkout(self,
gclient_config=None,
suffix=None,
patch=True,
update_presentation=True,
patch_root=None,
with_branch_heads=False,
with_tags=False,
no_fetch_tags=False,
refs=None,
clobber=False,
root_solution_revision=None,
gerrit_no_reset=False,
gerrit_no_rebase_patch_ref=False,
assert_one_gerrit_change=True,
patch_refs=None,
ignore_input_commit=False,
add_blamelists=False,
set_output_commit=False,
step_test_data=None,
enforce_fetch=False,
**kwargs):
"""
Args:
* gclient_config: The gclient configuration to use when running bot_update.
If omitted, the current gclient configuration is used.
* no_fetch_tags: When true, the root git repo being checked out will not
fetch any tags referenced from the references being fetched. When a repo
has many references, it can become a performance bottleneck, so avoid
tags if the checkout will not need them present.
* ignore_input_commit: if True, ignore api.buildbucket.gitiles_commit.
Exists for historical reasons. Please do not use.
* add_blamelists: if True, add blamelist pins for all of the repos that had
revisions specified in the gclient config.
* set_output_commit: if True, mark the checked out commit as the
primary output commit of this build, i.e. call
api.buildbucket.set_output_gitiles_commit.
In case of multiple repos, the repo is the one specified in
api.buildbucket.gitiles_commit or the first configured solution.
When sorting builds by commit position, this commit will be used.
Requires falsy ignore_input_commit.
    * step_test_data: a no-argument callable that returns test bot_update.py
      output. Use test_api.output_json to generate test data.
* enforce_fetch: Enforce a new fetch to refresh the git cache, even if the
solution revision passed in already exists in the current git cache.
* assert_one_gerrit_change: if True, assert that there is at most one
change in self.m.buildbucket.build.input.gerrit_changes, because
bot_update module ONLY supports one change. Users may specify a change
via tryserver.set_change() and explicitly set this flag False.
"""
assert not (ignore_input_commit and set_output_commit)
if assert_one_gerrit_change:
assert len(self.m.buildbucket.build.input.gerrit_changes) <= 1, (
'bot_update does not support more than one '
'buildbucket.build.input.gerrit_changes')
refs = refs or []
# We can re-use the gclient spec from the gclient module, since all the
# data bot_update needs is already configured into the gclient spec.
cfg = gclient_config or self.m.gclient.c
assert cfg is not None, (
'missing gclient_config or forgot api.gclient.set_config(...) before?')
    # Construct our bot_update command. This needs to be inclusive of
    # everything bot_update needs to know:
patch_root = patch_root or self.m.gclient.get_gerrit_patch_root(
gclient_config=cfg)
# Allow patched project's revision if necessary.
# This is important for projects which are checked out as DEPS of the
# gclient solution.
self.m.gclient.set_patch_repo_revision(cfg)
reverse_rev_map = self.m.gclient.got_revision_reverse_mapping(cfg)
flags = [
# What do we want to check out (spec/root/rev/reverse_rev_map).
['--spec-path', self.m.raw_io.input(
self.m.gclient.config_to_pythonish(cfg))],
['--patch_root', patch_root],
['--revision_mapping_file', self.m.json.input(reverse_rev_map)],
['--git-cache-dir', cfg.cache_dir],
['--cleanup-dir', self.m.path['cleanup'].join('bot_update')],
# Hookups to JSON output back into recipes.
['--output_json', self.m.json.output()],
]
# How to find the patch, if any
if patch:
repo_url = self.m.tryserver.gerrit_change_repo_url
fetch_ref = self.m.tryserver.gerrit_change_fetch_ref
target_ref = self.m.tryserver.gerrit_change_target_ref
if repo_url and fetch_ref:
flags.append([
'--patch_ref',
'%s@%s:%s' % (repo_url, target_ref, fetch_ref),
])
if patch_refs:
flags.extend(
['--patch_ref', patch_ref]
for patch_ref in patch_refs)
# Compute requested revisions.
revisions = {}
for solution in cfg.solutions:
if solution.revision:
revisions[solution.name] = solution.revision
    # HACK: the ensure_checkout API must be redesigned so that we don't pass
    # such parameters. The existing semantics are too opinionated.
in_commit = self.m.buildbucket.gitiles_commit
in_commit_rev = in_commit.id or in_commit.ref
if not ignore_input_commit and in_commit_rev:
# Note: this is not entirely correct. build.input.gitiles_commit
# definition says "The Gitiles commit to run against.".
# However, here we ignore it if the config specified a revision.
# This is necessary because existing builders rely on this behavior,
# e.g. they want to force refs/heads/main at the config level.
in_commit_repo_path = self._get_commit_repo_path(in_commit, cfg)
# The repo_path that comes back on Windows will have backslashes, which
# won't match the paths that the gclient configs and bot_update script use
in_commit_repo_path = in_commit_repo_path.replace(self.m.path.sep, '/')
revisions[in_commit_repo_path] = (
revisions.get(in_commit_repo_path) or in_commit_rev)
parsed_solution_urls = set(
self.m.gitiles.parse_repo_url(s.url) for s in cfg.solutions)
if (in_commit.id and in_commit.ref
and (in_commit.host, in_commit.project) in parsed_solution_urls):
refs = [in_commit.ref] + refs
# Guarantee that first solution has a revision.
# TODO(machenbach): We should explicitly pass HEAD for ALL solutions
# that don't specify anything else.
first_sol = cfg.solutions[0].name
revisions[first_sol] = revisions.get(first_sol) or 'HEAD'
if cfg.revisions:
# Only update with non-empty values. Some recipe might otherwise
# overwrite the HEAD default with an empty string.
revisions.update(
(k, v) for k, v in cfg.revisions.items() if v)
if cfg.solutions and root_solution_revision:
revisions[first_sol] = root_solution_revision
# Allow for overrides required to bisect into rolls.
revisions.update(self._deps_revision_overrides)
# Compute command-line parameters for requested revisions.
# Also collect all fixed revisions to simulate them in the json output.
    # Fixed revisions are the explicit input revisions of bot_update.py, i.e.
# every command line parameter "--revision name@value".
fixed_revisions = {}
for name, revision in sorted(revisions.items()):
fixed_revision = self.m.gclient.resolve_revision(revision)
if fixed_revision:
fixed_revisions[name] = fixed_revision
if fixed_revision.upper() == 'HEAD' and patch:
# Sync to correct destination ref
fixed_revision = self._destination_ref(cfg, name)
# If we're syncing to a ref, we want to make sure it exists before
# trying to check it out.
if (fixed_revision.startswith('refs/') and
# TODO(crbug.com/874501): fetching additional refs is currently
# only supported for the root solution. We should investigate
# supporting it for other dependencies.
cfg.solutions and
cfg.solutions[0].name == name):
# Handle the "ref:revision" syntax, e.g.
# refs/branch-heads/4.2:deadbeef
refs.append(fixed_revision.split(':')[0])
flags.append(['--revision', '%s@%s' % (name, fixed_revision)])
for ref in refs:
assert not ref.startswith('refs/remotes/'), (
'The "refs/remotes/*" syntax is not supported.\n'
'The "remotes" syntax is dependent on the way the local repo is '
'configured, and while there are defaults that can often be '
'assumed, there is no guarantee the mapping will always be done in '
'a particular way.')
# Add extra fetch refspecs.
for ref in refs:
flags.append(['--refs', ref])
# Filter out flags that are None.
cmd = [item for flag_set in flags
for item in flag_set if flag_set[1] is not None]
if clobber:
cmd.append('--clobber')
if with_branch_heads or cfg.with_branch_heads:
cmd.append('--with_branch_heads')
if with_tags or cfg.with_tags:
cmd.append('--with_tags')
if gerrit_no_reset:
cmd.append('--gerrit_no_reset')
if enforce_fetch:
cmd.append('--enforce_fetch')
if no_fetch_tags:
cmd.append('--no_fetch_tags')
if gerrit_no_rebase_patch_ref:
cmd.append('--gerrit_no_rebase_patch_ref')
if self.m.properties.get('bot_update_experiments'):
cmd.append('--experiments=%s' %
','.join(self.m.properties['bot_update_experiments']))
# Inject Json output for testing.
first_sln = cfg.solutions[0].name
step_test_data = step_test_data or (lambda: self.test_api.output_json(
patch_root, first_sln, reverse_rev_map, self._fail_patch,
fixed_revisions=fixed_revisions))
name = 'bot_update'
if not patch:
name += ' (without patch)'
if suffix:
name += ' - %s' % suffix
    # Ah hah! Now that everything is in place, let's run bot_update!
step_result = None
try:
# Error code 88 is the 'patch failure' code for patch apply failure.
step_result = self(name, cmd, step_test_data=step_test_data,
ok_ret=(0, 88), **kwargs)
except self.m.step.StepFailure as f:
step_result = f.result
raise
finally:
if step_result and step_result.json.output:
result = step_result.json.output
self._last_returned_properties = result.get('properties', {})
if update_presentation:
# Set properties such as got_revision.
for prop_name, prop_value in (
self.last_returned_properties.items()):
step_result.presentation.properties[prop_name] = prop_value
# Add helpful step description in the step UI.
if 'step_text' in result:
step_text = result['step_text']
step_result.presentation.step_text = step_text
if result.get('patch_failure'):
patch_body = result.get('failed_patch_body')
if patch_body:
step_result.presentation.logs['patch error'] = (
patch_body.splitlines())
if result.get('patch_apply_return_code') == 3:
# This is download failure, hence an infra failure.
raise self.m.step.InfraFailure(
'Patch failure: Git reported a download failure')
else:
# Mark it as failure so we provide useful logs
# https://crbug.com/1207685
step_result.presentation.status = 'FAILURE'
# This is actual patch failure.
self.m.tryserver.set_patch_failure_tryjob_result()
self.m.cq.set_do_not_retry_build()
raise self.m.step.StepFailure(
'Patch failure: See patch error log attached to bot_update. '
'Try rebasing?')
if add_blamelists and 'manifest' in result:
blamelist_pins = []
for name in sorted(revisions):
m = result['manifest'][name]
pin = {'id': m['revision']}
pin['host'], pin['project'] = (
self.m.gitiles.parse_repo_url(m['repository']))
blamelist_pins.append(pin)
result.blamelist_pins = blamelist_pins
self.m.milo.show_blamelist_for(blamelist_pins)
# Set output commit of the build.
if (set_output_commit and
'got_revision' in self._last_returned_properties):
# As of April 2019, got_revision describes the output commit,
# the same commit that Build.output.gitiles_commit describes.
# In particular, users tend to set got_revision to make Milo display
# it. Derive output commit from got_revision.
out_commit = common_pb2.GitilesCommit(
id=self._last_returned_properties['got_revision'],
)
out_solution = reverse_rev_map['got_revision']
out_manifest = result['manifest'][out_solution]
assert out_manifest['revision'] == out_commit.id, (
out_manifest, out_commit.id)
out_commit.host, out_commit.project = (
self.m.gitiles.parse_repo_url(out_manifest['repository'])
)
# Determine the output ref.
got_revision_cp = self._last_returned_properties.get('got_revision_cp')
in_rev = self.m.gclient.resolve_revision(revisions.get(out_solution))
if not in_rev:
in_rev = 'HEAD'
if got_revision_cp:
# If commit position string is available, read the ref from there.
out_commit.ref, out_commit.position = (
self.m.commit_position.parse(got_revision_cp))
elif in_rev.startswith('refs/'):
# If we were asked to check out a specific ref, use it as output
# ref.
out_commit.ref = in_rev
elif in_rev == 'HEAD':
# bot_update.py interprets HEAD as refs/heads/main
out_commit.ref = 'refs/heads/main'
elif out_commit.id == in_commit.id and in_commit.ref:
# Derive output ref from the input ref.
out_commit.ref = in_commit.ref
else: # pragma: no cover
assert False, (
'Unsupported case. '
'Call buildbucket.set_output_gitiles_commit directly.'
)
self.m.buildbucket.set_output_gitiles_commit(out_commit)
# Set the "checkout" path for the main solution.
# This is used by the Chromium module to figure out where to look for
# the checkout.
# bot_update actually just sets root to be the folder name of the
# first solution.
if (result.get('did_run')
and 'checkout' not in self.m.path
and 'root' in result):
co_root = result['root']
cwd = self.m.context.cwd or self.m.path['start_dir']
self.m.path['checkout'] = cwd.join(*co_root.split(self.m.path.sep))
return step_result
def _destination_ref(self, cfg, path):
"""Returns the ref branch of a CL for the matching project if available or
HEAD otherwise.
If there's no Gerrit CL associated with the run, returns 'HEAD'.
Otherwise this queries Gerrit for the correct destination ref, which
might differ from refs/heads/main.
Args:
* cfg: The used gclient config.
* path: The DEPS path of the project this prefix is for. E.g. 'src' or
'src/v8'. The query will only be made for the project that matches
the CL's project.
    Returns:
      A destination ref as understood by bot_update.py if one is available and
      it differs from refs/heads/main; 'HEAD' otherwise.
"""
# Ignore project paths other than the one belonging to the current CL.
patch_path = self.m.gclient.get_gerrit_patch_root(gclient_config=cfg)
if patch_path:
patch_path = patch_path.replace(self.m.path.sep, '/')
if not patch_path or path != patch_path:
return 'HEAD'
return self.m.tryserver.gerrit_change_target_ref
def resolve_fixed_revision(self, bot_update_json, name):
"""Sets a fixed revision for a single dependency using project revision
properties.
"""
rev_properties = self.get_project_revision_properties(name)
self.m.gclient.c.revisions = {
name: bot_update_json['properties'][rev_properties[0]]
}
def _resolve_fixed_revisions(self, bot_update_json):
"""Sets all fixed revisions from the first sync to their respective
got_X_revision values.
If on the first sync, a revision was requested to be HEAD, this avoids
using HEAD potentially resolving to a different revision on the second
sync. Instead, we sync explicitly to whatever was checked out the first
time.
Example (chromium trybot used with v8 patch):
First sync was called with
bot_update.py --revision src@abc --revision src/v8@HEAD
Fixed revisions are: src, src/v8
Got_revision_mapping: src->got_revision, src/v8->got_v8_revision
got_revision = abc, got_v8_revision = deadbeef
Second sync will be called with
bot_update.py --revision src@abc --revision src/v8@deadbeef
Example (chromium trybot used with chromium DEPS change, changing v8 from
"v8_before" to "v8_after"):
First sync was called with
bot_update.py --revision src@abc
Fixed revisions are: src
Got_revision_mapping: src->got_revision, src/v8->got_v8_revision
got_revision = abc, got_v8_revision = v8_after
Second sync will be called with
bot_update.py --revision src@abc
When deapplying the patch, v8 will be synced to v8_before.
"""
for name in bot_update_json.get('fixed_revisions', {}):
rev_properties = self.get_project_revision_properties(name)
if (rev_properties and
bot_update_json['properties'].get(rev_properties[0])):
self.m.gclient.c.revisions[name] = str(
bot_update_json['properties'][rev_properties[0]])
# TODO(machenbach): Replace usages of this method eventually by direct calls
# to the manifest output.
def get_project_revision_properties(self, project_name, gclient_config=None):
"""Returns all property names used for storing the checked-out revision of
a given project.
Args:
* project_name (str): The name of a checked-out project as deps path, e.g.
src or src/v8.
* gclient_config: The gclient configuration to use. If omitted, the current
gclient configuration is used.
Returns (list of str): All properties that'll hold the checked-out revision
of the given project. An empty list if no such properties exist.
"""
cfg = gclient_config or self.m.gclient.c
# Sort for determinism. We might have several properties for the same
# project, e.g. got_revision and got_webrtc_revision.
rev_reverse_map = self.m.gclient.got_revision_reverse_mapping(cfg)
return sorted(
prop
for prop, project in rev_reverse_map.items()
if project == project_name
)
def deapply_patch(self, bot_update_step):
"""Deapplies a patch, taking care of DEPS and solution revisions properly.
"""
bot_update_json = bot_update_step.json.output
# We only override first solution here to make sure that we correctly revert
# changes to DEPS file, which is particularly important for auto-rolls. It
    # is also important that we do not assume that the corresponding revision is
# stored in the 'got_revision' as some gclient configs change the default
# mapping for their own purposes.
first_solution_name = self.m.gclient.c.solutions[0].name
rev_property = self.get_project_revision_properties(first_solution_name)[0]
self.m.gclient.c.revisions[first_solution_name] = str(
bot_update_json['properties'][rev_property])
self._resolve_fixed_revisions(bot_update_json)
self.ensure_checkout(
patch=False, no_fetch_tags=True, update_presentation=False)
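# Illustrative usage from a consuming recipe (a sketch, not part of this module;
# the 'chromium' gclient config name is an assumption):
#
#   api.gclient.set_config('chromium')
#   bot_update_step = api.bot_update.ensure_checkout(with_branch_heads=True)
#   props = bot_update_step.json.output['properties']
#   got_revision = props.get('got_revision')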
|
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/textlabels.py
__version__=''' $Id$ '''
import string
from reportlab.lib import colors
from reportlab.lib.utils import simpleSplit, _simpleSplit
from reportlab.lib.validators import isNumber, isNumberOrNone, OneOf, isColorOrNone, isString, \
isTextAnchor, isBoxAnchor, isBoolean, NoneOr, isInstanceOf, isNoneOrString, isNoneOrCallable
from reportlab.lib.attrmap import *
from reportlab.pdfbase.pdfmetrics import stringWidth, getAscentDescent
from reportlab.graphics.shapes import Drawing, Group, Circle, Rect, String, STATE_DEFAULTS
from reportlab.graphics.shapes import _PATH_OP_ARG_COUNT, _PATH_OP_NAMES, definePath
from reportlab.graphics.widgetbase import Widget, PropHolder
from reportlab.graphics.shapes import _baseGFontName
_gs = None
_A2BA= {
'x': {0:'n', 45:'ne', 90:'e', 135:'se', 180:'s', 225:'sw', 270:'w', 315: 'nw', -45: 'nw'},
'y': {0:'e', 45:'se', 90:'s', 135:'sw', 180:'w', 225:'nw', 270:'n', 315: 'ne', -45: 'ne'},
}
def _pathNumTrunc(n):
if int(n)==n: return int(n)
return round(n,5)
def _processGlyph(G, truncate=1, pathReverse=0):
O = []
P = []
R = []
for g in G+(('end',),):
op = g[0]
if O and op in ['moveTo', 'moveToClosed','end']:
if O[0]=='moveToClosed':
O = O[1:]
if pathReverse:
for i in range(0,len(P),2):
P[i+1], P[i] = P[i:i+2]
P.reverse()
O.reverse()
O.insert(0,'moveTo')
O.append('closePath')
i = 0
if truncate: P = list(map(_pathNumTrunc,P))
for o in O:
j = i + _PATH_OP_ARG_COUNT[_PATH_OP_NAMES.index(o)]
if o=='closePath':
R.append(o)
else:
R.append((o,)+ tuple(P[i:j]))
i = j
O = []
P = []
O.append(op)
P.extend(g[1:])
return R
def _text2PathDescription(text, x=0, y=0, fontName=_baseGFontName, fontSize=1000,
anchor='start', truncate=1, pathReverse=0):
global _gs
if not _gs:
import _renderPM
_gs = _renderPM.gstate(1,1)
from reportlab.graphics import renderPM
renderPM._setFont(_gs,fontName,fontSize)
P = []
if not anchor=='start':
textLen = stringWidth(text, fontName,fontSize)
if anchor=='end':
x = x-textLen
elif anchor=='middle':
x = x - textLen/2.
for g in _gs._stringPath(text,x,y):
P.extend(_processGlyph(g,truncate=truncate,pathReverse=pathReverse))
return P
def _text2Path(text, x=0, y=0, fontName=_baseGFontName, fontSize=1000,
anchor='start', truncate=1, pathReverse=0):
return definePath(_text2PathDescription(text,x=x,y=y,fontName=fontName,
fontSize=fontSize,anchor=anchor,truncate=truncate,pathReverse=pathReverse))
_BA2TA={'w':'start','nw':'start','sw':'start','e':'end', 'ne': 'end', 'se':'end', 'n':'middle','s':'middle','c':'middle'}
class Label(Widget):
"""A text label to attach to something else, such as a chart axis.
This allows you to specify an offset, angle and many anchor
properties relative to the label's origin. It allows, for example,
angled multiline axis labels.
"""
# fairly straight port of Robin Becker's textbox.py to new widgets
# framework.
_attrMap = AttrMap(
x = AttrMapValue(isNumber,desc=''),
y = AttrMapValue(isNumber,desc=''),
dx = AttrMapValue(isNumber,desc='delta x - offset'),
dy = AttrMapValue(isNumber,desc='delta y - offset'),
angle = AttrMapValue(isNumber,desc='angle of label: default (0), 90 is vertical, 180 is upside down, etc'),
boxAnchor = AttrMapValue(isBoxAnchor,desc='anchoring point of the label'),
boxStrokeColor = AttrMapValue(isColorOrNone,desc='border color of the box'),
boxStrokeWidth = AttrMapValue(isNumber,desc='border width'),
boxFillColor = AttrMapValue(isColorOrNone,desc='the filling color of the box'),
boxTarget = AttrMapValue(OneOf('normal','anti','lo','hi'),desc="one of ('normal','anti','lo','hi')"),
fillColor = AttrMapValue(isColorOrNone,desc='label text color'),
strokeColor = AttrMapValue(isColorOrNone,desc='label text border color'),
strokeWidth = AttrMapValue(isNumber,desc='label text border width'),
text = AttrMapValue(isString,desc='the actual text to display'),
fontName = AttrMapValue(isString,desc='the name of the font used'),
fontSize = AttrMapValue(isNumber,desc='the size of the font'),
leading = AttrMapValue(isNumberOrNone,desc=''),
width = AttrMapValue(isNumberOrNone,desc='the width of the label'),
maxWidth = AttrMapValue(isNumberOrNone,desc='maximum width the label can grow to'),
height = AttrMapValue(isNumberOrNone,desc='the height of the text'),
textAnchor = AttrMapValue(isTextAnchor,desc='the anchoring point of the text inside the label'),
visible = AttrMapValue(isBoolean,desc="True if the label is to be drawn"),
topPadding = AttrMapValue(isNumber,desc='padding at top of box'),
leftPadding = AttrMapValue(isNumber,desc='padding at left of box'),
rightPadding = AttrMapValue(isNumber,desc='padding at right of box'),
bottomPadding = AttrMapValue(isNumber,desc='padding at bottom of box'),
useAscentDescent = AttrMapValue(isBoolean,desc="If True then the font's Ascent & Descent will be used to compute default heights and baseline."),
customDrawChanger = AttrMapValue(isNoneOrCallable,desc="An instance of CustomDrawChanger to modify the behavior at draw time", _advancedUsage=1),
)
def __init__(self,**kw):
self._setKeywords(**kw)
self._setKeywords(
_text = 'Multi-Line\nString',
boxAnchor = 'c',
angle = 0,
x = 0,
y = 0,
dx = 0,
dy = 0,
topPadding = 0,
leftPadding = 0,
rightPadding = 0,
bottomPadding = 0,
boxStrokeWidth = 0.5,
boxStrokeColor = None,
boxTarget = 'normal',
strokeColor = None,
boxFillColor = None,
leading = None,
width = None,
maxWidth = None,
height = None,
fillColor = STATE_DEFAULTS['fillColor'],
fontName = STATE_DEFAULTS['fontName'],
fontSize = STATE_DEFAULTS['fontSize'],
strokeWidth = 0.1,
textAnchor = 'start',
visible = 1,
useAscentDescent = False,
)
def setText(self, text):
"""Set the text property. May contain embedded newline characters.
Called by the containing chart or axis."""
self._text = text
def setOrigin(self, x, y):
"""Set the origin. This would be the tick mark or bar top relative to
which it is defined. Called by the containing chart or axis."""
self.x = x
self.y = y
def demo(self):
"""This shows a label positioned with its top right corner
at the top centre of the drawing, and rotated 45 degrees."""
d = Drawing(200, 100)
# mark the origin of the label
d.add(Circle(100,90, 5, fillColor=colors.green))
lab = Label()
lab.setOrigin(100,90)
lab.boxAnchor = 'ne'
lab.angle = 45
lab.dx = 0
lab.dy = -20
lab.boxStrokeColor = colors.green
lab.setText('Another\nMulti-Line\nString')
d.add(lab)
return d
def _getBoxAnchor(self):
'''hook for allowing special box anchor effects'''
ba = self.boxAnchor
if ba in ('autox', 'autoy'):
angle = self.angle
na = (int((angle%360)/45.)*45)%360
if not (na % 90): # we have a right angle case
da = (angle - na) % 360
if abs(da)>5:
na = na + (da>0 and 45 or -45)
ba = _A2BA[ba[-1]][na]
return ba
def computeSize(self):
# the thing will draw in its own coordinate system
self._lineWidths = []
topPadding = self.topPadding
leftPadding = self.leftPadding
rightPadding = self.rightPadding
bottomPadding = self.bottomPadding
self._lines = simpleSplit(self._text,self.fontName,self.fontSize,self.maxWidth)
if not self.width:
self._width = leftPadding+rightPadding
if self._lines:
self._lineWidths = [stringWidth(line,self.fontName,self.fontSize) for line in self._lines]
self._width += max(self._lineWidths)
else:
self._width = self.width
if self.useAscentDescent:
self._ascent, self._descent = getAscentDescent(self.fontName,self.fontSize)
self._baselineRatio = self._ascent/(self._ascent-self._descent)
else:
self._baselineRatio = 1/1.2
if self.leading:
self._leading = self.leading
elif self.useAscentDescent:
self._leading = self._ascent - self._descent
else:
self._leading = self.fontSize*1.2
self._height = self.height or (self._leading*len(self._lines) + topPadding + bottomPadding)
self._ewidth = (self._width-leftPadding-rightPadding)
self._eheight = (self._height-topPadding-bottomPadding)
boxAnchor = self._getBoxAnchor()
if boxAnchor in ['n','ne','nw']:
self._top = -topPadding
elif boxAnchor in ['s','sw','se']:
self._top = self._height-topPadding
else:
self._top = 0.5*self._eheight
self._bottom = self._top - self._eheight
if boxAnchor in ['ne','e','se']:
self._left = leftPadding - self._width
elif boxAnchor in ['nw','w','sw']:
self._left = leftPadding
else:
self._left = -self._ewidth*0.5
self._right = self._left+self._ewidth
def _getTextAnchor(self):
'''This can be overridden to allow special effects'''
ta = self.textAnchor
if ta=='boxauto': ta = _BA2TA[self._getBoxAnchor()]
return ta
def _rawDraw(self):
_text = self._text
self._text = _text or ''
self.computeSize()
self._text = _text
g = Group()
g.translate(self.x + self.dx, self.y + self.dy)
g.rotate(self.angle)
y = self._top - self._leading*self._baselineRatio
textAnchor = self._getTextAnchor()
if textAnchor == 'start':
x = self._left
elif textAnchor == 'middle':
x = self._left + self._ewidth*0.5
else:
x = self._right
# paint box behind text just in case they
# fill it
if self.boxFillColor or (self.boxStrokeColor and self.boxStrokeWidth):
g.add(Rect( self._left-self.leftPadding,
self._bottom-self.bottomPadding,
self._width,
self._height,
strokeColor=self.boxStrokeColor,
strokeWidth=self.boxStrokeWidth,
fillColor=self.boxFillColor)
)
fillColor, fontName, fontSize = self.fillColor, self.fontName, self.fontSize
strokeColor, strokeWidth, leading = self.strokeColor, self.strokeWidth, self._leading
svgAttrs=getattr(self,'_svgAttrs',{})
if strokeColor:
for line in self._lines:
s = _text2Path(line, x, y, fontName, fontSize, textAnchor)
s.fillColor = fillColor
s.strokeColor = strokeColor
s.strokeWidth = strokeWidth
g.add(s)
y -= leading
else:
for line in self._lines:
s = String(x, y, line, _svgAttrs=svgAttrs)
s.textAnchor = textAnchor
s.fontName = fontName
s.fontSize = fontSize
s.fillColor = fillColor
g.add(s)
y -= leading
return g
def draw(self):
customDrawChanger = getattr(self,'customDrawChanger',None)
if customDrawChanger:
customDrawChanger(True,self)
try:
return self._rawDraw()
finally:
customDrawChanger(False,self)
else:
return self._rawDraw()
class LabelDecorator:
_attrMap = AttrMap(
x = AttrMapValue(isNumberOrNone,desc=''),
y = AttrMapValue(isNumberOrNone,desc=''),
dx = AttrMapValue(isNumberOrNone,desc=''),
dy = AttrMapValue(isNumberOrNone,desc=''),
angle = AttrMapValue(isNumberOrNone,desc=''),
boxAnchor = AttrMapValue(isBoxAnchor,desc=''),
boxStrokeColor = AttrMapValue(isColorOrNone,desc=''),
boxStrokeWidth = AttrMapValue(isNumberOrNone,desc=''),
boxFillColor = AttrMapValue(isColorOrNone,desc=''),
fillColor = AttrMapValue(isColorOrNone,desc=''),
strokeColor = AttrMapValue(isColorOrNone,desc=''),
        strokeWidth = AttrMapValue(isNumberOrNone,desc=''),
fontName = AttrMapValue(isNoneOrString,desc=''),
fontSize = AttrMapValue(isNumberOrNone,desc=''),
leading = AttrMapValue(isNumberOrNone,desc=''),
width = AttrMapValue(isNumberOrNone,desc=''),
maxWidth = AttrMapValue(isNumberOrNone,desc=''),
height = AttrMapValue(isNumberOrNone,desc=''),
textAnchor = AttrMapValue(isTextAnchor,desc=''),
visible = AttrMapValue(isBoolean,desc="True if the label is to be drawn"),
)
def __init__(self):
self.textAnchor = 'start'
self.boxAnchor = 'w'
for a in self._attrMap.keys():
if not hasattr(self,a): setattr(self,a,None)
def decorate(self,l,L):
chart,g,rowNo,colNo,x,y,width,height,x00,y00,x0,y0 = l._callOutInfo
L.setText(chart.categoryAxis.categoryNames[colNo])
g.add(L)
def __call__(self,l):
from copy import deepcopy
L = Label()
for a,v in self.__dict__.items():
if v is None: v = getattr(l,a,None)
setattr(L,a,v)
self.decorate(l,L)
isOffsetMode=OneOf('high','low','bar','axis')
class LabelOffset(PropHolder):
_attrMap = AttrMap(
posMode = AttrMapValue(isOffsetMode,desc="Where to base +ve offset"),
pos = AttrMapValue(isNumber,desc='Value for positive elements'),
negMode = AttrMapValue(isOffsetMode,desc="Where to base -ve offset"),
neg = AttrMapValue(isNumber,desc='Value for negative elements'),
)
def __init__(self):
self.posMode=self.negMode='axis'
self.pos = self.neg = 0
def _getValue(self, chart, val):
flipXY = chart._flipXY
A = chart.categoryAxis
jA = A.joinAxis
if val>=0:
mode = self.posMode
delta = self.pos
else:
mode = self.negMode
delta = self.neg
if flipXY:
v = A._x
else:
v = A._y
if jA:
if flipXY:
_v = jA._x
else:
_v = jA._y
if mode=='high':
v = _v + jA._length
elif mode=='low':
v = _v
elif mode=='bar':
v = _v+val
return v+delta
NoneOrInstanceOfLabelOffset=NoneOr(isInstanceOf(LabelOffset))
class PMVLabel(Label):
_attrMap = AttrMap(
BASE=Label,
)
def __init__(self):
Label.__init__(self)
self._pmv = 0
def _getBoxAnchor(self):
a = Label._getBoxAnchor(self)
if self._pmv<0: a = {'nw':'se','n':'s','ne':'sw','w':'e','c':'c','e':'w','sw':'ne','s':'n','se':'nw'}[a]
return a
def _getTextAnchor(self):
a = Label._getTextAnchor(self)
if self._pmv<0: a = {'start':'end', 'middle':'middle', 'end':'start'}[a]
return a
class BarChartLabel(PMVLabel):
"""
An extended Label allowing for nudging, lines visibility etc
"""
_attrMap = AttrMap(
BASE=PMVLabel,
lineStrokeWidth = AttrMapValue(isNumberOrNone, desc="Non-zero for a drawn line"),
lineStrokeColor = AttrMapValue(isColorOrNone, desc="Color for a drawn line"),
fixedEnd = AttrMapValue(NoneOrInstanceOfLabelOffset, desc="None or fixed draw ends +/-"),
fixedStart = AttrMapValue(NoneOrInstanceOfLabelOffset, desc="None or fixed draw starts +/-"),
nudge = AttrMapValue(isNumber, desc="Non-zero sign dependent nudge"),
)
def __init__(self):
PMVLabel.__init__(self)
self.lineStrokeWidth = 0
self.lineStrokeColor = None
self.fixedStart = self.fixedEnd = None
self.nudge = 0
class NA_Label(BarChartLabel):
"""
An extended Label allowing for nudging, lines visibility etc
"""
_attrMap = AttrMap(
BASE=BarChartLabel,
text = AttrMapValue(isNoneOrString, desc="Text to be used for N/A values"),
)
def __init__(self):
BarChartLabel.__init__(self)
self.text = 'n/a'
NoneOrInstanceOfNA_Label=NoneOr(isInstanceOf(NA_Label))
from reportlab.graphics.charts.utils import CustomDrawChanger
class RedNegativeChanger(CustomDrawChanger):
def __init__(self,fillColor=colors.red):
CustomDrawChanger.__init__(self)
self.fillColor = fillColor
def _changer(self,obj):
R = {}
if obj._text.startswith('-'):
R['fillColor'] = obj.fillColor
obj.fillColor = self.fillColor
return R
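# Illustrative sketch (not from the original source): attach the changer above
# to a Label so text starting with '-' is drawn with the changer's fillColor;
# Label.draw() invokes the changer before and after drawing (see above).
#
#   lab = Label()
#   lab.customDrawChanger = RedNegativeChanger()
#   lab.setText('-42')
#   d = Drawing(100, 50)
#   d.add(lab)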
|
|
import unittest
import time
import subprocess
import re
from os import remove
from sys import platform as _platform
try:
from smartnixietube.SmartNixieTube import SmartNixieTubeDisplay
except ImportError as e:
import sys
import os
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from smartnixietube.SmartNixieTube import SmartNixieTubeDisplay
__author__ = 'Nathan Waddington'
__email__ = '[email protected]'
class TestSmartNixieTubeDisplay(unittest.TestCase):
def setUp(self):
# Create two serial ports and connect them.
self.socatlf = 'socat_out.txt'
if _platform == "linux" or _platform == "linux2" or _platform == "darwin":
args = ['socat', '-d', '-d', '-lf' + self.socatlf, 'pty,raw,echo=0', 'pty,raw,echo=0']
elif _platform == "win32":
# Windows...
self.fail()
self.socatProcess = subprocess.Popen(args, stdout=subprocess.PIPE, bufsize=1)
time.sleep(0.1) # give the system a moment to actually write the socat_out file.
# get the port names
try:
self.inputPort, self.outputPort = self.get_serial_ports_from_socat_output(self.socatlf)
except ValueError as e:
print(str(e))
def tearDown(self):
# kill the existing socat process so we don't have extra ports hanging around.
self.socatProcess.kill()
# reset output file
remove(self.socatlf)
    def get_serial_ports_from_socat_output(self, filename):
        lines = []
        with open(filename, 'r') as log_file:  # read-only
            # get the lines with our ports in them.
            for line in log_file:
                if re.search('/dev/', line):
                    lines.append(line)
        # print(lines)
        # there should be two lines with ports in them.
        if len(lines) == 2:
            input_port = lines[0].split()[6]
            output_port = lines[1].split()[6]
        else:
            raise ValueError('%s file malformed' % filename)
        # print (input_port, output_port)
        return input_port, output_port
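    # Illustrative socat log lines this parser expects (the exact format is an
    # assumption inferred from the split()[6] indexing above; it varies with
    # socat version and platform):
    #   2015/06/01 12:00:00 socat[1234] N PTY is /dev/pts/3
    #   2015/06/01 12:00:01 socat[1234] N PTY is /dev/pts/4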
def test_SmartNixieTube_initialisation(self):
number_of_tubes_in_display = 3
smart_nixie_tube_display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
self.assertEqual(smart_nixie_tube_display.number_of_tubes_in_display, number_of_tubes_in_display)
def test_SmartNixieTubeDisplay_initialisation_with_no_tubes(self):
number_of_tubes_in_display = 0
try:
self.assertRaises(ValueError, SmartNixieTubeDisplay(number_of_tubes_in_display)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual('number_of_tubes_in_display must be greater than 0', str(e))
def test_SmartNixieTubeDisplay_initialisation_with_negative_tubes(self):
number_of_tubes_in_display = -1
try:
self.assertRaises(ValueError, SmartNixieTubeDisplay(number_of_tubes_in_display)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual('number_of_tubes_in_display must be greater than 0', str(e))
def test_SmartNixieTubeDisplay_init_with_one_tube(self):
number_of_tubes_in_display = 1
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
self.assertEqual(len(display.tubes), number_of_tubes_in_display)
self.assertEqual('$-,N,N,000,000,000,000', display.tubes[0].generate_command_string())
def test_SmartNixieTubeDisplay_generateCommandString(self):
number_of_tubes_in_display = 1
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
self.assertEqual('$-,N,N,000,000,000,000!', display.generate_command_string())
def test_SmartNixieTubeDisplay_init_with_two_tubes(self):
number_of_tubes_in_display = 2
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
self.assertEqual(len(display.tubes), number_of_tubes_in_display)
for tube in display.tubes:
self.assertEqual('$-,N,N,000,000,000,000', tube.generate_command_string())
def test_SmartNixieTubeDisplay_2tubes_generateCommandString(self):
number_of_tubes_in_display = 2
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
self.assertEqual('$-,N,N,000,000,000,000$-,N,N,000,000,000,000!', display.generate_command_string())
def test_SmartNixieTubeDisplay_3tubes_generateCommandString(self):
number_of_tubes_in_display = 3
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
self.assertEqual('$-,N,N,000,000,000,000$-,N,N,000,000,000,000$-,N,N,000,000,000,000!',
display.generate_command_string())
def test_SmartNixieTubeDisplay_3tubes_nonDefault_generateCommandString(self):
number_of_tubes_in_display = 3
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display.tubes[0].digit = '0'
display.tubes[1].digit = '1'
display.tubes[2].digit = '2'
self.assertEqual('$2,N,N,000,000,000,000$1,N,N,000,000,000,000$0,N,N,000,000,000,000!',
display.generate_command_string())
def test_SmartNixieTubeDisplay_set_display_numbers_out_of_bounds(self):
number_of_tubes_in_display = 1
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
try:
self.assertRaises(ValueError, display.set_display_number(-1)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual(str(e), 'Display number must be positive')
try:
self.assertRaises(ValueError, display.set_display_number(10)) # this should fail (too many digits)
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual(str(e), 'Not enough tubes to display all digits')
def test_SmartNixieTubeDisplay_set_one_tube_display_numbers(self):
# set one tube
number_of_tubes_in_display = 1
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display.set_display_number(9)
self.assertEqual('$9,N,N,000,000,000,000!', display.generate_command_string())
def test_SmartNixieTubeDisplay_set_two_tube_display_numbers(self):
# set two tubes
number_of_tubes_in_display = 2
display2 = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display2.set_display_number(9)
self.assertEqual('$9,N,N,000,000,000,000$0,N,N,000,000,000,000!', display2.generate_command_string())
display2.set_display_number(90)
self.assertEqual('$0,N,N,000,000,000,000$9,N,N,000,000,000,000!', display2.generate_command_string())
display2.set_display_number(99)
self.assertEqual('$9,N,N,000,000,000,000$9,N,N,000,000,000,000!', display2.generate_command_string())
def test_SmartNixieTubeDisplay_set_three_tube_display_numbers(self):
# set three tubes
number_of_tubes_in_display = 3
display3 = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display3.set_display_number(9)
self.assertEqual('$9,N,N,000,000,000,000$0,N,N,000,000,000,000$0,N,N,000,000,000,000!',
display3.generate_command_string())
display3.set_display_number(99)
self.assertEqual('$9,N,N,000,000,000,000$9,N,N,000,000,000,000$0,N,N,000,000,000,000!',
display3.generate_command_string())
display3.set_display_number(909)
self.assertEqual('$9,N,N,000,000,000,000$0,N,N,000,000,000,000$9,N,N,000,000,000,000!',
display3.generate_command_string())
display3.set_display_number(990)
self.assertEqual('$0,N,N,000,000,000,000$9,N,N,000,000,000,000$9,N,N,000,000,000,000!',
display3.generate_command_string())
def test_init_display_brightness_out_of_range(self):
try:
self.assertRaises(ValueError,
SmartNixieTubeDisplay(number_of_tubes_in_display=1, brightness=-1,
serial_port_name=self.inputPort)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual('Brightness must be between 0-255', str(e))
try:
self.assertRaises(ValueError,
SmartNixieTubeDisplay(number_of_tubes_in_display=1, brightness=256,
serial_port_name=self.inputPort)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual('Brightness must be between 0-255', str(e))
def test_init_display_red_out_of_range(self):
try:
self.assertRaises(ValueError, SmartNixieTubeDisplay(number_of_tubes_in_display=1, red=-1,
serial_port_name=self.inputPort)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual('Red must be between 0-255', str(e))
try:
self.assertRaises(ValueError, SmartNixieTubeDisplay(number_of_tubes_in_display=1, red=256,
serial_port_name=self.inputPort)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual('Red must be between 0-255', str(e))
def test_init_display_blue_out_of_range(self):
try:
self.assertRaises(ValueError, SmartNixieTubeDisplay(number_of_tubes_in_display=1, blue=-1,
serial_port_name=self.inputPort)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual('Blue must be between 0-255', str(e))
try:
self.assertRaises(ValueError, SmartNixieTubeDisplay(number_of_tubes_in_display=1, blue=256,
serial_port_name=self.inputPort)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual('Blue must be between 0-255', str(e))
def test_init_display_green_out_of_range(self):
try:
self.assertRaises(ValueError, SmartNixieTubeDisplay(number_of_tubes_in_display=1, green=-1,
serial_port_name=self.inputPort)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual('Green must be between 0-255', str(e))
try:
self.assertRaises(ValueError,
SmartNixieTubeDisplay(number_of_tubes_in_display=1, green=256,
serial_port_name=self.inputPort)) # this should fail
self.fail("Didn't raise ValueError")
except ValueError as e:
self.assertEqual('Green must be between 0-255', str(e))
def test_init_display_brightness(self):
# set one tube
number_of_tubes_in_display = 1
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display.brightness = 128
self.assertEqual('$-,N,N,128,000,000,000!', display.generate_command_string())
# set two tubes
number_of_tubes_in_display = 2
tube_display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display2 = tube_display
display2.brightness = 128
self.assertEqual('$-,N,N,128,000,000,000$-,N,N,128,000,000,000!', display2.generate_command_string())
# set three tubes
number_of_tubes_in_display = 3
display3 = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display3.brightness = 128
self.assertEqual('$-,N,N,128,000,000,000$-,N,N,128,000,000,000$-,N,N,128,000,000,000!',
display3.generate_command_string())
def test_init_display_red(self):
# set one tube
number_of_tubes_in_display = 1
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display.red = 128
self.assertEqual('$-,N,N,000,128,000,000!', display.generate_command_string())
# set two tubes
number_of_tubes_in_display = 2
display2 = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display2.red = 128
self.assertEqual('$-,N,N,000,128,000,000$-,N,N,000,128,000,000!', display2.generate_command_string())
# set three tubes
number_of_tubes_in_display = 3
display3 = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display3.red = 128
self.assertEqual('$-,N,N,000,128,000,000$-,N,N,000,128,000,000$-,N,N,000,128,000,000!',
display3.generate_command_string())
def test_init_display_green(self):
# set one tube
number_of_tubes_in_display = 1
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display.green = 128
self.assertEqual('$-,N,N,000,000,128,000!', display.generate_command_string())
# set two tubes
number_of_tubes_in_display = 2
display2 = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display2.green = 128
self.assertEqual('$-,N,N,000,000,128,000$-,N,N,000,000,128,000!', display2.generate_command_string())
# set three tubes
number_of_tubes_in_display = 3
display3 = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display3.green = 128
self.assertEqual('$-,N,N,000,000,128,000$-,N,N,000,000,128,000$-,N,N,000,000,128,000!',
display3.generate_command_string())
def test_init_display_blue(self):
# set one tube
number_of_tubes_in_display = 1
display = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display.blue = 128
self.assertEqual('$-,N,N,000,000,000,128!', display.generate_command_string())
# set two tubes
number_of_tubes_in_display = 2
display2 = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display2.blue = 128
self.assertEqual('$-,N,N,000,000,000,128$-,N,N,000,000,000,128!', display2.generate_command_string())
# set three tubes
number_of_tubes_in_display = 3
display3 = SmartNixieTubeDisplay(number_of_tubes_in_display, serial_port_name=self.inputPort)
display3.blue = 128
self.assertEqual('$-,N,N,000,000,000,128$-,N,N,000,000,000,128$-,N,N,000,000,000,128!',
display3.generate_command_string())
if __name__ == '__main__':
# run unit tests
unittest.main(warnings='ignore')
|
|
# Author: Mark Wronkiewicz <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import warnings
import numpy as np
import sys
import scipy
from numpy.testing import assert_equal, assert_allclose
from nose.tools import assert_true, assert_raises
from nose.plugins.skip import SkipTest
from distutils.version import LooseVersion
from mne import compute_raw_covariance, pick_types
from mne.chpi import read_head_pos, filter_chpi
from mne.forward import _prep_meg_channels
from mne.cov import _estimate_rank_meeg_cov
from mne.datasets import testing
from mne.io import Raw, proc_history, read_info, read_raw_bti, read_raw_kit
from mne.preprocessing.maxwell import (
maxwell_filter, _get_n_moments, _sss_basis_basic, _sh_complex_to_real,
_sh_real_to_complex, _sh_negate, _bases_complex_to_real, _trans_sss_basis,
_bases_real_to_complex, _sph_harm, _prep_mf_coils)
from mne.tests.common import assert_meg_snr
from mne.utils import (_TempDir, run_tests_if_main, slow_test, catch_logging,
requires_version, object_diff)
from mne.externals.six import PY3
warnings.simplefilter('always') # Always throw warnings
data_path = testing.data_path(download=False)
sss_path = op.join(data_path, 'SSS')
pre = op.join(sss_path, 'test_move_anon_')
raw_fname = pre + 'raw.fif'
sss_std_fname = pre + 'stdOrigin_raw_sss.fif'
sss_nonstd_fname = pre + 'nonStdOrigin_raw_sss.fif'
sss_bad_recon_fname = pre + 'badRecon_raw_sss.fif'
sss_reg_in_fname = pre + 'regIn_raw_sss.fif'
sss_fine_cal_fname = pre + 'fineCal_raw_sss.fif'
sss_ctc_fname = pre + 'crossTalk_raw_sss.fif'
sss_trans_default_fname = pre + 'transDefault_raw_sss.fif'
sss_trans_sample_fname = pre + 'transSample_raw_sss.fif'
sss_st1FineCalCrossTalkRegIn_fname = \
pre + 'st1FineCalCrossTalkRegIn_raw_sss.fif'
sss_st1FineCalCrossTalkRegInTransSample_fname = \
pre + 'st1FineCalCrossTalkRegInTransSample_raw_sss.fif'
sss_movecomp_fname = pre + 'movecomp_raw_sss.fif'
sss_movecomp_reg_in_fname = pre + 'movecomp_regIn_raw_sss.fif'
sss_movecomp_reg_in_st4s_fname = pre + 'movecomp_regIn_st4s_raw_sss.fif'
erm_fname = pre + 'erm_raw.fif'
sss_erm_std_fname = pre + 'erm_devOrigin_raw_sss.fif'
sss_erm_reg_in_fname = pre + 'erm_regIn_raw_sss.fif'
sss_erm_fine_cal_fname = pre + 'erm_fineCal_raw_sss.fif'
sss_erm_ctc_fname = pre + 'erm_crossTalk_raw_sss.fif'
sss_erm_st_fname = pre + 'erm_st1_raw_sss.fif'
sss_erm_st1FineCalCrossTalk_fname = pre + 'erm_st1FineCalCrossTalk_raw_sss.fif'
sss_erm_st1FineCalCrossTalkRegIn_fname = \
pre + 'erm_st1FineCalCrossTalkRegIn_raw_sss.fif'
sample_fname = op.join(data_path, 'MEG', 'sample_audvis_trunc_raw.fif')
sss_samp_reg_in_fname = op.join(data_path, 'SSS',
'sample_audvis_trunc_regIn_raw_sss.fif')
sss_samp_fname = op.join(data_path, 'SSS', 'sample_audvis_trunc_raw_sss.fif')
pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')
bases_fname = op.join(sss_path, 'sss_data.mat')
fine_cal_fname = op.join(sss_path, 'sss_cal_3053.dat')
fine_cal_fname_3d = op.join(sss_path, 'sss_cal_3053_3d.dat')
ctc_fname = op.join(sss_path, 'ct_sparse.fif')
fine_cal_mgh_fname = op.join(sss_path, 'sss_cal_mgh.dat')
ctc_mgh_fname = op.join(sss_path, 'ct_sparse_mgh.fif')
sample_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
int_order, ext_order = 8, 3
mf_head_origin = (0., 0., 0.04)
mf_meg_origin = (0., 0.013, -0.006)
# otherwise we can get SVD error
requires_svd_convergence = requires_version('scipy', '0.12')
# 30 random bad MEG channels (20 grad, 10 mag) that were used in generation
bads = ['MEG0912', 'MEG1722', 'MEG2213', 'MEG0132', 'MEG1312', 'MEG0432',
'MEG2433', 'MEG1022', 'MEG0442', 'MEG2332', 'MEG0633', 'MEG1043',
'MEG1713', 'MEG0422', 'MEG0932', 'MEG1622', 'MEG1343', 'MEG0943',
'MEG0643', 'MEG0143', 'MEG2142', 'MEG0813', 'MEG2143', 'MEG1323',
'MEG0522', 'MEG1123', 'MEG0423', 'MEG2122', 'MEG2532', 'MEG0812']
def _assert_n_free(raw_sss, lower, upper=None):
"""Helper to check the DOF"""
upper = lower if upper is None else upper
n_free = raw_sss.info['proc_history'][0]['max_info']['sss_info']['nfree']
assert_true(lower <= n_free <= upper,
'nfree fail: %s <= %s <= %s' % (lower, n_free, upper))
@slow_test
@testing.requires_testing_data
def test_movement_compensation():
"""Test movement compensation"""
temp_dir = _TempDir()
lims = (0, 4, False)
raw = Raw(raw_fname, allow_maxshield='yes', preload=True).crop(*lims)
head_pos = read_head_pos(pos_fname)
#
# Movement compensation, no regularization, no tSSS
#
raw_sss = maxwell_filter(raw, head_pos=head_pos, origin=mf_head_origin,
regularize=None, bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_movecomp_fname).crop(*lims),
4.6, 12.4, chpi_med_tol=58)
# IO
temp_fname = op.join(temp_dir, 'test_raw_sss.fif')
raw_sss.save(temp_fname)
raw_sss = Raw(temp_fname)
assert_meg_snr(raw_sss, Raw(sss_movecomp_fname).crop(*lims),
4.6, 12.4, chpi_med_tol=58)
#
# Movement compensation, regularization, no tSSS
#
raw_sss = maxwell_filter(raw, head_pos=head_pos, origin=mf_head_origin)
assert_meg_snr(raw_sss, Raw(sss_movecomp_reg_in_fname).crop(*lims),
0.5, 1.9, chpi_med_tol=121)
#
# Movement compensation, regularization, tSSS at the end
#
raw_nohpi = filter_chpi(raw.copy())
with warnings.catch_warnings(record=True) as w: # untested feature
raw_sss_mv = maxwell_filter(raw_nohpi, head_pos=head_pos,
st_duration=4., origin=mf_head_origin,
st_fixed=False)
assert_equal(len(w), 1)
assert_true('is untested' in str(w[0].message))
# Neither match is particularly good because our algorithm actually differs
assert_meg_snr(raw_sss_mv, Raw(sss_movecomp_reg_in_st4s_fname).crop(*lims),
0.6, 1.3)
tSSS_fname = op.join(sss_path, 'test_move_anon_st4s_raw_sss.fif')
assert_meg_snr(raw_sss_mv, Raw(tSSS_fname).crop(*lims),
0.6, 1.0, chpi_med_tol=None)
assert_meg_snr(Raw(sss_movecomp_reg_in_st4s_fname), Raw(tSSS_fname),
0.8, 1.0, chpi_med_tol=None)
#
# Movement compensation, regularization, tSSS at the beginning
#
raw_sss_mc = maxwell_filter(raw_nohpi, head_pos=head_pos, st_duration=4.,
origin=mf_head_origin)
assert_meg_snr(raw_sss_mc, Raw(tSSS_fname).crop(*lims),
0.6, 1.0, chpi_med_tol=None)
assert_meg_snr(raw_sss_mc, raw_sss_mv, 0.6, 1.4)
# some degenerate cases
raw_erm = Raw(erm_fname, allow_maxshield='yes')
assert_raises(ValueError, maxwell_filter, raw_erm, coord_frame='meg',
head_pos=head_pos) # can't do ERM file
assert_raises(ValueError, maxwell_filter, raw,
head_pos=head_pos[:, :9]) # bad shape
assert_raises(TypeError, maxwell_filter, raw, head_pos='foo') # bad type
assert_raises(ValueError, maxwell_filter, raw, head_pos=head_pos[::-1])
head_pos_bad = head_pos.copy()
head_pos_bad[0, 0] = raw.first_samp / raw.info['sfreq'] - 1e-2
assert_raises(ValueError, maxwell_filter, raw, head_pos=head_pos_bad)
# make sure numerical error doesn't screw it up, though
head_pos_bad[0, 0] = raw.first_samp / raw.info['sfreq'] - 5e-4
raw_sss_tweak = maxwell_filter(raw, head_pos=head_pos_bad,
origin=mf_head_origin)
assert_meg_snr(raw_sss_tweak, raw_sss, 2., 10., chpi_med_tol=11)
@slow_test
def test_other_systems():
"""Test Maxwell filtering on KIT, BTI, and CTF files
"""
# KIT
kit_dir = op.join(io_dir, 'kit', 'tests', 'data')
sqd_path = op.join(kit_dir, 'test.sqd')
mrk_path = op.join(kit_dir, 'test_mrk.sqd')
elp_path = op.join(kit_dir, 'test_elp.txt')
hsp_path = op.join(kit_dir, 'test_hsp.txt')
raw_kit = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
with warnings.catch_warnings(record=True): # head fit
assert_raises(RuntimeError, maxwell_filter, raw_kit)
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 65, 65)
# XXX this KIT origin fit is terrible! Eventually we should get a
# corrected HSP file with proper coverage
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
assert_raises(RuntimeError, maxwell_filter, raw_kit,
ignore_ref=True, regularize=None) # bad condition
raw_sss = maxwell_filter(raw_kit, origin='auto',
ignore_ref=True, bad_condition='warning',
verbose='warning')
log_file = log_file.getvalue()
assert_true('badly conditioned' in log_file)
assert_true('more than 20 mm from' in log_file)
# fits can differ slightly based on scipy version, so be lenient here
_assert_n_free(raw_sss, 28, 34) # bad origin == brutal reg
# Let's set the origin
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04),
ignore_ref=True, bad_condition='warning',
regularize=None, verbose='warning')
log_file = log_file.getvalue()
assert_true('badly conditioned' in log_file)
_assert_n_free(raw_sss, 80)
# Now with reg
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04),
ignore_ref=True, verbose=True)
log_file = log_file.getvalue()
assert_true('badly conditioned' not in log_file)
_assert_n_free(raw_sss, 65)
# BTi
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
bti_pdf = op.join(bti_dir, 'test_pdf_linux')
bti_config = op.join(bti_dir, 'test_config_linux')
bti_hs = op.join(bti_dir, 'test_hs_linux')
    with warnings.catch_warnings(record=True):  # weight table
raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
raw_sss = maxwell_filter(raw_bti)
_assert_n_free(raw_sss, 70)
# CTF
raw_ctf = Raw(fname_ctf_raw, compensation=2)
assert_raises(RuntimeError, maxwell_filter, raw_ctf) # compensated
raw_ctf = Raw(fname_ctf_raw)
assert_raises(ValueError, maxwell_filter, raw_ctf) # cannot fit headshape
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04))
_assert_n_free(raw_sss, 68)
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 70)
def test_spherical_harmonics():
"""Test spherical harmonic functions"""
from scipy.special import sph_harm
az, pol = np.meshgrid(np.linspace(0, 2 * np.pi, 30),
np.linspace(0, np.pi, 20))
    # As of Oct 16, 2015, Anaconda has a bug in scipy due to old compilers (?):
# https://github.com/ContinuumIO/anaconda-issues/issues/479
if (PY3 and
LooseVersion(scipy.__version__) >= LooseVersion('0.15') and
'Continuum Analytics' in sys.version):
raise SkipTest('scipy sph_harm bad in Py3k on Anaconda')
# Test our basic spherical harmonics
for degree in range(1, int_order):
for order in range(0, degree + 1):
sph = _sph_harm(order, degree, az, pol)
sph_scipy = sph_harm(order, degree, az, pol)
assert_allclose(sph, sph_scipy, atol=1e-7)
def test_spherical_conversions():
"""Test spherical harmonic conversions"""
# Test our real<->complex conversion functions
az, pol = np.meshgrid(np.linspace(0, 2 * np.pi, 30),
np.linspace(0, np.pi, 20))
for degree in range(1, int_order):
for order in range(0, degree + 1):
sph = _sph_harm(order, degree, az, pol)
# ensure that we satisfy the conjugation property
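            # (i.e. Y_l^{-m} = (-1)**m * conj(Y_l^m) in scipy's convention)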
assert_allclose(_sh_negate(sph, order),
_sph_harm(-order, degree, az, pol))
# ensure our conversion functions work
sph_real_pos = _sh_complex_to_real(sph, order)
sph_real_neg = _sh_complex_to_real(sph, -order)
sph_2 = _sh_real_to_complex([sph_real_pos, sph_real_neg], order)
assert_allclose(sph, sph_2, atol=1e-7)
@testing.requires_testing_data
def test_multipolar_bases():
"""Test multipolar moment basis calculation using sensor information"""
from scipy.io import loadmat
# Test our basis calculations
info = read_info(raw_fname)
coils = _prep_meg_channels(info, accurate=True, elekta_defs=True,
do_es=True)[0]
# Check against a known benchmark
sss_data = loadmat(bases_fname)
exp = dict(int_order=int_order, ext_order=ext_order)
for origin in ((0, 0, 0.04), (0, 0.02, 0.02)):
o_str = ''.join('%d' % (1000 * n) for n in origin)
exp.update(origin=origin)
S_tot = _sss_basis_basic(exp, coils, method='alternative')
# Test our real<->complex conversion functions
S_tot_complex = _bases_real_to_complex(S_tot, int_order, ext_order)
S_tot_round = _bases_complex_to_real(S_tot_complex,
int_order, ext_order)
assert_allclose(S_tot, S_tot_round, atol=1e-7)
S_tot_mat = np.concatenate([sss_data['Sin' + o_str],
sss_data['Sout' + o_str]], axis=1)
S_tot_mat_real = _bases_complex_to_real(S_tot_mat,
int_order, ext_order)
S_tot_mat_round = _bases_real_to_complex(S_tot_mat_real,
int_order, ext_order)
assert_allclose(S_tot_mat, S_tot_mat_round, atol=1e-7)
assert_allclose(S_tot_complex, S_tot_mat, rtol=1e-4, atol=1e-8)
assert_allclose(S_tot, S_tot_mat_real, rtol=1e-4, atol=1e-8)
# Now normalize our columns
S_tot /= np.sqrt(np.sum(S_tot * S_tot, axis=0))[np.newaxis]
S_tot_complex /= np.sqrt(np.sum(
(S_tot_complex * S_tot_complex.conj()).real, axis=0))[np.newaxis]
# Check against a known benchmark
S_tot_mat = np.concatenate([sss_data['SNin' + o_str],
sss_data['SNout' + o_str]], axis=1)
# Check this roundtrip
S_tot_mat_real = _bases_complex_to_real(S_tot_mat,
int_order, ext_order)
S_tot_mat_round = _bases_real_to_complex(S_tot_mat_real,
int_order, ext_order)
assert_allclose(S_tot_mat, S_tot_mat_round, atol=1e-7)
assert_allclose(S_tot_complex, S_tot_mat, rtol=1e-4, atol=1e-8)
# Now test our optimized version
S_tot = _sss_basis_basic(exp, coils)
S_tot_fast = _trans_sss_basis(
exp, all_coils=_prep_mf_coils(info), trans=info['dev_head_t'])
        # There are some sign differences for columns (orders/degrees) here,
        # likely due to the Condon-Shortley phase convention. We use a
        # magnetometer channel to figure out the flips because the
        # gradiometer channels have effectively zero values for the first
        # three external components (i.e., S_tot[grad_picks, 80:83])
flips = (np.sign(S_tot_fast[2]) != np.sign(S_tot[2]))
flips = 1 - 2 * flips
assert_allclose(S_tot, S_tot_fast * flips, atol=1e-16)
@testing.requires_testing_data
def test_basic():
"""Test Maxwell filter basic version"""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
raw_err = Raw(raw_fname, proj=True, allow_maxshield='yes')
raw_erm = Raw(erm_fname, allow_maxshield='yes')
assert_raises(RuntimeError, maxwell_filter, raw_err)
assert_raises(TypeError, maxwell_filter, 1.) # not a raw
assert_raises(ValueError, maxwell_filter, raw, int_order=20) # too many
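    # An order-L expansion has sum_{l=1}^{L} (2l + 1) = L*(L + 2) moments, so
    # int_order=8 gives 80 internal and ext_order=3 gives 15 external basis
    # functions (95 in total), which is what is checked below.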
n_int_bases = int_order ** 2 + 2 * int_order
n_ext_bases = ext_order ** 2 + 2 * ext_order
nbases = n_int_bases + n_ext_bases
# Check number of bases computed correctly
assert_equal(_get_n_moments([int_order, ext_order]).sum(), nbases)
# Test SSS computation at the standard head origin
assert_equal(len(raw.info['projs']), 12) # 11 MEG projs + 1 AVG EEG
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_equal(len(raw_sss.info['projs']), 1) # avg EEG
assert_equal(raw_sss.info['projs'][0]['desc'], 'Average EEG reference')
assert_meg_snr(raw_sss, Raw(sss_std_fname), 200., 1000.)
py_cal = raw_sss.info['proc_history'][0]['max_info']['sss_cal']
assert_equal(len(py_cal), 0)
py_ctc = raw_sss.info['proc_history'][0]['max_info']['sss_ctc']
assert_equal(len(py_ctc), 0)
py_st = raw_sss.info['proc_history'][0]['max_info']['max_st']
assert_equal(len(py_st), 0)
assert_raises(RuntimeError, maxwell_filter, raw_sss)
# Test SSS computation at non-standard head origin
raw_sss = maxwell_filter(raw, origin=[0., 0.02, 0.02], regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_nonstd_fname), 250., 700.)
# Test SSS computation at device origin
sss_erm_std = Raw(sss_erm_std_fname)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg',
origin=mf_meg_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_erm_std, 100., 900.)
for key in ('job', 'frame'):
vals = [x.info['proc_history'][0]['max_info']['sss_info'][key]
for x in [raw_sss, sss_erm_std]]
assert_equal(vals[0], vals[1])
# Check against SSS functions from proc_history
sss_info = raw_sss.info['proc_history'][0]['max_info']
assert_equal(_get_n_moments(int_order),
proc_history._get_sss_rank(sss_info))
# Degenerate cases
raw_bad = raw.copy()
raw_bad.comp = True
assert_raises(RuntimeError, maxwell_filter, raw_bad)
del raw_bad
assert_raises(ValueError, maxwell_filter, raw, coord_frame='foo')
assert_raises(ValueError, maxwell_filter, raw, origin='foo')
assert_raises(ValueError, maxwell_filter, raw, origin=[0] * 4)
@testing.requires_testing_data
def test_maxwell_filter_additional():
"""Test processing of Maxwell filtered data"""
# TODO: Future tests integrate with mne/io/tests/test_proc_history
# Load testing data (raw, SSS std origin, SSS non-standard origin)
data_path = op.join(testing.data_path(download=False))
file_name = 'test_move_anon'
raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif')
# Use 2.0 seconds of data to get stable cov. estimate
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 2., copy=False)
# Get MEG channels, compute Maxwell filtered data
raw.load_data()
raw.pick_types(meg=True, eeg=False)
int_order = 8
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
# Test io on processed data
tempdir = _TempDir()
test_outname = op.join(tempdir, 'test_raw_sss.fif')
raw_sss.save(test_outname)
raw_sss_loaded = Raw(test_outname, preload=True)
# Some numerical imprecision since save uses 'single' fmt
assert_allclose(raw_sss_loaded[:][0], raw_sss[:][0],
rtol=1e-6, atol=1e-20)
# Test rank of covariance matrices for raw and SSS processed data
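    # After SSS the data are confined to the span of the internal basis, so the
    # covariance rank should drop from n_channels to the number of internal
    # moments (_get_n_moments(int_order), i.e. 80 for int_order=8).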
cov_raw = compute_raw_covariance(raw)
cov_sss = compute_raw_covariance(raw_sss)
scalings = None
cov_raw_rank = _estimate_rank_meeg_cov(cov_raw['data'], raw.info, scalings)
cov_sss_rank = _estimate_rank_meeg_cov(cov_sss['data'], raw_sss.info,
scalings)
assert_equal(cov_raw_rank, raw.info['nchan'])
assert_equal(cov_sss_rank, _get_n_moments(int_order))
@slow_test
@testing.requires_testing_data
def test_bads_reconstruction():
"""Test Maxwell filter reconstruction of bad channels"""
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1.)
raw.info['bads'] = bads
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_bad_recon_fname), 300.)
@requires_svd_convergence
@testing.requires_testing_data
def test_spatiotemporal_maxwell():
"""Test Maxwell filter (tSSS) spatiotemporal processing"""
# Load raw testing data
raw = Raw(raw_fname, allow_maxshield='yes')
    # Check that an error is raised if the tSSS window is longer than the data
assert_raises(ValueError, maxwell_filter, raw, st_duration=1000.)
# Check both 4 and 10 seconds because Elekta handles them differently
# This is to ensure that std/non-std tSSS windows are correctly handled
st_durations = [4., 10.]
tols = [325., 200.]
for st_duration, tol in zip(st_durations, tols):
# Load tSSS data depending on st_duration and get data
tSSS_fname = op.join(sss_path,
'test_move_anon_st%0ds_raw_sss.fif' % st_duration)
tsss_bench = Raw(tSSS_fname)
# Because Elekta's tSSS sometimes(!) lumps the tail window of data
# onto the previous buffer if it's shorter than st_duration, we have to
# crop the data here to compensate for Elekta's tSSS behavior.
if st_duration == 10.:
tsss_bench.crop(0, st_duration, copy=False)
# Test sss computation at the standard head origin. Same cropping issue
# as mentioned above.
if st_duration == 10.:
raw_tsss = maxwell_filter(raw.crop(0, st_duration),
origin=mf_head_origin,
st_duration=st_duration, regularize=None,
bad_condition='ignore')
else:
raw_tsss = maxwell_filter(raw, st_duration=st_duration,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose=True)
raw_tsss_2 = maxwell_filter(raw, st_duration=st_duration,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', st_fixed=False,
verbose=True)
assert_meg_snr(raw_tsss, raw_tsss_2, 100., 1000.)
assert_equal(raw_tsss.estimate_rank(), 140)
assert_equal(raw_tsss_2.estimate_rank(), 140)
assert_meg_snr(raw_tsss, tsss_bench, tol)
py_st = raw_tsss.info['proc_history'][0]['max_info']['max_st']
assert_true(len(py_st) > 0)
assert_equal(py_st['buflen'], st_duration)
assert_equal(py_st['subspcorr'], 0.98)
# Degenerate cases
assert_raises(ValueError, maxwell_filter, raw, st_duration=10.,
st_correlation=0.)
@requires_svd_convergence
@testing.requires_testing_data
def test_spatiotemporal_only():
"""Test tSSS-only processing"""
# Load raw testing data
raw = Raw(raw_fname,
allow_maxshield='yes').crop(0, 2, copy=False).load_data()
picks = pick_types(raw.info, meg='mag', exclude=())
power = np.sqrt(np.sum(raw[picks][0] ** 2))
# basics
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 10)
# temporal proj will actually reduce spatial DOF with small windows!
raw_tsss = maxwell_filter(raw, st_duration=0.1, st_only=True)
assert_true(raw_tsss.estimate_rank() < 350)
_assert_shielding(raw_tsss, power, 40)
# with movement
head_pos = read_head_pos(pos_fname)
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 12)
with warnings.catch_warnings(record=True): # st_fixed False
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos, st_fixed=False)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 12)
# should do nothing
raw_tsss = maxwell_filter(raw, st_duration=1., st_correlation=1.,
st_only=True)
assert_allclose(raw[:][0], raw_tsss[:][0])
# degenerate
assert_raises(ValueError, maxwell_filter, raw, st_only=True) # no ST
# two-step process equivalent to single-step process
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True)
raw_tsss = maxwell_filter(raw_tsss)
raw_tsss_2 = maxwell_filter(raw, st_duration=1.)
assert_meg_snr(raw_tsss, raw_tsss_2, 1e5)
# now also with head movement, and a bad MEG channel
assert_equal(len(raw.info['bads']), 0)
raw.info['bads'] = ['EEG001', 'MEG2623']
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos)
assert_equal(raw.info['bads'], ['EEG001', 'MEG2623'])
assert_equal(raw_tsss.info['bads'], ['EEG001', 'MEG2623']) # don't reset
raw_tsss = maxwell_filter(raw_tsss, head_pos=head_pos)
assert_equal(raw_tsss.info['bads'], ['EEG001']) # do reset MEG bads
raw_tsss_2 = maxwell_filter(raw, st_duration=1., head_pos=head_pos)
assert_equal(raw_tsss_2.info['bads'], ['EEG001'])
assert_meg_snr(raw_tsss, raw_tsss_2, 1e5)
@testing.requires_testing_data
def test_fine_calibration():
"""Test Maxwell filter fine calibration"""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
sss_fine_cal = Raw(sss_fine_cal_fname)
# Test 1D SSS fine calibration
raw_sss = maxwell_filter(raw, calibration=fine_cal_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_fine_cal, 82, 611)
py_cal = raw_sss.info['proc_history'][0]['max_info']['sss_cal']
assert_true(py_cal is not None)
assert_true(len(py_cal) > 0)
mf_cal = sss_fine_cal.info['proc_history'][0]['max_info']['sss_cal']
# we identify these differently
mf_cal['cal_chans'][mf_cal['cal_chans'][:, 1] == 3022, 1] = 3024
assert_allclose(py_cal['cal_chans'], mf_cal['cal_chans'])
assert_allclose(py_cal['cal_corrs'], mf_cal['cal_corrs'],
rtol=1e-3, atol=1e-3)
# Test 3D SSS fine calibration (no equivalent func in MaxFilter yet!)
# very low SNR as proc differs, eventually we should add a better test
raw_sss_3D = maxwell_filter(raw, calibration=fine_cal_fname_3d,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss_3D, sss_fine_cal, 1.0, 6.)
raw_ctf = Raw(fname_ctf_raw)
assert_raises(RuntimeError, maxwell_filter, raw_ctf, origin=(0., 0., 0.04),
calibration=fine_cal_fname)
@slow_test
@testing.requires_testing_data
def test_regularization():
"""Test Maxwell filter regularization"""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
min_tols = (100., 2.6, 1.0)
med_tols = (1000., 21.4, 3.7)
origins = ((0., 0., 0.04), (0.,) * 3, (0., 0.02, 0.02))
coord_frames = ('head', 'meg', 'head')
raw_fnames = (raw_fname, erm_fname, sample_fname)
sss_fnames = (sss_reg_in_fname, sss_erm_reg_in_fname,
sss_samp_reg_in_fname)
comp_tols = [0, 1, 4]
for ii, rf in enumerate(raw_fnames):
raw = Raw(rf, allow_maxshield='yes').crop(0., 1.)
sss_reg_in = Raw(sss_fnames[ii])
# Test "in" regularization
raw_sss = maxwell_filter(raw, coord_frame=coord_frames[ii],
origin=origins[ii])
assert_meg_snr(raw_sss, sss_reg_in, min_tols[ii], med_tols[ii], msg=rf)
# check components match
py_info = raw_sss.info['proc_history'][0]['max_info']['sss_info']
assert_true(py_info is not None)
assert_true(len(py_info) > 0)
mf_info = sss_reg_in.info['proc_history'][0]['max_info']['sss_info']
n_in = None
for inf in py_info, mf_info:
if n_in is None:
n_in = _get_n_moments(inf['in_order'])
else:
assert_equal(n_in, _get_n_moments(inf['in_order']))
assert_equal(inf['components'][:n_in].sum(), inf['nfree'])
assert_allclose(py_info['nfree'], mf_info['nfree'],
atol=comp_tols[ii], err_msg=rf)
@testing.requires_testing_data
def test_cross_talk():
"""Test Maxwell filter cross-talk cancellation"""
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
raw.info['bads'] = bads
sss_ctc = Raw(sss_ctc_fname)
raw_sss = maxwell_filter(raw, cross_talk=ctc_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_ctc, 275.)
py_ctc = raw_sss.info['proc_history'][0]['max_info']['sss_ctc']
assert_true(len(py_ctc) > 0)
assert_raises(ValueError, maxwell_filter, raw, cross_talk=raw)
assert_raises(ValueError, maxwell_filter, raw, cross_talk=raw_fname)
mf_ctc = sss_ctc.info['proc_history'][0]['max_info']['sss_ctc']
del mf_ctc['block_id'] # we don't write this
assert_equal(object_diff(py_ctc, mf_ctc), '')
raw_ctf = Raw(fname_ctf_raw)
assert_raises(ValueError, maxwell_filter, raw_ctf) # cannot fit headshape
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04))
_assert_n_free(raw_sss, 68)
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 70)
raw_missing = raw.copy().crop(0, 0.1).load_data().pick_channels(
[raw.ch_names[pi] for pi in pick_types(raw.info, meg=True,
exclude=())[3:]])
with warnings.catch_warnings(record=True) as w:
maxwell_filter(raw_missing, cross_talk=ctc_fname)
assert_equal(len(w), 1)
assert_true('Not all cross-talk channels in raw' in str(w[0].message))
# MEG channels not in cross-talk
assert_raises(RuntimeError, maxwell_filter, raw_ctf, origin=(0., 0., 0.04),
cross_talk=ctc_fname)
@testing.requires_testing_data
def test_head_translation():
"""Test Maxwell filter head translation"""
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
# First try with an unchanged destination
raw_sss = maxwell_filter(raw, destination=raw_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_std_fname).crop(0., 1.), 200.)
# Now with default
with warnings.catch_warnings(record=True):
with catch_logging() as log:
raw_sss = maxwell_filter(raw, destination=mf_head_origin,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose='warning')
assert_true('over 25 mm' in log.getvalue())
assert_meg_snr(raw_sss, Raw(sss_trans_default_fname), 125.)
destination = np.eye(4)
destination[2, 3] = 0.04
assert_allclose(raw_sss.info['dev_head_t']['trans'], destination)
# Now to sample's head pos
with warnings.catch_warnings(record=True):
with catch_logging() as log:
raw_sss = maxwell_filter(raw, destination=sample_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose='warning')
assert_true('= 25.6 mm' in log.getvalue())
assert_meg_snr(raw_sss, Raw(sss_trans_sample_fname), 350.)
assert_allclose(raw_sss.info['dev_head_t']['trans'],
read_info(sample_fname)['dev_head_t']['trans'])
# Degenerate cases
assert_raises(RuntimeError, maxwell_filter, raw,
destination=mf_head_origin, coord_frame='meg')
assert_raises(ValueError, maxwell_filter, raw, destination=[0.] * 4)
# TODO: Eventually add simulation tests mirroring Taulu's original paper
# that calculates the localization error:
# http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=1495874
def _assert_shielding(raw_sss, erm_power, shielding_factor, meg='mag'):
"""Helper to assert a minimum shielding factor using empty-room power"""
picks = pick_types(raw_sss.info, meg=meg)
sss_power = raw_sss[picks][0].ravel()
sss_power = np.sqrt(np.sum(sss_power * sss_power))
factor = erm_power / sss_power
assert_true(factor >= shielding_factor,
'Shielding factor %0.3f < %0.3f' % (factor, shielding_factor))
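# The "shielding factor" checked by _assert_shielding is just the ratio of
# root-sum-square empty-room signal before vs. after Maxwell filtering over
# the selected MEG channels; larger values mean more environmental
# interference was removed.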
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_shielding_factor():
"""Test Maxwell filter shielding factor using empty room"""
raw_erm = Raw(erm_fname, allow_maxshield='yes', preload=True)
picks = pick_types(raw_erm.info, meg='mag')
erm_power = raw_erm[picks][0]
erm_power = np.sqrt(np.sum(erm_power * erm_power))
# Vanilla SSS (second value would be for meg=True instead of meg='mag')
_assert_shielding(Raw(sss_erm_std_fname), erm_power, 10) # 1.5)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None)
_assert_shielding(raw_sss, erm_power, 12) # 1.5)
# Fine cal
_assert_shielding(Raw(sss_erm_fine_cal_fname), erm_power, 12) # 2.0)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin,
calibration=fine_cal_fname)
_assert_shielding(raw_sss, erm_power, 12) # 2.0)
# Crosstalk
_assert_shielding(Raw(sss_erm_ctc_fname), erm_power, 12) # 2.1)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin,
cross_talk=ctc_fname)
_assert_shielding(raw_sss, erm_power, 12) # 2.1)
# Fine cal + Crosstalk
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname,
origin=mf_meg_origin,
cross_talk=ctc_fname)
_assert_shielding(raw_sss, erm_power, 13) # 2.2)
# tSSS
_assert_shielding(Raw(sss_erm_st_fname), erm_power, 37) # 5.8)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 37) # 5.8)
# Crosstalk + tSSS
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
cross_talk=ctc_fname, origin=mf_meg_origin,
st_duration=1.)
_assert_shielding(raw_sss, erm_power, 38) # 5.91)
# Fine cal + tSSS
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname,
origin=mf_meg_origin, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 38) # 5.98)
# Fine cal + Crosstalk + tSSS
_assert_shielding(Raw(sss_erm_st1FineCalCrossTalk_fname),
erm_power, 39) # 6.07)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname, origin=mf_meg_origin,
cross_talk=ctc_fname, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 39) # 6.05)
# Fine cal + Crosstalk + tSSS + Reg-in
_assert_shielding(Raw(sss_erm_st1FineCalCrossTalkRegIn_fname), erm_power,
57) # 6.97)
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname,
cross_talk=ctc_fname, st_duration=1.,
origin=mf_meg_origin,
coord_frame='meg', regularize='in')
_assert_shielding(raw_sss, erm_power, 53) # 6.64)
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
_assert_shielding(raw_sss, erm_power, 58) # 7.0)
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname_3d,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
# Our 3D cal has worse defaults for this ERM than the 1D file
_assert_shielding(raw_sss, erm_power, 54)
# Show it by rewriting the 3D as 1D and testing it
temp_dir = _TempDir()
temp_fname = op.join(temp_dir, 'test_cal.dat')
with open(fine_cal_fname_3d, 'r') as fid:
with open(temp_fname, 'w') as fid_out:
for line in fid:
fid_out.write(' '.join(line.strip().split(' ')[:14]) + '\n')
raw_sss = maxwell_filter(raw_erm, calibration=temp_fname,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
# Our 3D cal has worse defaults for this ERM than the 1D file
_assert_shielding(raw_sss, erm_power, 44)
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_all():
"""Test maxwell filter using all options"""
raw_fnames = (raw_fname, raw_fname, erm_fname, sample_fname)
sss_fnames = (sss_st1FineCalCrossTalkRegIn_fname,
sss_st1FineCalCrossTalkRegInTransSample_fname,
sss_erm_st1FineCalCrossTalkRegIn_fname,
sss_samp_fname)
fine_cals = (fine_cal_fname,
fine_cal_fname,
fine_cal_fname,
fine_cal_mgh_fname)
coord_frames = ('head', 'head', 'meg', 'head')
ctcs = (ctc_fname, ctc_fname, ctc_fname, ctc_mgh_fname)
mins = (3.5, 3.5, 1.2, 0.9)
meds = (10.9, 10.4, 3.2, 6.)
st_durs = (1., 1., 1., None)
destinations = (None, sample_fname, None, None)
origins = (mf_head_origin,
mf_head_origin,
mf_meg_origin,
mf_head_origin)
for ii, rf in enumerate(raw_fnames):
raw = Raw(rf, allow_maxshield='yes').crop(0., 1.)
with warnings.catch_warnings(record=True): # head fit off-center
sss_py = maxwell_filter(
raw, calibration=fine_cals[ii], cross_talk=ctcs[ii],
st_duration=st_durs[ii], coord_frame=coord_frames[ii],
destination=destinations[ii], origin=origins[ii])
sss_mf = Raw(sss_fnames[ii])
assert_meg_snr(sss_py, sss_mf, mins[ii], meds[ii], msg=rf)
run_tests_if_main()
|
|
import os
import sys
import sysconfig
import copy
import ctypes
import ctypes.util
import warnings
from functools import wraps
import numpy
from numpy.ctypeslib import ndpointer
from scipy import interpolate
from galpy.util import multi, galpyWarning
from galpy.potential_src.Potential import Potential
_DEBUG= False
#Find and load the library
_lib= None
outerr= None
PY3= sys.version > '3'
if PY3: #pragma: no cover
_ext_suffix= sysconfig.get_config_var('EXT_SUFFIX')
else:
_ext_suffix= '.so'
for path in sys.path:
try:
_lib = ctypes.CDLL(os.path.join(path,'galpy_interppotential_c%s' % _ext_suffix))
except OSError as e:
if os.path.exists(os.path.join(path,'galpy_interppotential_c%s' % _ext_suffix)): #pragma: no cover
outerr= e
_lib = None
else:
break
if _lib is None: #pragma: no cover
if not outerr is None:
warnings.warn("interppotential_c extension module not loaded, because of error '%s' " % outerr,
galpyWarning)
else:
warnings.warn("interppotential_c extension module not loaded, because galpy_actionAngle_c%s image was not found" % _ext_suffix,
galpyWarning)
ext_loaded= False
else:
ext_loaded= True
def scalarVectorDecorator(func):
"""Decorator to return scalar outputs as a set"""
@wraps(func)
def scalar_wrapper(*args,**kwargs):
if numpy.array(args[1]).shape == () \
and numpy.array(args[2]).shape == (): #only if both R and z are scalars
scalarOut= True
args= (args[0],numpy.array([args[1]]),numpy.array([args[2]]))
elif numpy.array(args[1]).shape == () \
and not numpy.array(args[2]).shape == (): #R scalar, z vector
scalarOut= False
args= (args[0],args[1]*numpy.ones_like(args[2]),args[2])
elif not numpy.array(args[1]).shape == () \
and numpy.array(args[2]).shape == (): #R vector, z scalar
scalarOut= False
args= (args[0],args[1],args[2]*numpy.ones_like(args[1]))
else:
scalarOut= False
result= func(*args,**kwargs)
if scalarOut:
return result[0]
else:
return result
return scalar_wrapper
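# Rough sketch of what scalarVectorDecorator does for a method f(self, R, z)
# (using a hypothetical decorated instance 'pot'):
#   pot._evaluate(1., 0.1)                     -> arrays [1.], [0.1], return result[0]
#   pot._evaluate(1., numpy.array([0., 0.1]))  -> R is broadcast to z's shape
#   pot._evaluate(numpy.array([1., 2.]), 0.)   -> z is broadcast to R's shape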
def zsymDecorator(odd):
"""Decorator to deal with zsym=True input; set odd=True if the function is an odd function of z (like zforce)"""
def wrapper(func):
@wraps(func)
def zsym_wrapper(*args,**kwargs):
if args[0]._zsym:
out= func(args[0],args[1],numpy.fabs(args[2]),**kwargs)
else:
out= func(*args,**kwargs)
if odd and args[0]._zsym:
                return numpy.sign(args[2])*out
else:
return out
return zsym_wrapper
return wrapper
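# With zsym=True only z >= 0 needs to be tabulated: the wrapper above evaluates
# at |z| and, for odd functions of z (odd=True, e.g. the vertical force), flips
# the sign of the result with numpy.sign(z); even quantities (potential,
# density, radial force) are returned unchanged.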
def scalarDecorator(func):
"""Decorator to return scalar output for 1D functions (vcirc,etc.)"""
@wraps(func)
def scalar_wrapper(*args,**kwargs):
if numpy.array(args[1]).shape == ():
scalarOut= True
args= (args[0],numpy.array([args[1]]))
else:
scalarOut= False
result= func(*args,**kwargs)
if scalarOut:
return result[0]
else:
return result
return scalar_wrapper
class interpRZPotential(Potential):
"""Class that interpolates a given potential on a grid for fast orbit integration"""
def __init__(self,
RZPot=None,rgrid=(numpy.log(0.01),numpy.log(20.),101),
zgrid=(0.,1.,101),logR=True,
interpPot=False,interpRforce=False,interpzforce=False,
interpDens=False,
interpvcirc=False,
interpdvcircdr=False,
interpepifreq=False,interpverticalfreq=False,
use_c=False,enable_c=False,zsym=True,
numcores=None):
"""
NAME:
__init__
PURPOSE:
Initialize an interpRZPotential instance
INPUT:
RZPot - RZPotential to be interpolated
rgrid - R grid to be given to linspace as in rs= linspace(*rgrid)
zgrid - z grid to be given to linspace as in zs= linspace(*zgrid)
logR - if True, rgrid is in the log of R so logrs= linspace(*rgrid)
interpPot, interpRforce, interpzforce, interpDens,interpvcirc, interpepifreq, interpverticalfreq, interpdvcircdr= if True, interpolate these functions
use_c= use C to speed up the calculation of the grid
enable_c= enable use of C for interpolations
zsym= if True (default), the potential is assumed to be symmetric around z=0 (so you can use, e.g., zgrid=(0.,1.,101)).
numcores= if set to an integer, use this many cores (only used for vcirc, dvcircdR, epifreq, and verticalfreq; NOT NECESSARILY FASTER, TIME TO MAKE SURE)
OUTPUT:
instance
HISTORY:
2010-07-21 - Written - Bovy (NYU)
2013-01-24 - Started with new implementation - Bovy (IAS)
"""
if isinstance(RZPot,interpRZPotential):
from galpy.potential import PotentialError
raise PotentialError('Cannot setup interpRZPotential with another interpRZPotential')
Potential.__init__(self,amp=1.)
self._origPot= RZPot
self._rgrid= numpy.linspace(*rgrid)
self._logR= logR
if self._logR:
self._rgrid= numpy.exp(self._rgrid)
self._logrgrid= numpy.log(self._rgrid)
self._zgrid= numpy.linspace(*zgrid)
self._interpPot= interpPot
self._interpRforce= interpRforce
self._interpzforce= interpzforce
self._interpDens= interpDens
self._interpvcirc= interpvcirc
self._interpdvcircdr= interpdvcircdr
self._interpepifreq= interpepifreq
self._interpverticalfreq= interpverticalfreq
self._enable_c= enable_c*ext_loaded
self.hasC= self._enable_c
self._zsym= zsym
if interpPot:
if use_c*ext_loaded:
self._potGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid)
else:
from galpy.potential import evaluatePotentials
potGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
for ii in range(len(self._rgrid)):
for jj in range(len(self._zgrid)):
potGrid[ii,jj]= evaluatePotentials(self._rgrid[ii],self._zgrid[jj],self._origPot)
self._potGrid= potGrid
if self._logR:
self._potInterp= interpolate.RectBivariateSpline(self._logrgrid,
self._zgrid,
self._potGrid,
kx=3,ky=3,s=0.)
else:
self._potInterp= interpolate.RectBivariateSpline(self._rgrid,
self._zgrid,
self._potGrid,
kx=3,ky=3,s=0.)
if enable_c*ext_loaded:
self._potGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._potGrid)
if interpRforce:
if use_c*ext_loaded:
self._rforceGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid,rforce=True)
else:
from galpy.potential import evaluateRforces
rforceGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
for ii in range(len(self._rgrid)):
for jj in range(len(self._zgrid)):
rforceGrid[ii,jj]= evaluateRforces(self._rgrid[ii],self._zgrid[jj],self._origPot)
self._rforceGrid= rforceGrid
if self._logR:
self._rforceInterp= interpolate.RectBivariateSpline(self._logrgrid,
self._zgrid,
self._rforceGrid,
kx=3,ky=3,s=0.)
else:
self._rforceInterp= interpolate.RectBivariateSpline(self._rgrid,
self._zgrid,
self._rforceGrid,
kx=3,ky=3,s=0.)
if enable_c*ext_loaded:
self._rforceGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._rforceGrid)
if interpzforce:
if use_c*ext_loaded:
self._zforceGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid,zforce=True)
else:
from galpy.potential import evaluatezforces
zforceGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
for ii in range(len(self._rgrid)):
for jj in range(len(self._zgrid)):
zforceGrid[ii,jj]= evaluatezforces(self._rgrid[ii],self._zgrid[jj],self._origPot)
self._zforceGrid= zforceGrid
if self._logR:
self._zforceInterp= interpolate.RectBivariateSpline(self._logrgrid,
self._zgrid,
self._zforceGrid,
kx=3,ky=3,s=0.)
else:
self._zforceInterp= interpolate.RectBivariateSpline(self._rgrid,
self._zgrid,
self._zforceGrid,
kx=3,ky=3,s=0.)
if enable_c*ext_loaded:
self._zforceGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._zforceGrid)
if interpDens:
from galpy.potential import evaluateDensities
densGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
for ii in range(len(self._rgrid)):
for jj in range(len(self._zgrid)):
densGrid[ii,jj]= evaluateDensities(self._rgrid[ii],self._zgrid[jj],self._origPot)
self._densGrid= densGrid
if self._logR:
self._densInterp= interpolate.RectBivariateSpline(self._logrgrid,
self._zgrid,
numpy.log(self._densGrid+10.**-10.),
kx=3,ky=3,s=0.)
else:
self._densInterp= interpolate.RectBivariateSpline(self._rgrid,
self._zgrid,
numpy.log(self._densGrid+10.**-10.),
kx=3,ky=3,s=0.)
if interpvcirc:
from galpy.potential import vcirc
if not numcores is None:
self._vcircGrid= multi.parallel_map((lambda x: vcirc(self._origPot,self._rgrid[x])),
list(range(len(self._rgrid))),numcores=numcores)
else:
self._vcircGrid= numpy.array([vcirc(self._origPot,r) for r in self._rgrid])
if self._logR:
self._vcircInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._vcircGrid,k=3)
else:
self._vcircInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._vcircGrid,k=3)
if interpdvcircdr:
from galpy.potential import dvcircdR
if not numcores is None:
self._dvcircdrGrid= multi.parallel_map((lambda x: dvcircdR(self._origPot,self._rgrid[x])),
list(range(len(self._rgrid))),numcores=numcores)
else:
self._dvcircdrGrid= numpy.array([dvcircdR(self._origPot,r) for r in self._rgrid])
if self._logR:
self._dvcircdrInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._dvcircdrGrid,k=3)
else:
self._dvcircdrInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._dvcircdrGrid,k=3)
if interpepifreq:
from galpy.potential import epifreq
if not numcores is None:
self._epifreqGrid= numpy.array(multi.parallel_map((lambda x: epifreq(self._origPot,self._rgrid[x])),
list(range(len(self._rgrid))),numcores=numcores))
else:
self._epifreqGrid= numpy.array([epifreq(self._origPot,r) for r in self._rgrid])
indx= True-numpy.isnan(self._epifreqGrid)
if numpy.sum(indx) < 4:
if self._logR:
self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid[indx],self._epifreqGrid[indx],k=1)
else:
self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid[indx],self._epifreqGrid[indx],k=1)
else:
if self._logR:
self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid[indx],self._epifreqGrid[indx],k=3)
else:
self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid[indx],self._epifreqGrid[indx],k=3)
if interpverticalfreq:
from galpy.potential import verticalfreq
if not numcores is None:
self._verticalfreqGrid= multi.parallel_map((lambda x: verticalfreq(self._origPot,self._rgrid[x])),
list(range(len(self._rgrid))),numcores=numcores)
else:
self._verticalfreqGrid= numpy.array([verticalfreq(self._origPot,r) for r in self._rgrid])
if self._logR:
self._verticalfreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._verticalfreqGrid,k=3)
else:
self._verticalfreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._verticalfreqGrid,k=3)
return None
@scalarVectorDecorator
@zsymDecorator(False)
def _evaluate(self,R,z,phi=0.,t=0.):
from galpy.potential import evaluatePotentials
if self._interpPot:
out= numpy.empty_like(R)
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])\
*(z <= self._zgrid[-1])*(z >= self._zgrid[0])
if numpy.sum(indx) > 0:
if self._enable_c:
out[indx]= eval_potential_c(self,R[indx],z[indx])[0]/self._amp
else:
if self._logR:
out[indx]= self._potInterp.ev(numpy.log(R[indx]),z[indx])
else:
out[indx]= self._potInterp.ev(R[indx],z[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= evaluatePotentials(R[True-indx],
z[True-indx],
self._origPot)
return out
else:
return evaluatePotentials(R,z,self._origPot)
@scalarVectorDecorator
@zsymDecorator(False)
def _Rforce(self,R,z,phi=0.,t=0.):
from galpy.potential import evaluateRforces
if self._interpRforce:
out= numpy.empty_like(R)
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])\
*(z <= self._zgrid[-1])*(z >= self._zgrid[0])
if numpy.sum(indx) > 0:
if self._enable_c:
out[indx]= eval_force_c(self,R[indx],z[indx])[0]/self._amp
else:
if self._logR:
out[indx]= self._rforceInterp.ev(numpy.log(R[indx]),z[indx])
else:
out[indx]= self._rforceInterp.ev(R[indx],z[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= evaluateRforces(R[True-indx],
z[True-indx],
self._origPot)
return out
else:
return evaluateRforces(R,z,self._origPot)
@scalarVectorDecorator
@zsymDecorator(True)
def _zforce(self,R,z,phi=0.,t=0.):
from galpy.potential import evaluatezforces
if self._interpzforce:
out= numpy.empty_like(R)
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])\
*(z <= self._zgrid[-1])*(z >= self._zgrid[0])
if numpy.sum(indx) > 0:
if self._enable_c:
out[indx]= eval_force_c(self,R[indx],z[indx],
zforce=True)[0]/self._amp
else:
if self._logR:
out[indx]= self._zforceInterp.ev(numpy.log(R[indx]),
z[indx])
else:
out[indx]= self._zforceInterp.ev(R[indx],z[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= evaluatezforces(R[True-indx],
z[True-indx],
self._origPot)
return out
else:
return evaluatezforces(R,z,self._origPot)
def _Rzderiv(self,R,z,phi=0.,t=0.):
from galpy.potential import evaluateRzderivs
return evaluateRzderivs(R,z,self._origPot)
@scalarVectorDecorator
@zsymDecorator(False)
def _dens(self,R,z,phi=0.,t=0.):
from galpy.potential import evaluateDensities
if self._interpDens:
out= numpy.empty_like(R)
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])\
*(z <= self._zgrid[-1])*(z >= self._zgrid[0])
if numpy.sum(indx) > 0:
if self._logR:
out[indx]= numpy.exp(self._densInterp.ev(numpy.log(R[indx]),z[indx]))-10.**-10.
else:
out[indx]= numpy.exp(self._densInterp.ev(R[indx],z[indx]))-10.**-10.
if numpy.sum(True-indx) > 0:
out[True-indx]= evaluateDensities(R[True-indx],
z[True-indx],
self._origPot)
return out
else:
return evaluateDensities(R,z,self._origPot)
@scalarDecorator
def vcirc(self,R):
from galpy.potential import vcirc
if self._interpvcirc:
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])
out= numpy.empty_like(R)
if numpy.sum(indx) > 0:
if self._logR:
out[indx]= self._vcircInterp(numpy.log(R[indx]))
else:
out[indx]= self._vcircInterp(R[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= vcirc(self._origPot,R[True-indx])
return out
else:
return vcirc(self._origPot,R)
@scalarDecorator
def dvcircdR(self,R):
from galpy.potential import dvcircdR
if self._interpdvcircdr:
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])
out= numpy.empty_like(R)
if numpy.sum(indx) > 0:
if self._logR:
out[indx]= self._dvcircdrInterp(numpy.log(R[indx]))
else:
out[indx]= self._dvcircdrInterp(R[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= dvcircdR(self._origPot,R[True-indx])
return out
else:
return dvcircdR(self._origPot,R)
@scalarDecorator
def epifreq(self,R):
from galpy.potential import epifreq
if self._interpepifreq:
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])
out= numpy.empty_like(R)
if numpy.sum(indx) > 0:
if self._logR:
out[indx]= self._epifreqInterp(numpy.log(R[indx]))
else:
out[indx]= self._epifreqInterp(R[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= epifreq(self._origPot,R[True-indx])
return out
else:
return epifreq(self._origPot,R)
@scalarDecorator
def verticalfreq(self,R):
from galpy.potential import verticalfreq
if self._interpverticalfreq:
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])
out= numpy.empty_like(R)
if numpy.sum(indx) > 0:
if self._logR:
out[indx]= self._verticalfreqInterp(numpy.log(R[indx]))
else:
out[indx]= self._verticalfreqInterp(R[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= verticalfreq(self._origPot,R[True-indx])
return out
else:
return verticalfreq(self._origPot,R)
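# Minimal usage sketch (not part of this module; assumes a standard galpy
# potential such as MiyamotoNagaiPotential):
#
#   from galpy.potential import MiyamotoNagaiPotential
#   mp= MiyamotoNagaiPotential(a=0.5,b=0.0375,normalize=1.)
#   ip= interpRZPotential(RZPot=mp,interpPot=True,interpRforce=True,zsym=True)
#   ip(1.,0.1)         # interpolated potential at (R,z)=(1.,0.1)
#   ip.Rforce(1.,0.1)  # interpolated radial force; points outside the grid
#                      # fall back to the exact potential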
def calc_potential_c(pot,R,z,rforce=False,zforce=False):
"""
NAME:
calc_potential_c
PURPOSE:
Use C to calculate the potential on a grid
INPUT:
pot - Potential or list of such instances
R - grid in R
z - grid in z
rforce=, zforce= if either of these is True, calculate the radial or vertical force instead
OUTPUT:
potential on the grid (2D array)
HISTORY:
2013-01-24 - Written - Bovy (IAS)
2013-01-29 - Added forces - Bovy (IAS)
"""
    from galpy.orbit_src.integrateFullOrbit import _parse_pot  # imported here because a top-level import would create an import loop
#Parse the potential
npot, pot_type, pot_args= _parse_pot(pot)
#Set up result arrays
out= numpy.empty((len(R),len(z)))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
if rforce:
interppotential_calc_potentialFunc= _lib.calc_rforce
elif zforce:
interppotential_calc_potentialFunc= _lib.calc_zforce
else:
interppotential_calc_potentialFunc= _lib.calc_potential
interppotential_calc_potentialFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
out= numpy.require(out,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
interppotential_calc_potentialFunc(len(R),
R,
len(z),
z,
ctypes.c_int(npot),
pot_type,
pot_args,
out,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: z= numpy.asfortranarray(z)
return (out,err.value)
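# Illustrative usage sketch (hypothetical helper, not part of galpy's API):
# tabulate a potential on an (R,z) grid with the C helper above. Assumes the
# galpy C extension (_lib) was built; MiyamotoNagaiPotential is only used as a
# convenient example potential.
def _example_calc_potential_grid():
    from galpy.potential import MiyamotoNagaiPotential
    examplePot= MiyamotoNagaiPotential(a=0.5,b=0.0375,normalize=1.)
    Rs= numpy.linspace(0.01,2.,101)
    zs= numpy.linspace(-0.2,0.2,101)
    potGrid, err= calc_potential_c(examplePot,Rs,zs)
    if err != 0:
        raise RuntimeError("calc_potential_c returned error code %i" % err)
    return potGrid #2D array of shape (len(Rs),len(zs))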
def calc_2dsplinecoeffs_c(array2d):
"""
NAME:
calc_2dsplinecoeffs_c
PURPOSE:
Use C to calculate spline coefficients for a 2D array
INPUT:
array2d
OUTPUT:
new array with spline coeffs
HISTORY:
2013-01-24 - Written - Bovy (IAS)
"""
#Set up result arrays
out= copy.copy(array2d)
out= numpy.require(out,dtype=numpy.float64,requirements=['C','W'])
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
interppotential_calc_2dsplinecoeffs= _lib.samples_to_coefficients
interppotential_calc_2dsplinecoeffs.argtypes= [ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ctypes.c_int]
#Run the C code
interppotential_calc_2dsplinecoeffs(out,out.shape[1],out.shape[0])
return out
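# Continuation of the sketch above (hypothetical helper, not part of galpy's
# API): the tabulated grid is converted into the 2D interpolating-spline
# coefficients that the interpolated potential hands to the C evaluators below
# (eval_potential_c, eval_force_c).
def _example_spline_coefficients():
    potGrid= _example_calc_potential_grid()
    return calc_2dsplinecoeffs_c(potGrid) #same shape, now holding spline coefficients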
def eval_potential_c(pot,R,z):
"""
NAME:
eval_potential_c
PURPOSE:
Use C to evaluate the interpolated potential
INPUT:
pot - Potential or list of such instances
R - array
z - array
OUTPUT:
       potential evaluated at R and z
HISTORY:
2013-01-24 - Written - Bovy (IAS)
"""
    from galpy.orbit_src.integrateFullOrbit import _parse_pot #imported here because a module-level import would be circular
#Parse the potential
npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
#Set up result arrays
out= numpy.empty((len(R)))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
interppotential_calc_potentialFunc= _lib.eval_potential
interppotential_calc_potentialFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
out= numpy.require(out,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
interppotential_calc_potentialFunc(len(R),
R,
z,
ctypes.c_int(npot),
pot_type,
pot_args,
out,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: z= numpy.asfortranarray(z)
return (out,err.value)
def eval_force_c(pot,R,z,zforce=False):
"""
NAME:
eval_force_c
PURPOSE:
Use C to evaluate the interpolated potential's forces
INPUT:
pot - Potential or list of such instances
R - array
z - array
zforce= if True, return the vertical force, otherwise return the radial force
OUTPUT:
       force evaluated at R and z
HISTORY:
2013-01-29 - Written - Bovy (IAS)
"""
    from galpy.orbit_src.integrateFullOrbit import _parse_pot #imported here because a module-level import would be circular
#Parse the potential
npot, pot_type, pot_args= _parse_pot(pot)
#Set up result arrays
out= numpy.empty((len(R)))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
if zforce:
interppotential_calc_forceFunc= _lib.eval_zforce
else:
interppotential_calc_forceFunc= _lib.eval_rforce
interppotential_calc_forceFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
out= numpy.require(out,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
interppotential_calc_forceFunc(len(R),
R,
z,
ctypes.c_int(npot),
pot_type,
pot_args,
out,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: z= numpy.asfortranarray(z)
return (out,err.value)
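# Small helper: like numpy.sign, but maps exactly-zero entries to +1.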
def sign(x):
out= numpy.ones_like(x)
out[(x < 0.)]= -1.
return out
|
|
# Copyright 2015 Red Hat, Inc.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the ironic driver."""
from ironicclient import exc as ironic_exception
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state as nova_states
from nova.compute import task_states
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.openstack.common import loopingcall
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import utils
from nova.tests.unit.virt.ironic import utils as ironic_utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.ironic import client_wrapper as cw
from nova.virt.ironic import driver as ironic_driver
from nova.virt.ironic import ironic_states
CONF = cfg.CONF
IRONIC_FLAGS = dict(
api_version=1,
group='ironic',
)
FAKE_CLIENT = ironic_utils.FakeClient()
class FakeClientWrapper(cw.IronicClientWrapper):
def _get_client(self):
return FAKE_CLIENT
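# Minimal stand-in for loopingcall.FixedIntervalLoopingCall: start() returns the
# fake itself, so tests can assert on start()/wait() without actually looping.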
class FakeLoopingCall(object):
def __init__(self):
self.wait = mock.MagicMock()
self.start = mock.MagicMock()
self.start.return_value = self
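# Canned Ironic node properties and capability stats shared by the
# resource-reporting tests below.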
def _get_properties():
return {'cpus': 2,
'memory_mb': 512,
'local_gb': 10,
'cpu_arch': 'x86_64'}
def _get_stats():
return {'cpu_arch': 'x86_64'}
FAKE_CLIENT_WRAPPER = FakeClientWrapper()
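# Patch IronicClientWrapper for the whole test case so every wrapper the driver
# creates resolves to FAKE_CLIENT_WRAPPER (and therefore to FAKE_CLIENT).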
@mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER)
class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(cw, 'IronicClientWrapper',
lambda *_: FAKE_CLIENT_WRAPPER)
def setUp(self):
super(IronicDriverTestCase, self).setUp()
self.flags(**IRONIC_FLAGS)
# set client log config to exercise the code that manipulates it
CONF.set_override('client_log_level', 'DEBUG', group='ironic')
self.driver = ironic_driver.IronicDriver(None)
self.driver.virtapi = fake.FakeVirtAPI()
self.ctx = nova_context.get_admin_context()
self.instance_uuid = uuidutils.generate_uuid()
# mock retries configs to avoid sleeps and make tests run quicker
CONF.set_default('api_max_retries', default=1, group='ironic')
CONF.set_default('api_retry_interval', default=0, group='ironic')
def test_public_api_signatures(self):
self.assertPublicAPISignatures(driver.ComputeDriver(None), self.driver)
def test_validate_driver_loading(self):
self.assertIsInstance(self.driver, ironic_driver.IronicDriver)
def test_driver_capabilities(self):
self.assertFalse(self.driver.capabilities['has_imagecache'],
'Driver capabilities for \'has_imagecache\''
                         ' is invalid')
self.assertFalse(self.driver.capabilities['supports_recreate'],
'Driver capabilities for \'supports_recreate\''
                         ' is invalid')
def test__get_hypervisor_type(self):
self.assertEqual('ironic', self.driver._get_hypervisor_type())
def test__get_hypervisor_version(self):
self.assertEqual(1, self.driver._get_hypervisor_version())
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test__validate_instance_and_node(self, mock_gbiui):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid)
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
ironicclient = cw.IronicClientWrapper()
mock_gbiui.return_value = node
result = ironic_driver._validate_instance_and_node(ironicclient,
instance)
self.assertEqual(result.uuid, node_uuid)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test__validate_instance_and_node_failed(self, mock_gbiui):
ironicclient = cw.IronicClientWrapper()
mock_gbiui.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
self.assertRaises(exception.InstanceNotFound,
ironic_driver._validate_instance_and_node,
ironicclient, instance)
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_active_pass(self, fake_validate):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
node = ironic_utils.get_test_node(
provision_state=ironic_states.DEPLOYING)
fake_validate.return_value = node
self.driver._wait_for_active(FAKE_CLIENT, instance)
self.assertTrue(fake_validate.called)
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_active_done(self, fake_validate):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
node = ironic_utils.get_test_node(
provision_state=ironic_states.ACTIVE)
fake_validate.return_value = node
self.assertRaises(loopingcall.LoopingCallDone,
self.driver._wait_for_active,
FAKE_CLIENT, instance)
self.assertTrue(fake_validate.called)
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_active_fail(self, fake_validate):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
node = ironic_utils.get_test_node(
provision_state=ironic_states.DEPLOYFAIL)
fake_validate.return_value = node
self.assertRaises(exception.InstanceDeployFailure,
self.driver._wait_for_active,
FAKE_CLIENT, instance)
self.assertTrue(fake_validate.called)
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_power_state_pass(self, fake_validate):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
node = ironic_utils.get_test_node(
target_power_state=ironic_states.POWER_OFF)
fake_validate.return_value = node
self.driver._wait_for_power_state(
FAKE_CLIENT, instance, 'fake message')
self.assertTrue(fake_validate.called)
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_power_state_ok(self, fake_validate):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
node = ironic_utils.get_test_node(
target_power_state=ironic_states.NOSTATE)
fake_validate.return_value = node
self.assertRaises(loopingcall.LoopingCallDone,
self.driver._wait_for_power_state,
FAKE_CLIENT, instance, 'fake message')
self.assertTrue(fake_validate.called)
def test__node_resource(self):
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
stats = _get_stats()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
properties=props)
result = self.driver._node_resource(node)
wantkeys = ["hypervisor_hostname", "hypervisor_type",
"hypervisor_version", "cpu_info",
"vcpus", "vcpus_used",
"memory_mb", "memory_mb_used",
"local_gb", "local_gb_used",
"disk_available_least",
"supported_instances",
"stats"]
wantkeys.sort()
gotkeys = result.keys()
gotkeys.sort()
self.assertEqual(wantkeys, gotkeys)
self.assertEqual(props['cpus'], result['vcpus'])
self.assertEqual(props['cpus'], result['vcpus_used'])
self.assertEqual(props['memory_mb'], result['memory_mb'])
self.assertEqual(props['memory_mb'], result['memory_mb_used'])
self.assertEqual(props['local_gb'], result['local_gb'])
self.assertEqual(props['local_gb'], result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, jsonutils.loads(result['stats']))
def test__node_resource_canonicalizes_arch(self):
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
props['cpu_arch'] = 'i386'
node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
result = self.driver._node_resource(node)
self.assertEqual('i686',
jsonutils.loads(result['supported_instances'])[0][0])
self.assertEqual('i386',
jsonutils.loads(result['stats'])['cpu_arch'])
def test__node_resource_unknown_arch(self):
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
del props['cpu_arch']
node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
result = self.driver._node_resource(node)
self.assertEqual([], jsonutils.loads(result['supported_instances']))
def test__node_resource_exposes_capabilities(self):
props = _get_properties()
props['capabilities'] = 'test:capability'
node = ironic_utils.get_test_node(properties=props)
result = self.driver._node_resource(node)
stats = jsonutils.loads(result['stats'])
self.assertIsNone(stats.get('capabilities'))
self.assertEqual('capability', stats.get('test'))
def test__node_resource_no_capabilities(self):
props = _get_properties()
props['capabilities'] = None
node = ironic_utils.get_test_node(properties=props)
result = self.driver._node_resource(node)
self.assertIsNone(jsonutils.loads(result['stats']).get('capabilities'))
def test__node_resource_malformed_capabilities(self):
props = _get_properties()
props['capabilities'] = 'test:capability,:no_key,no_val:'
node = ironic_utils.get_test_node(properties=props)
result = self.driver._node_resource(node)
stats = jsonutils.loads(result['stats'])
self.assertEqual('capability', stats.get('test'))
def test__node_resource_available(self):
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
stats = _get_stats()
node = ironic_utils.get_test_node(
uuid=node_uuid,
instance_uuid=None,
power_state=ironic_states.POWER_OFF,
properties=props,
provision_state=ironic_states.AVAILABLE)
result = self.driver._node_resource(node)
self.assertEqual(props['cpus'], result['vcpus'])
self.assertEqual(0, result['vcpus_used'])
self.assertEqual(props['memory_mb'], result['memory_mb'])
self.assertEqual(0, result['memory_mb_used'])
self.assertEqual(props['local_gb'], result['local_gb'])
self.assertEqual(0, result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, jsonutils.loads(result['stats']))
@mock.patch.object(ironic_driver.IronicDriver,
'_node_resources_unavailable')
def test__node_resource_unavailable_node_res(self, mock_res_unavail):
mock_res_unavail.return_value = True
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
stats = _get_stats()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=None,
properties=props)
result = self.driver._node_resource(node)
self.assertEqual(0, result['vcpus'])
self.assertEqual(0, result['vcpus_used'])
self.assertEqual(0, result['memory_mb'])
self.assertEqual(0, result['memory_mb_used'])
self.assertEqual(0, result['local_gb'])
self.assertEqual(0, result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, jsonutils.loads(result['stats']))
@mock.patch.object(ironic_driver.IronicDriver,
'_node_resources_used')
def test__node_resource_used_node_res(self, mock_res_used):
mock_res_used.return_value = True
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
stats = _get_stats()
node = ironic_utils.get_test_node(
uuid=node_uuid,
instance_uuid=uuidutils.generate_uuid(),
provision_state=ironic_states.ACTIVE,
properties=props)
result = self.driver._node_resource(node)
self.assertEqual(props['cpus'], result['vcpus'])
self.assertEqual(props['cpus'], result['vcpus_used'])
self.assertEqual(props['memory_mb'], result['memory_mb'])
self.assertEqual(props['memory_mb'], result['memory_mb_used'])
self.assertEqual(props['local_gb'], result['local_gb'])
self.assertEqual(props['local_gb'], result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, jsonutils.loads(result['stats']))
@mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
create=True)
def test__start_firewall(self, mock_aif, mock_sbf, mock_pif):
fake_inst = 'fake-inst'
fake_net_info = utils.get_test_network_info()
self.driver._start_firewall(fake_inst, fake_net_info)
mock_aif.assert_called_once_with(fake_inst, fake_net_info)
mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
mock_pif.assert_called_once_with(fake_inst, fake_net_info)
@mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
create=True)
def test__stop_firewall(self, mock_ui):
fake_inst = 'fake-inst'
fake_net_info = utils.get_test_network_info()
self.driver._stop_firewall(fake_inst, fake_net_info)
mock_ui.assert_called_once_with(fake_inst, fake_net_info)
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_instance_exists(self, mock_call):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
self.assertTrue(self.driver.instance_exists(instance))
mock_call.assert_called_once_with('node.get_by_instance_uuid',
self.instance_uuid)
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_instance_exists_fail(self, mock_call):
mock_call.side_effect = ironic_exception.NotFound
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
self.assertFalse(self.driver.instance_exists(instance))
mock_call.assert_called_once_with('node.get_by_instance_uuid',
self.instance_uuid)
@mock.patch.object(cw.IronicClientWrapper, 'call')
@mock.patch.object(objects.Instance, 'get_by_uuid')
def test_list_instances(self, mock_inst_by_uuid, mock_call):
nodes = []
instances = []
for i in range(2):
uuid = uuidutils.generate_uuid()
instances.append(fake_instance.fake_instance_obj(self.ctx,
id=i,
uuid=uuid))
nodes.append(ironic_utils.get_test_node(instance_uuid=uuid))
mock_inst_by_uuid.side_effect = instances
mock_call.return_value = nodes
response = self.driver.list_instances()
mock_call.assert_called_with("node.list", associated=True, limit=0)
expected_calls = [mock.call(mock.ANY, instances[0].uuid),
mock.call(mock.ANY, instances[1].uuid)]
mock_inst_by_uuid.assert_has_calls(expected_calls)
self.assertEqual(['instance-00000000', 'instance-00000001'],
sorted(response))
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_list_instance_uuids(self, mock_call):
num_nodes = 2
nodes = []
for n in range(num_nodes):
nodes.append(ironic_utils.get_test_node(
instance_uuid=uuidutils.generate_uuid()))
mock_call.return_value = nodes
uuids = self.driver.list_instance_uuids()
mock_call.assert_called_with('node.list', associated=True, limit=0)
expected = [n.instance_uuid for n in nodes]
self.assertEqual(sorted(expected), sorted(uuids))
@mock.patch.object(FAKE_CLIENT.node, 'list')
@mock.patch.object(FAKE_CLIENT.node, 'get')
def test_node_is_available_empty_cache_empty_list(self, mock_get,
mock_list):
node = ironic_utils.get_test_node()
mock_get.return_value = node
mock_list.return_value = []
self.assertTrue(self.driver.node_is_available(node.uuid))
mock_get.assert_called_with(node.uuid)
mock_list.assert_called_with(detail=True, limit=0)
mock_get.side_effect = ironic_exception.NotFound
self.assertFalse(self.driver.node_is_available(node.uuid))
@mock.patch.object(FAKE_CLIENT.node, 'list')
@mock.patch.object(FAKE_CLIENT.node, 'get')
def test_node_is_available_empty_cache(self, mock_get, mock_list):
node = ironic_utils.get_test_node()
mock_get.return_value = node
mock_list.return_value = [node]
self.assertTrue(self.driver.node_is_available(node.uuid))
mock_list.assert_called_with(detail=True, limit=0)
self.assertEqual(0, mock_get.call_count)
@mock.patch.object(FAKE_CLIENT.node, 'list')
@mock.patch.object(FAKE_CLIENT.node, 'get')
def test_node_is_available_with_cache(self, mock_get, mock_list):
node = ironic_utils.get_test_node()
mock_get.return_value = node
mock_list.return_value = [node]
# populate the cache
self.driver.get_available_nodes(refresh=True)
# prove that zero calls are made after populating cache
mock_list.reset_mock()
self.assertTrue(self.driver.node_is_available(node.uuid))
self.assertEqual(0, mock_list.call_count)
self.assertEqual(0, mock_get.call_count)
def test__node_resources_unavailable(self):
node_dicts = [
# a node in maintenance /w no instance and power OFF
{'uuid': uuidutils.generate_uuid(),
'maintenance': True,
'power_state': ironic_states.POWER_OFF,
'provision_state': ironic_states.AVAILABLE},
# a node in maintenance /w no instance and ERROR power state
{'uuid': uuidutils.generate_uuid(),
'maintenance': True,
'power_state': ironic_states.ERROR,
'provision_state': ironic_states.AVAILABLE},
# a node not in maintenance /w no instance and bad power state
{'uuid': uuidutils.generate_uuid(),
'power_state': ironic_states.NOSTATE,
'provision_state': ironic_states.AVAILABLE},
# a node not in maintenance or bad power state, bad provision state
            {'uuid': uuidutils.generate_uuid(),
'power_state': ironic_states.POWER_ON,
'provision_state': ironic_states.MANAGEABLE}
]
for n in node_dicts:
node = ironic_utils.get_test_node(**n)
self.assertTrue(self.driver._node_resources_unavailable(node))
for ok_state in (ironic_states.AVAILABLE, ironic_states.NOSTATE):
# these are both ok and should present as available
avail_node = ironic_utils.get_test_node(
power_state=ironic_states.POWER_OFF,
provision_state=ok_state)
unavailable = self.driver._node_resources_unavailable(avail_node)
self.assertFalse(unavailable)
def test__node_resources_used(self):
node_dicts = [
# a node in maintenance /w instance and active
{'uuid': uuidutils.generate_uuid(),
'instance_uuid': uuidutils.generate_uuid(),
'provision_state': ironic_states.ACTIVE},
# a node in deploying but no instance yet
{'uuid': uuidutils.generate_uuid(),
'provision_state': ironic_states.DEPLOYWAIT},
# a node that made it to cleaning before losing its instance uuid
            {'uuid': uuidutils.generate_uuid(),
'instance_uuid': uuidutils.generate_uuid(),
'provision_state': ironic_states.CLEANING},
]
for n in node_dicts:
node = ironic_utils.get_test_node(**n)
self.assertTrue(self.driver._node_resources_used(node))
unused_node = ironic_utils.get_test_node(
            provision_state=ironic_states.AVAILABLE)
self.assertFalse(self.driver._node_resources_used(unused_node))
@mock.patch.object(FAKE_CLIENT.node, 'list')
def test_get_available_nodes(self, mock_list):
node_dicts = [
# a node in maintenance /w no instance and power OFF
{'uuid': uuidutils.generate_uuid(),
'maintenance': True,
'power_state': ironic_states.POWER_OFF},
# a node /w instance and power ON
{'uuid': uuidutils.generate_uuid(),
'instance_uuid': self.instance_uuid,
'power_state': ironic_states.POWER_ON},
# a node not in maintenance /w no instance and bad power state
{'uuid': uuidutils.generate_uuid(),
'power_state': ironic_states.ERROR},
]
nodes = [ironic_utils.get_test_node(**n) for n in node_dicts]
mock_list.return_value = nodes
available_nodes = self.driver.get_available_nodes()
expected_uuids = [n['uuid'] for n in node_dicts]
self.assertEqual(sorted(expected_uuids), sorted(available_nodes))
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(FAKE_CLIENT.node, 'list')
@mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
def test_get_available_resource(self, mock_nr, mock_list, mock_get):
node = ironic_utils.get_test_node()
node_2 = ironic_utils.get_test_node(uuid=uuidutils.generate_uuid())
fake_resource = 'fake-resource'
mock_get.return_value = node
# ensure cache gets populated without the node we want
mock_list.return_value = [node_2]
mock_nr.return_value = fake_resource
result = self.driver.get_available_resource(node.uuid)
self.assertEqual(fake_resource, result)
mock_nr.assert_called_once_with(node)
mock_get.assert_called_once_with(node.uuid)
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(FAKE_CLIENT.node, 'list')
@mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
def test_get_available_resource_with_cache(self, mock_nr, mock_list,
mock_get):
node = ironic_utils.get_test_node()
fake_resource = 'fake-resource'
mock_list.return_value = [node]
mock_nr.return_value = fake_resource
# populate the cache
self.driver.get_available_nodes(refresh=True)
mock_list.reset_mock()
result = self.driver.get_available_resource(node.uuid)
self.assertEqual(fake_resource, result)
self.assertEqual(0, mock_list.call_count)
self.assertEqual(0, mock_get.call_count)
mock_nr.assert_called_once_with(node)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test_get_info(self, mock_gbiu):
properties = {'memory_mb': 512, 'cpus': 2}
power_state = ironic_states.POWER_ON
node = ironic_utils.get_test_node(instance_uuid=self.instance_uuid,
properties=properties,
power_state=power_state)
mock_gbiu.return_value = node
# ironic_states.POWER_ON should be mapped to
# nova_states.RUNNING
memory_kib = properties['memory_mb'] * 1024
instance = fake_instance.fake_instance_obj('fake-context',
uuid=self.instance_uuid)
result = self.driver.get_info(instance)
self.assertEqual(hardware.InstanceInfo(state=nova_states.RUNNING,
max_mem_kb=memory_kib,
mem_kb=memory_kib,
num_cpu=properties['cpus']),
result)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test_get_info_http_not_found(self, mock_gbiu):
mock_gbiu.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(
self.ctx, uuid=uuidutils.generate_uuid())
result = self.driver.get_info(instance)
self.assertEqual(hardware.InstanceInfo(state=nova_states.NOSTATE),
result)
@mock.patch.object(FAKE_CLIENT, 'node')
def test_macs_for_instance(self, mock_node):
node = ironic_utils.get_test_node()
port = ironic_utils.get_test_port()
mock_node.get.return_value = node
mock_node.list_ports.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
result = self.driver.macs_for_instance(instance)
self.assertEqual(set([port.address]), result)
mock_node.list_ports.assert_called_once_with(node.uuid)
@mock.patch.object(FAKE_CLIENT.node, 'get')
def test_macs_for_instance_http_not_found(self, mock_get):
mock_get.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(
self.ctx, node=uuidutils.generate_uuid())
result = self.driver.macs_for_instance(instance)
self.assertIsNone(result)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
def _test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active,
mock_node, mock_looping, mock_save):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
fake_flavor = objects.Flavor(ephemeral_gb=0)
instance.flavor = fake_flavor
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
mock_node.get_by_instance_uuid.return_value = node
mock_node.set_provision_state.return_value = mock.MagicMock()
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
self.driver.spawn(self.ctx, instance, None, [], None)
mock_node.get.assert_called_once_with(node_uuid)
mock_node.validate.assert_called_once_with(node_uuid)
mock_adf.assert_called_once_with(node, instance, None, fake_flavor)
mock_pvifs.assert_called_once_with(node, instance, None)
mock_sf.assert_called_once_with(instance, None)
mock_node.set_provision_state.assert_called_once_with(node_uuid,
'active', configdrive=mock.ANY)
self.assertIsNone(instance.default_ephemeral_device)
self.assertFalse(mock_save.called)
mock_looping.assert_called_once_with(mock_wait_active,
FAKE_CLIENT_WRAPPER,
instance)
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
fake_looping_call.wait.assert_called_once_with()
@mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
@mock.patch.object(configdrive, 'required_by')
def test_spawn(self, mock_required_by, mock_configdrive):
mock_required_by.return_value = False
self._test_spawn()
# assert configdrive was not generated
self.assertFalse(mock_configdrive.called)
@mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
@mock.patch.object(configdrive, 'required_by')
def test_spawn_with_configdrive(self, mock_required_by, mock_configdrive):
mock_required_by.return_value = True
self._test_spawn()
# assert configdrive was generated
mock_configdrive.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
extra_md={})
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, 'destroy')
@mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
def test_spawn_destroyed_after_failure(self, mock_sf, mock_pvifs, mock_adf,
mock_wait_active, mock_destroy,
mock_node, mock_looping,
mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
fake_flavor = objects.Flavor(ephemeral_gb=0)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = fake_flavor
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
mock_node.get_by_instance_uuid.return_value = node
mock_node.set_provision_state.return_value = mock.MagicMock()
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
deploy_exc = exception.InstanceDeployFailure('foo')
fake_looping_call.wait.side_effect = deploy_exc
self.assertRaises(
exception.InstanceDeployFailure,
self.driver.spawn, self.ctx, instance, None, [], None)
mock_destroy.assert_called_once_with(self.ctx, instance, None)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__add_driver_fields_good(self, mock_update):
node = ironic_utils.get_test_node(driver='fake')
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
image_meta = ironic_utils.get_test_image_meta()
flavor = ironic_utils.get_test_flavor()
self.driver._add_driver_fields(node, instance, image_meta, flavor)
expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
'value': image_meta['id']},
{'path': '/instance_info/root_gb', 'op': 'add',
'value': str(instance.root_gb)},
{'path': '/instance_info/swap_mb', 'op': 'add',
'value': str(flavor['swap'])},
{'path': '/instance_uuid', 'op': 'add',
'value': instance.uuid}]
mock_update.assert_called_once_with(node.uuid, expected_patch)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__add_driver_fields_fail(self, mock_update):
mock_update.side_effect = ironic_exception.BadRequest()
node = ironic_utils.get_test_node(driver='fake')
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
image_meta = ironic_utils.get_test_image_meta()
flavor = ironic_utils.get_test_flavor()
self.assertRaises(exception.InstanceDeployFailure,
self.driver._add_driver_fields,
node, instance, image_meta, flavor)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__cleanup_deploy_good_with_flavor(self, mock_update):
node = ironic_utils.get_test_node(driver='fake',
instance_uuid=self.instance_uuid)
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
flavor = ironic_utils.get_test_flavor(extra_specs={})
self.driver._cleanup_deploy(self.ctx, node, instance, None,
flavor=flavor)
expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
mock_update.assert_called_once_with(node.uuid, expected_patch)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__cleanup_deploy_without_flavor(self, mock_update):
node = ironic_utils.get_test_node(driver='fake',
instance_uuid=self.instance_uuid)
flavor = ironic_utils.get_test_flavor(extra_specs={})
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
instance.flavor = flavor
self.driver._cleanup_deploy(self.ctx, node, instance, None)
expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
mock_update.assert_called_once_with(node.uuid, expected_patch)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__cleanup_deploy_fail(self, mock_update):
mock_update.side_effect = ironic_exception.BadRequest()
node = ironic_utils.get_test_node(driver='fake',
instance_uuid=self.instance_uuid)
flavor = ironic_utils.get_test_flavor(extra_specs={})
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
instance.flavor = flavor
self.assertRaises(exception.InstanceTerminationFailure,
self.driver._cleanup_deploy,
self.ctx, node, instance, None)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(FAKE_CLIENT, 'node')
def test_spawn_node_driver_validation_fail(self, mock_node,
mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
mock_node.validate.return_value = ironic_utils.get_test_validation(
power=False, deploy=False)
mock_node.get.return_value = node
image_meta = ironic_utils.get_test_image_meta()
self.assertRaises(exception.ValidationError, self.driver.spawn,
self.ctx, instance, image_meta, [], None)
mock_node.get.assert_called_once_with(node_uuid)
mock_node.validate.assert_called_once_with(node_uuid)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_spawn_node_prepare_for_deploy_fail(self, mock_cleanup_deploy,
mock_pvifs, mock_sf,
mock_node, mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
image_meta = ironic_utils.get_test_image_meta()
class TestException(Exception):
pass
mock_sf.side_effect = TestException()
self.assertRaises(TestException, self.driver.spawn,
self.ctx, instance, image_meta, [], None)
mock_node.get.assert_called_once_with(node_uuid)
mock_node.validate.assert_called_once_with(node_uuid)
mock_cleanup_deploy.assert_called_with(self.ctx, node, instance, None,
flavor=flavor)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_spawn_node_trigger_deploy_fail(self, mock_cleanup_deploy,
mock_pvifs, mock_sf,
mock_node, mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
image_meta = ironic_utils.get_test_image_meta()
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
mock_node.set_provision_state.side_effect = exception.NovaException()
self.assertRaises(exception.NovaException, self.driver.spawn,
self.ctx, instance, image_meta, [], None)
mock_node.get.assert_called_once_with(node_uuid)
mock_node.validate.assert_called_once_with(node_uuid)
mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
instance, None,
flavor=flavor)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_spawn_node_trigger_deploy_fail2(self, mock_cleanup_deploy,
mock_pvifs, mock_sf,
mock_node, mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
image_meta = ironic_utils.get_test_image_meta()
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
mock_node.set_provision_state.side_effect = ironic_exception.BadRequest
self.assertRaises(ironic_exception.BadRequest,
self.driver.spawn,
self.ctx, instance, image_meta, [], None)
mock_node.get.assert_called_once_with(node_uuid)
mock_node.validate.assert_called_once_with(node_uuid)
mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
instance, None,
flavor=flavor)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, 'destroy')
def test_spawn_node_trigger_deploy_fail3(self, mock_destroy,
mock_pvifs, mock_sf,
mock_node, mock_looping,
mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
image_meta = ironic_utils.get_test_image_meta()
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
fake_looping_call.wait.side_effect = ironic_exception.BadRequest
fake_net_info = utils.get_test_network_info()
self.assertRaises(ironic_exception.BadRequest,
self.driver.spawn, self.ctx, instance,
image_meta, [], None, fake_net_info)
mock_destroy.assert_called_once_with(self.ctx, instance,
fake_net_info)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
def test_spawn_sets_default_ephemeral_device(self, mock_sf, mock_pvifs,
mock_wait, mock_node,
mock_save, mock_looping,
mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor(ephemeral_gb=1)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
mock_node.get_by_instance_uuid.return_value = node
mock_node.set_provision_state.return_value = mock.MagicMock()
image_meta = ironic_utils.get_test_image_meta()
self.driver.spawn(self.ctx, instance, image_meta, [], None)
self.assertTrue(mock_save.called)
self.assertEqual('/dev/sda1', instance.default_ephemeral_device)
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_destroy(self, mock_cleanup_deploy, mock_node):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
network_info = 'foo'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
provision_state=ironic_states.ACTIVE)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
def fake_set_provision_state(*_):
node.provision_state = None
mock_node.get_by_instance_uuid.return_value = node
mock_node.set_provision_state.side_effect = fake_set_provision_state
self.driver.destroy(self.ctx, instance, network_info, None)
mock_node.set_provision_state.assert_called_once_with(node_uuid,
'deleted')
mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
mock_cleanup_deploy.assert_called_with(self.ctx, node,
instance, network_info)
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_destroy_ignore_unexpected_state(self, mock_cleanup_deploy,
mock_node):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
network_info = 'foo'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
provision_state=ironic_states.DELETING)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
mock_node.get_by_instance_uuid.return_value = node
self.driver.destroy(self.ctx, instance, network_info, None)
self.assertFalse(mock_node.set_provision_state.called)
mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
mock_cleanup_deploy.assert_called_with(self.ctx, node, instance,
network_info)
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_destroy_cleaning(self, mock_cleanup_deploy, mock_node):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
network_info = 'foo'
node = ironic_utils.get_test_node(
driver='fake', uuid=node_uuid,
provision_state=ironic_states.CLEANING)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
mock_node.get_by_instance_uuid.return_value = node
self.driver.destroy(self.ctx, instance, network_info, None)
self.assertFalse(mock_node.set_provision_state.called)
mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
mock_cleanup_deploy.assert_called_with(self.ctx, node, instance,
network_info)
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
provision_state=ironic_states.ACTIVE)
fake_validate.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
mock_sps.side_effect = exception.NovaException()
self.assertRaises(exception.NovaException, self.driver.destroy,
self.ctx, instance, None, None)
@mock.patch.object(FAKE_CLIENT, 'node')
def test_destroy_unprovision_fail(self, mock_node):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
provision_state=ironic_states.ACTIVE)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
def fake_set_provision_state(*_):
node.provision_state = ironic_states.ERROR
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.side_effect = fake_set_provision_state
self.assertRaises(exception.NovaException, self.driver.destroy,
self.ctx, instance, None, None)
mock_node.set_provision_state.assert_called_once_with(node_uuid,
'deleted')
@mock.patch.object(FAKE_CLIENT, 'node')
def test_destroy_unassociate_fail(self, mock_node):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
provision_state=ironic_states.ACTIVE)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
mock_node.get_by_instance_uuid.return_value = node
mock_node.update.side_effect = exception.NovaException()
self.assertRaises(exception.NovaException, self.driver.destroy,
self.ctx, instance, None, None)
mock_node.set_provision_state.assert_called_once_with(node_uuid,
'deleted')
mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_reboot(self, mock_sp, fake_validate, mock_looping):
node = ironic_utils.get_test_node()
fake_validate.side_effect = [node, node]
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
self.driver.reboot(self.ctx, instance, None, None)
mock_sp.assert_called_once_with(node.uuid, 'reboot')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_power_off(self, mock_sp, fake_validate, mock_looping):
self._test_power_on_off(mock_sp, fake_validate, mock_looping,
method_name='power_off')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_power_on(self, mock_sp, fake_validate, mock_looping):
self._test_power_on_off(mock_sp, fake_validate, mock_looping,
method_name='power_on')
def _test_power_on_off(self, mock_sp, fake_validate, mock_looping,
method_name=None):
node = ironic_utils.get_test_node()
fake_validate.side_effect = [node, node]
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
instance = fake_instance.fake_instance_obj(self.ctx,
node=self.instance_uuid)
# Call the method under test here
if method_name == 'power_on':
self.driver.power_on(self.ctx, instance,
utils.get_test_network_info())
mock_sp.assert_called_once_with(node.uuid, 'on')
elif method_name == 'power_off':
self.driver.power_off(instance)
mock_sp.assert_called_once_with(node.uuid, 'off')
@mock.patch.object(FAKE_CLIENT.node, 'list_ports')
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
def test_plug_vifs_with_port(self, mock_uvifs, mock_port_udt, mock_lp):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
port = ironic_utils.get_test_port()
mock_lp.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
network_info = utils.get_test_network_info()
port_id = unicode(network_info[0]['id'])
expected_patch = [{'op': 'add',
'path': '/extra/vif_port_id',
'value': port_id}]
self.driver._plug_vifs(node, instance, network_info)
# asserts
mock_uvifs.assert_called_once_with(node, instance, network_info)
mock_lp.assert_called_once_with(node_uuid)
mock_port_udt.assert_called_with(port.uuid, expected_patch)
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
def test_plug_vifs(self, mock__plug_vifs, mock_get):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
mock_get.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
network_info = utils.get_test_network_info()
self.driver.plug_vifs(instance, network_info)
mock_get.assert_called_once_with(node_uuid)
mock__plug_vifs.assert_called_once_with(node, instance, network_info)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(FAKE_CLIENT.node, 'list_ports')
@mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
def test_plug_vifs_count_mismatch(self, mock_uvifs, mock_lp,
mock_port_udt):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
port = ironic_utils.get_test_port()
mock_lp.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
# len(network_info) > len(ports)
network_info = (utils.get_test_network_info() +
utils.get_test_network_info())
self.assertRaises(exception.NovaException,
self.driver._plug_vifs, node, instance,
network_info)
# asserts
mock_uvifs.assert_called_once_with(node, instance, network_info)
mock_lp.assert_called_once_with(node_uuid)
# assert port.update() was not called
self.assertFalse(mock_port_udt.called)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(FAKE_CLIENT.node, 'list_ports')
@mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
def test_plug_vifs_no_network_info(self, mock_uvifs, mock_lp,
mock_port_udt):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
port = ironic_utils.get_test_port()
mock_lp.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
network_info = []
self.driver._plug_vifs(node, instance, network_info)
# asserts
mock_uvifs.assert_called_once_with(node, instance, network_info)
mock_lp.assert_called_once_with(node_uuid)
# assert port.update() was not called
self.assertFalse(mock_port_udt.called)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(FAKE_CLIENT, 'node')
def test_unplug_vifs(self, mock_node, mock_update):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
port = ironic_utils.get_test_port(extra={'vif_port_id': 'fake-vif'})
mock_node.get.return_value = node
mock_node.list_ports.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
expected_patch = [{'op': 'remove', 'path':
'/extra/vif_port_id'}]
self.driver.unplug_vifs(instance,
utils.get_test_network_info())
# asserts
mock_node.get.assert_called_once_with(node_uuid)
mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
mock_update.assert_called_once_with(port.uuid, expected_patch)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(FAKE_CLIENT, 'node')
def test_unplug_vifs_port_not_associated(self, mock_node, mock_update):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
port = ironic_utils.get_test_port(extra={})
mock_node.get.return_value = node
mock_node.list_ports.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
self.driver.unplug_vifs(instance, utils.get_test_network_info())
mock_node.get.assert_called_once_with(node_uuid)
mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
# assert port.update() was not called
self.assertFalse(mock_update.called)
@mock.patch.object(FAKE_CLIENT.port, 'update')
def test_unplug_vifs_no_network_info(self, mock_update):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = []
self.driver.unplug_vifs(instance, network_info)
# assert port.update() was not called
self.assertFalse(mock_update.called)
@mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
create=True)
def test_unfilter_instance(self, mock_ui):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = utils.get_test_network_info()
self.driver.unfilter_instance(instance, network_info)
mock_ui.assert_called_once_with(instance, network_info)
@mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
create=True)
def test_ensure_filtering_rules_for_instance(self, mock_pif, mock_sbf):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = utils.get_test_network_info()
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
mock_sbf.assert_called_once_with(instance, network_info)
mock_pif.assert_called_once_with(instance, network_info)
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_instance_security_rules', create=True)
def test_refresh_instance_security_rules(self, mock_risr):
instance = fake_instance.fake_instance_obj(self.ctx)
self.driver.refresh_instance_security_rules(instance)
mock_risr.assert_called_once_with(instance)
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_provider_fw_rules', create=True)
def test_refresh_provider_fw_rules(self, mock_rpfr):
fake_instance.fake_instance_obj(self.ctx)
self.driver.refresh_provider_fw_rules()
mock_rpfr.assert_called_once_with()
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_security_group_members', create=True)
def test_refresh_security_group_members(self, mock_rsgm):
fake_group = 'fake-security-group-members'
self.driver.refresh_security_group_members(fake_group)
mock_rsgm.assert_called_once_with(fake_group)
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_instance_security_rules', create=True)
def test_refresh_security_group_rules(self, mock_risr):
fake_group = 'fake-security-group-members'
self.driver.refresh_instance_security_rules(fake_group)
mock_risr.assert_called_once_with(fake_group)
@mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(objects.Instance, 'save')
def _test_rebuild(self, mock_save, mock_get, mock_driver_fields,
mock_set_pstate, mock_looping, mock_wait_active,
preserve=False):
node_uuid = uuidutils.generate_uuid()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
instance_type_id=5)
mock_get.return_value = node
image_meta = ironic_utils.get_test_image_meta()
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid,
node=node_uuid,
instance_type_id=flavor_id)
instance.flavor = flavor
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
self.driver.rebuild(
context=self.ctx, instance=instance, image_meta=image_meta,
injected_files=None, admin_password=None, bdms=None,
detach_block_devices=None, attach_block_devices=None,
preserve_ephemeral=preserve)
mock_save.assert_called_once_with(
expected_task_state=[task_states.REBUILDING])
mock_driver_fields.assert_called_once_with(node, instance, image_meta,
flavor, preserve)
mock_set_pstate.assert_called_once_with(node_uuid,
ironic_states.REBUILD)
mock_looping.assert_called_once_with(mock_wait_active,
FAKE_CLIENT_WRAPPER,
instance)
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
fake_looping_call.wait.assert_called_once_with()
def test_rebuild_preserve_ephemeral(self):
self._test_rebuild(preserve=True)
def test_rebuild_no_preserve_ephemeral(self):
self._test_rebuild(preserve=False)
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(objects.Instance, 'save')
def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
mock_set_pstate):
node_uuid = uuidutils.generate_uuid()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
instance_type_id=5)
mock_get.return_value = node
image_meta = ironic_utils.get_test_image_meta()
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid,
node=node_uuid,
instance_type_id=flavor_id)
instance.flavor = flavor
exceptions = [
exception.NovaException(),
ironic_exception.BadRequest(),
ironic_exception.InternalServerError(),
]
for e in exceptions:
mock_set_pstate.side_effect = e
self.assertRaises(exception.InstanceDeployFailure,
self.driver.rebuild,
context=self.ctx, instance=instance, image_meta=image_meta,
injected_files=None, admin_password=None, bdms=None,
detach_block_devices=None, attach_block_devices=None)
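# Config-drive generation gets its own test case; the class decorators patch
# InstanceMetadata and ConfigDriveBuilder for every test method.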
@mock.patch.object(instance_metadata, 'InstanceMetadata')
@mock.patch.object(configdrive, 'ConfigDriveBuilder')
class IronicDriverGenerateConfigDriveTestCase(test.NoDBTestCase):
@mock.patch.object(cw, 'IronicClientWrapper',
lambda *_: FAKE_CLIENT_WRAPPER)
def setUp(self):
super(IronicDriverGenerateConfigDriveTestCase, self).setUp()
self.flags(**IRONIC_FLAGS)
self.driver = ironic_driver.IronicDriver(None)
self.driver.virtapi = fake.FakeVirtAPI()
self.ctx = nova_context.get_admin_context()
node_uuid = uuidutils.generate_uuid()
self.node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
self.instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
self.network_info = utils.get_test_network_info()
def test_generate_configdrive(self, mock_cd_builder, mock_instance_meta):
mock_instance_meta.return_value = 'fake-instance'
mock_make_drive = mock.MagicMock(make_drive=lambda *_: None)
mock_cd_builder.return_value.__enter__.return_value = mock_make_drive
self.driver._generate_configdrive(self.instance, self.node,
self.network_info)
mock_cd_builder.assert_called_once_with(instance_md='fake-instance')
mock_instance_meta.assert_called_once_with(self.instance,
network_info=self.network_info, extra_md={}, content=None)
def test_generate_configdrive_fail(self, mock_cd_builder,
mock_instance_meta):
mock_cd_builder.side_effect = exception.ConfigDriveMountFailed(
operation='foo', error='error')
mock_instance_meta.return_value = 'fake-instance'
mock_make_drive = mock.MagicMock(make_drive=lambda *_: None)
mock_cd_builder.return_value.__enter__.return_value = mock_make_drive
self.assertRaises(exception.ConfigDriveMountFailed,
self.driver._generate_configdrive,
self.instance, self.node, self.network_info)
mock_cd_builder.assert_called_once_with(instance_md='fake-instance')
mock_instance_meta.assert_called_once_with(self.instance,
network_info=self.network_info, extra_md={}, content=None)
|
|
import StringIO
import datetime
import sys
from django.http import HttpResponse
from django.template import loader
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.xmlutils import SimplerXMLGenerator
from django.db.models import BooleanField, NullBooleanField
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.util import json
from xadmin.views.list import ALL_VAR
try:
    import xlwt
    has_xlwt = True
except ImportError:
    has_xlwt = False
try:
    import xlsxwriter
    has_xlsxwriter = True
except ImportError:
    has_xlsxwriter = False
class ExportMenuPlugin(BaseAdminPlugin):
list_export = ('xlsx', 'xls', 'csv', 'xml', 'json')
export_names = {'xlsx': 'Excel 2007', 'xls': 'Excel', 'csv': 'CSV',
'xml': 'XML', 'json': 'JSON'}
def init_request(self, *args, **kwargs):
self.list_export = [
f for f in self.list_export
if (f != 'xlsx' or has_xlsxwriter) and (f != 'xls' or has_xlwt)]
def block_top_toolbar(self, context, nodes):
if self.list_export:
context.update({
                'show_export_all': self.admin_view.paginator.count > self.admin_view.list_per_page and ALL_VAR not in self.admin_view.request.GET,
'form_params': self.admin_view.get_form_params({'_do_': 'export'}, ('export_type',)),
'export_types': [{'type': et, 'name': self.export_names[et]} for et in self.list_export],
})
nodes.append(loader.render_to_string('xadmin/blocks/model_list.top_toolbar.exports.html', context_instance=context))
class ExportPlugin(BaseAdminPlugin):
export_mimes = {'xlsx': 'application/vnd.ms-excel',
'xls': 'application/vnd.ms-excel', 'csv': 'text/csv',
'xml': 'application/xhtml+xml', 'json': 'application/json'}
def init_request(self, *args, **kwargs):
return self.request.GET.get('_do_') == 'export'
def _format_value(self, o):
if (o.field is None and getattr(o.attr, 'boolean', False)) or \
(o.field and isinstance(o.field, (BooleanField, NullBooleanField))):
value = o.value
elif str(o.text).startswith("<span class='text-muted'>"):
value = escape(str(o.text)[25:-7])
else:
value = escape(str(o.text))
return value
def _get_objects(self, context):
headers = [c for c in context['result_headers'].cells if c.export]
rows = context['results']
return [dict([
(force_unicode(headers[i].text), self._format_value(o)) for i, o in
enumerate(filter(lambda c:getattr(c, 'export', False), r.cells))]) for r in rows]
def _get_datas(self, context):
rows = context['results']
new_rows = [[self._format_value(o) for o in
filter(lambda c:getattr(c, 'export', False), r.cells)] for r in rows]
new_rows.insert(0, [force_unicode(c.text) for c in context['result_headers'].cells if c.export])
return new_rows
def get_xlsx_export(self, context):
datas = self._get_datas(context)
output = StringIO.StringIO()
export_header = (
self.request.GET.get('export_xlsx_header', 'off') == 'on')
model_name = self.opts.verbose_name
book = xlsxwriter.Workbook(output)
sheet = book.add_worksheet(
u"%s %s" % (_(u'Sheet'), force_unicode(model_name)))
styles = {'datetime': book.add_format({'num_format': 'yyyy-mm-dd hh:mm:ss'}),
'date': book.add_format({'num_format': 'yyyy-mm-dd'}),
'time': book.add_format({'num_format': 'hh:mm:ss'}),
'header': book.add_format({'font': 'name Times New Roman', 'color': 'red', 'bold': 'on', 'num_format': '#,##0.00'}),
'default': book.add_format()}
if not export_header:
datas = datas[1:]
for rowx, row in enumerate(datas):
for colx, value in enumerate(row):
if export_header and rowx == 0:
cell_style = styles['header']
else:
if isinstance(value, datetime.datetime):
cell_style = styles['datetime']
elif isinstance(value, datetime.date):
cell_style = styles['date']
elif isinstance(value, datetime.time):
cell_style = styles['time']
else:
cell_style = styles['default']
sheet.write(rowx, colx, value, cell_style)
book.close()
output.seek(0)
return output.getvalue()
def get_xls_export(self, context):
datas = self._get_datas(context)
output = StringIO.StringIO()
export_header = (
self.request.GET.get('export_xls_header', 'off') == 'on')
model_name = self.opts.verbose_name
book = xlwt.Workbook(encoding='utf8')
sheet = book.add_sheet(
u"%s %s" % (_(u'Sheet'), force_unicode(model_name)))
styles = {'datetime': xlwt.easyxf(num_format_str='yyyy-mm-dd hh:mm:ss'),
'date': xlwt.easyxf(num_format_str='yyyy-mm-dd'),
'time': xlwt.easyxf(num_format_str='hh:mm:ss'),
'header': xlwt.easyxf('font: name Times New Roman, color-index red, bold on', num_format_str='#,##0.00'),
'default': xlwt.Style.default_style}
if not export_header:
datas = datas[1:]
for rowx, row in enumerate(datas):
for colx, value in enumerate(row):
if export_header and rowx == 0:
cell_style = styles['header']
else:
if isinstance(value, datetime.datetime):
cell_style = styles['datetime']
elif isinstance(value, datetime.date):
cell_style = styles['date']
elif isinstance(value, datetime.time):
cell_style = styles['time']
else:
cell_style = styles['default']
sheet.write(rowx, colx, value, style=cell_style)
book.save(output)
output.seek(0)
return output.getvalue()
def _format_csv_text(self, t):
if isinstance(t, bool):
return _('Yes') if t else _('No')
t = t.replace('"', '""').replace(',', '\,')
if isinstance(t, basestring):
t = '"%s"' % t
return t
def get_csv_export(self, context):
datas = self._get_datas(context)
stream = []
if self.request.GET.get('export_csv_header', 'off') != 'on':
datas = datas[1:]
for row in datas:
stream.append(','.join(map(self._format_csv_text, row)))
return '\r\n'.join(stream)
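    # Illustrative sketch (not part of the original plugin): for a data row such
    # as [True, 'a,b', 'say "hi"'], _format_csv_text maps the boolean to the
    # translated Yes/No label, escapes quotes and commas, and wraps strings in
    # quotes, so the joined CSV line would look roughly like:
    #     Yes,"a\,b","say ""hi"""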
def _to_xml(self, xml, data):
if isinstance(data, (list, tuple)):
for item in data:
xml.startElement("row", {})
self._to_xml(xml, item)
xml.endElement("row")
elif isinstance(data, dict):
for key, value in data.iteritems():
key = key.replace(' ', '_')
xml.startElement(key, {})
self._to_xml(xml, value)
xml.endElement(key)
else:
xml.characters(smart_unicode(data))
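    # Illustrative sketch (assumption about typical output, not from the original
    # source): for results like [{'name': 'foo', 'id': '1'}], _to_xml emits
    #     <row><name>foo</name><id>1</id></row>
    # inside the surrounding <objects> element, with spaces in header names
    # replaced by underscores so they form valid tag names.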
def get_xml_export(self, context):
results = self._get_objects(context)
stream = StringIO.StringIO()
xml = SimplerXMLGenerator(stream, "utf-8")
xml.startDocument()
xml.startElement("objects", {})
self._to_xml(xml, results)
xml.endElement("objects")
xml.endDocument()
return stream.getvalue().split('\n')[1]
def get_json_export(self, context):
results = self._get_objects(context)
return json.dumps({'objects': results}, ensure_ascii=False,
indent=(self.request.GET.get('export_json_format', 'off') == 'on') and 4 or None)
def get_response(self, response, context, *args, **kwargs):
file_type = self.request.GET.get('export_type', 'csv')
response = HttpResponse(
content_type="%s; charset=UTF-8" % self.export_mimes[file_type])
file_name = self.opts.verbose_name.replace(' ', '_')
response['Content-Disposition'] = ('attachment; filename=%s.%s' % (
file_name, file_type)).encode('utf-8')
response.write(getattr(self, 'get_%s_export' % file_type)(context))
return response
# View Methods
def get_result_list(self, __):
if self.request.GET.get('all', 'off') == 'on':
self.admin_view.list_per_page = sys.maxint
return __()
def result_header(self, item, field_name, row):
item.export = not item.attr or field_name == '__str__' or getattr(item.attr, 'allow_export', True)
return item
def result_item(self, item, obj, field_name, row):
item.export = item.field or field_name == '__str__' or getattr(item.attr, 'allow_export', True)
return item
site.register_plugin(ExportMenuPlugin, ListAdminView)
site.register_plugin(ExportPlugin, ListAdminView)
|
|
from collections import defaultdict
from datetime import datetime
from itertools import product
import warnings
import numpy as np
from numpy import nan
import pytest
from pandas.compat import PY2
from pandas import DataFrame, MultiIndex, Series, compat, concat, merge
from pandas.core import common as com
from pandas.core.sorting import (
decons_group_index, get_group_index, is_int64_overflow_possible,
lexsort_indexer, nargsort, safe_sort)
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestSorting(object):
@pytest.mark.slow
def test_int64_overflow(self):
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame({'A': A,
'B': B,
'C': A,
'D': B,
'E': A,
'F': B,
'G': A,
'H': B,
'values': np.random.randn(2500)})
lg = df.groupby(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
rg = df.groupby(['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'])
left = lg.sum()['values']
right = rg.sum()['values']
exp_index, _ = left.index.sortlevel()
tm.assert_index_equal(left.index, exp_index)
exp_index, _ = right.index.sortlevel(0)
tm.assert_index_equal(right.index, exp_index)
tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'
]].values))
tups = com.asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()['values']
for k, v in compat.iteritems(expected):
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
def test_int64_overflow_moar(self):
# GH9096
values = range(55109)
data = DataFrame.from_dict(
{'a': values, 'b': values, 'c': values, 'd': values})
grouped = data.groupby(['a', 'b', 'c', 'd'])
assert len(grouped) == len(values)
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
        arr = np.vstack((arr, arr[i]))  # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list('abcde'))
df['jim'], df['joe'] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list('abcde'))
# verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(gr.grouper.shape)
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df['jim'], df['joe']):
jim[key].append(a)
joe[key].append(b)
assert len(gr) == len(jim)
mi = MultiIndex.from_tuples(jim.keys(), names=list('abcde'))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype='f8')
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=['jim', 'joe'], index=mi)
return res.sort_index()
assert_frame_equal(gr.mean(), aggr(np.mean))
assert_frame_equal(gr.median(), aggr(np.median))
def test_lexsort_indexer(self):
keys = [[nan] * 5 + list(range(100)) + [nan] * 5]
# orders=True, na_position='last'
result = lexsort_indexer(keys, orders=True, na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=True, na_position='first'
result = lexsort_indexer(keys, orders=True, na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='last'
result = lexsort_indexer(keys, orders=False, na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='first'
result = lexsort_indexer(keys, orders=False, na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [nan] * 5 + list(range(100)) + [nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype='O')
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
# mergesort, ascending=True, na_position='last'
result = nargsort(items, kind='mergesort', ascending=True,
na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items, kind='mergesort', ascending=True,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items, kind='mergesort', ascending=False,
na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items, kind='mergesort', ascending=False,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='last'
result = nargsort(items2, kind='mergesort', ascending=True,
na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items2, kind='mergesort', ascending=True,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items2, kind='mergesort', ascending=False,
na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items2, kind='mergesort', ascending=False,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
class TestMerge(object):
@pytest.mark.slow
def test_int64_overflow_issues(self):
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G1'])
df2 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G2'])
# it works!
result = merge(df1, df2, how='outer')
assert len(result) == 2000
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)),
columns=list('ABCDEFG'))
left['left'] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ['right']
right.index = np.arange(len(right))
right['right'] *= -1
out = merge(left, right, how='outer')
assert len(out) == len(left)
assert_series_equal(out['left'], - out['right'], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
assert_series_equal(out['left'], result, check_names=False)
assert result.name is None
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ['left', 'right', 'outer', 'inner']:
assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how='left', sort=False)
assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how='left', sort=False)
assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'),
columns=list('ABCDEFG'))
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
assert is_int64_overflow_possible(shape)
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(np.random.randint(low, high, (n // 2, 7))
.astype('int64'),
columns=list('ABCDEFG'))
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left['left'] = np.random.randn(len(left))
right['right'] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list('ABCDEFG')).iterrows():
ldict[idx].append(row['left'])
for idx, row in right.set_index(list('ABCDEFG')).iterrows():
rdict[idx].append(row['right'])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(k + tuple([lv, rv]))
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(k + tuple([np.nan, rv]))
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list('ABCDEFG')
assert_frame_equal(df[kcols].copy(),
df[kcols].sort_values(kcols, kind='mergesort'))
out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right'])
out = align(out)
jmask = {'left': out['left'].notna(),
'right': out['right'].notna(),
'inner': out['left'].notna() & out['right'].notna(),
'outer': np.ones(len(out), dtype='bool')}
for how in 'left', 'right', 'outer', 'inner':
mask = jmask[how]
frame = align(out[mask].copy())
assert mask.all() ^ mask.any() or how == 'outer'
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
assert_frame_equal(frame, align(res),
check_dtype=how not in ('right', 'outer'))
def test_decons():
def testit(label_list, shape):
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
label_list2 = decons_group_index(group_index, shape)
for a, b in zip(label_list, label_list2):
tm.assert_numpy_array_equal(a, b)
shape = (4, 5, 6)
label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64)]
testit(label_list, shape)
shape = (10000, 10000)
label_list = [np.tile(np.arange(10000, dtype=np.int64), 5),
np.tile(np.arange(10000, dtype=np.int64), 5)]
testit(label_list, shape)
class TestSafeSort(object):
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = safe_sort(values)
expected = np.array(list("aaabbc"), dtype='object')
tm.assert_numpy_array_equal(result, expected)
values = []
result = safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
def test_labels(self):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
labels = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# na_sentinel
labels = [0, 1, 1, 2, 3, 0, 99, 4]
result, result_labels = safe_sort(values, labels,
na_sentinel=99)
expected_labels = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# out of bound indices
labels = [0, 101, 102, 2, 3, 0, 99, 4]
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
labels = []
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer(self):
values = np.array(['b', 1, 0, 'a', 0, 'b'], dtype=object)
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
values = np.array(['b', 1, 0, 'a'], dtype=object)
labels = [0, 1, 2, 3, 0, -1, 1]
result, result_labels = safe_sort(values, labels)
expected = np.array([0, 1, 'a', 'b'], dtype=object)
expected_labels = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer_from_list(self):
values = ['b', 1, 0, 'a', 0, 'b']
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
msg = (r"'(<|>)' not supported between instances of ('"
r"datetime\.datetime' and 'int'|'int' and 'datetime\.datetime"
r"')|"
r"unorderable types: int\(\) > datetime\.datetime\(\)")
if compat.PY2:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
with warnings.catch_warnings():
with pytest.raises(TypeError, match=msg):
safe_sort(arr)
else:
with pytest.raises(TypeError, match=msg):
safe_sort(arr)
def test_exceptions(self):
with pytest.raises(TypeError,
match="Only list-like objects are allowed"):
safe_sort(values=1)
with pytest.raises(TypeError,
match="Only list-like objects or None"):
safe_sort(values=[0, 1, 2], labels=1)
with pytest.raises(ValueError,
match="values should be unique"):
safe_sort(values=[0, 1, 2, 1], labels=[0, 1])
|
|
import torch
import warnings
from torch._six import string_classes
from datetime import timedelta
from .rendezvous import rendezvous, register_rendezvous_handler
from . import BroadcastOptions, AllreduceOptions, ReduceOptions, \
ScatterOptions, GatherOptions
from . import ReduceOp
from . import PrefixStore
from . import ProcessGroupGloo
_MPI_AVAILABLE = True
_NCCL_AVAILABLE = True
try:
    from . import ProcessGroupMPI
except ImportError:
_MPI_AVAILABLE = False
try:
    from . import ProcessGroupNCCL
except ImportError:
_NCCL_AVAILABLE = False
class Backend(object):
"""
An enum-like class of available backends: GLOO, NCCL, and MPI.
The values of this class are lowercase strings, e.g., ``"gloo"``. They can
be accessed as attributes, e.g., ``Backend.NCCL``.
This class can be directly called to parse the string, e.g.,
``Backend(backend_str)`` will check if ``backend_str`` is valid, and
return the parsed lowercase string if so. It also accepts uppercase strings,
e.g., ``Backend("GLOO")`` returns ``"gloo"``.
.. note:: The entry ``Backend.UNDEFINED`` is present but only used as
initial value of some fields. Users should neither use it directly
nor assume its existence.
"""
UNDEFINED = "undefined"
GLOO = "gloo"
NCCL = "nccl"
MPI = "mpi"
TCP = "tcp"
def __new__(cls, name):
if not isinstance(name, string_classes):
raise ValueError("Backend name must be a string, but got: {}".format(name))
value = getattr(Backend, name.upper(), Backend.UNDEFINED)
if value == Backend.TCP:
raise ValueError("TCP backend has been deprecated. Please use "
"Gloo or MPI backend for collective operations "
"on CPU tensors.")
elif value == Backend.UNDEFINED:
raise ValueError("Invalid backend: '{}'".format(name))
return value
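# Usage sketch (illustrative only): ``Backend`` normalizes its input, so
# ``Backend("GLOO")`` and ``Backend("gloo")`` both evaluate to ``"gloo"``,
# while ``Backend("tcp")`` raises ValueError because the TCP backend is
# deprecated, and an unknown name such as ``Backend("bogus")`` also raises
# ValueError.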
# `_backend`, `dist_backend`, and `reduce_op` are here to maintain backward
# compatibility with pre-c10d distributed package.
# TODO: remove them when users are ready to take a hard dependency on PyTorch 1.
_backend = Backend.UNDEFINED
dist_backend = Backend
class reduce_op(object):
r"""
Deprecated enum-like class for reduction operations: ``SUM``, ``PRODUCT``,
``MIN``, and ``MAX``.
:class:`~torch.distributed.ReduceOp` is recommended to use instead.
"""
def __init__(self):
# __members__ is a dict storing key-value pairs for enum classes
for k, v in ReduceOp.__members__.items():
setattr(self, k, v)
self.__members__ = ReduceOp.__members__
def __getattribute__(self, key):
warnings.warn("torch.distributed.reduce_op is deprecated, please use "
"torch.distributed.ReduceOp instead")
return object.__getattribute__(self, key)
reduce_op = reduce_op()
class group(object):
WORLD = object()
class GroupMember(object):
# Alias to group.WORLD for backward compatibility
WORLD = group.WORLD
NON_GROUP_MEMBER = object()
# Cached process groups
# For NCCL and GLOO pg, it is a map from ProcessGroup to (Backend, Store)
# For MPI pg, it is a map from ProcessGroup to (Backend, bool), where the bool
# indicates whether the current process is part of that group
_pg_map = {}
# Process group's names, map from ProcessGroup to str
_pg_names = {}
# Process group's global rank to local rank mapping
_pg_group_ranks = {}
# Default process group state
_default_pg = None
_default_pg_init_method = None
# Default process group wide timeout, if applicable.
# This currently only applies to the gloo backend. To make an attempt at
# backwards compatibility with THD, we use an extraordinarily high default
# timeout, given that THD did not have timeouts.
_default_pg_timeout = timedelta(minutes=30)
# Process group count for default naming
_group_count = 0
def _rank_not_in_group(group):
"""
Helper that checks if the current process's rank is not in a given group
"""
default_backend, _ = _pg_map[_get_default_group()]
if default_backend != Backend.MPI:
return group == GroupMember.NON_GROUP_MEMBER
else:
if group == GroupMember.WORLD:
return False
else:
_, in_group = _pg_map[group]
return not in_group
def _get_group_rank(group, rank):
"""
Helper that gets a given group's local rank in the group from a given global
rank
"""
if group is GroupMember.WORLD:
raise RuntimeError("group.WORLD does not have local rank to global "
"rank mapping")
if group not in _pg_group_ranks:
raise RuntimeError("The given group does not exist")
try:
group_rank = _pg_group_ranks[group][rank]
except KeyError:
raise RuntimeError("The global rank is not part of the group")
return group_rank
def _get_global_rank(group, group_rank):
"""
Helper that gets a given group's global rank from a given local rank in the
group
"""
if group is GroupMember.WORLD:
raise RuntimeError("group.WORLD does not have local rank to global "
"rank mapping")
group_rank_map = _pg_group_ranks[group]
for rank, grp_rank in group_rank_map.items():
if grp_rank == group_rank:
return rank
raise RuntimeError("The group rank is not part of the group")
def _check_default_pg():
"""
    Helper that asserts that the default ProcessGroup has been initialized
"""
assert _default_pg is not None, \
"Default process group is not initialized"
def _get_group_size(group):
"""
Helper that gets a given group's world size
"""
if group is GroupMember.WORLD:
_check_default_pg()
return _default_pg.size()
if group not in _pg_group_ranks:
raise RuntimeError("The given group does not exist")
return len(_pg_group_ranks[group])
def _check_single_tensor(param, param_name):
"""
    Helper that checks that ``param``, identified by ``param_name`` in error
    messages, is a single torch.Tensor
"""
if not isinstance(param, torch.Tensor):
raise RuntimeError("Invalid function argument. Expecting parameter: {} "
"to be a torch.Tensor type".format(param_name))
def _check_tensor_list(param, param_name):
"""
    Helper that checks that ``param``, identified by ``param_name`` in error
    messages, is a list of torch.Tensor
"""
wrong_type = False
if isinstance(param, list):
for p in param:
if not isinstance(p, torch.Tensor):
wrong_type = True
break
else:
wrong_type = True
if wrong_type:
raise RuntimeError("Invalid function argument. Expecting parameter: {} "
"to be a List[torch.Tensor] type".format(param_name))
def is_mpi_available():
"""
Checks if MPI is available
"""
return _MPI_AVAILABLE
def is_nccl_available():
"""
Checks if NCCL is available
"""
return _NCCL_AVAILABLE
def is_initialized():
"""
Checking if the default process group has been initialized
"""
return _default_pg is not None
def _get_default_group():
"""
Getting the default process group created by init_process_group
"""
if not is_initialized():
raise RuntimeError("Default process group has not been initialized, "
"please make sure to call init_process_group.")
return _default_pg
def get_backend(group=group.WORLD):
"""
Returns the backend of the given process group.
Arguments:
group (ProcessGroup, optional): The process group to work on. The
default is the general main process group. If another specific group
is specified, the calling process must be part of :attr:`group`.
Returns:
The backend of the given process group as a lower case string.
"""
_check_default_pg()
if group == GroupMember.WORLD:
pg = _default_pg
else:
pg = group
if _rank_not_in_group(pg):
raise RuntimeError("Invalid process group specified")
return _pg_map.get(pg, None)[0]
def init_process_group(backend,
init_method="env://",
timeout=_default_pg_timeout,
**kwargs):
"""
Initializes the default distributed process group, and this will also
initialize the distributed package
Arguments:
backend (str or Backend): The backend to use. Depending on
build-time configurations, valid values include ``mpi``, ``gloo``,
and ``nccl``. This field should be given as a lowercase string
(e.g., ``"gloo"``), which can also be accessed via
:class:`Backend` attributes (e.g., ``Backend.GLOO``).
init_method (str, optional): URL specifying how to initialize the
process group.
world_size (int, optional): Number of processes participating in
the job.
rank (int, optional): Rank of the current process.
timeout (timedelta, optional): Timeout for operations executed against
the process group. Default value equals 30 minutes.
This is only applicable for the ``gloo`` backend.
group_name (str, optional, deprecated): Group name.
    To enable ``backend == Backend.MPI``, PyTorch needs to be built from source
on a system that supports MPI. The same applies to NCCL as well.
"""
global _pg_map
global _pg_names
global _backend
global _default_pg
global _default_pg_init_method
if not isinstance(timeout, timedelta):
        raise RuntimeError("Expected timeout argument to be of type "
"datetime.timedelta")
if _default_pg is not None:
raise RuntimeError("trying to initialize the default process group "
"twice!")
world_size = kwargs.pop('world_size', -1)
group_name = kwargs.pop('group_name', '')
rank = kwargs.pop('rank', -1)
assert len(kwargs) == 0, \
"got unexpected keyword arguments: %s" % ",".join(kwargs.keys())
backend = Backend(backend)
if backend == Backend.MPI:
if not is_mpi_available():
raise RuntimeError("Distributed package doesn't have MPI built in")
_default_pg = ProcessGroupMPI([])
_pg_map[_default_pg] = (Backend.MPI, True)
_pg_names[_default_pg] = group_name
else:
# backward compatible API
url = init_method
if world_size != -1 and rank != -1:
url += "?rank={}&world_size={}".format(rank, world_size)
elif rank != -1:
url += "?rank={}".format(rank)
elif world_size != -1:
url += "?world_size={}".format(world_size)
store, rank, world_size = next(rendezvous(url))
if backend == Backend.GLOO:
_default_pg = ProcessGroupGloo(
store,
rank,
world_size,
timeout=timeout)
_pg_map[_default_pg] = (Backend.GLOO, store)
_pg_names[_default_pg] = group_name
elif backend == Backend.NCCL:
if not is_nccl_available():
raise RuntimeError("Distributed package doesn't have NCCL "
"built in")
_default_pg = ProcessGroupNCCL(store, rank, world_size)
_pg_map[_default_pg] = (Backend.NCCL, store)
_pg_names[_default_pg] = group_name
_backend = _pg_map[_default_pg][0]
_default_pg_init_method = init_method
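# Minimal usage sketch (illustrative, not part of this module), assuming a
# single-process job and the tcp:// rendezvous; rank and world_size are passed
# as keyword arguments and end up in the rendezvous URL query string:
#
#     import torch.distributed as dist
#     dist.init_process_group(backend="gloo",
#                             init_method="tcp://127.0.0.1:23456",
#                             rank=0, world_size=1)
#     assert dist.get_rank() == 0 and dist.get_world_size() == 1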
def _new_process_group_helper(world_size,
rank,
group_ranks,
in_group,
group_name,
timeout=_default_pg_timeout):
"""
    Create a new distributed process group that can then be used to perform
    collective operations.
"""
global _pg_map
global _group_count
global _pg_names
if not group_name:
group_name = str(_group_count)
_group_count += 1
if group_name in _pg_names.values():
raise RuntimeError("The specified group name has already been "
"created, please use a different group name")
if not isinstance(timeout, timedelta):
        raise RuntimeError("Expected timeout argument to be of type "
"datetime.timedelta")
default_backend, default_store = _pg_map[_default_pg]
if default_backend == Backend.MPI:
if not is_mpi_available():
raise RuntimeError("Distributed package doesn't have MPI built in")
pg = ProcessGroupMPI(group_ranks)
_pg_map[pg] = (Backend.MPI, in_group)
_pg_names[pg] = group_name
else:
# Create the prefix store
store = PrefixStore(group_name, default_store)
if default_backend == Backend.GLOO:
pg = ProcessGroupGloo(
store,
rank,
world_size,
timeout=timeout)
_pg_map[pg] = (Backend.GLOO, store)
_pg_names[pg] = group_name
elif default_backend == Backend.NCCL:
if not is_nccl_available():
raise RuntimeError("Distributed package doesn't have NCCL "
"built in")
pg = ProcessGroupNCCL(store, rank, world_size, group_name)
_pg_map[pg] = (Backend.NCCL, store)
_pg_names[pg] = group_name
else:
raise RuntimeError("Unsupported distributed backend by group")
return pg
def destroy_process_group(group=group.WORLD):
"""
Destroy a given process group, and deinitialize the distributed package
Arguments:
group (ProcessGroup, optional): The process group to be destroyed, if
group.WORLD is given, all process
groups including the default one will
be destroyed.
"""
global _pg_map
global _pg_names
global _pg_group_ranks
global _default_pg
global _default_pg_init_method
default_backend, _ = _pg_map[_get_default_group()]
if (default_backend != Backend.MPI and
group == GroupMember.NON_GROUP_MEMBER):
return
if group == GroupMember.WORLD:
pg = _default_pg
else:
pg = group
if _pg_map.get(pg, None) is None:
raise RuntimeError("Invalid process group specified")
if group == GroupMember.WORLD:
_default_pg = None
_default_pg_init_method = None
_pg_map.clear()
_pg_names.clear()
_pg_group_ranks.clear()
else:
del _pg_map[pg]
del _pg_names[pg]
del _pg_group_ranks[pg]
def get_rank(group=group.WORLD):
"""
    Returns the rank of the current process in the given process group.
    Rank is a unique identifier assigned to each process within a distributed
    process group. Ranks are always consecutive integers ranging from 0 to
    ``world_size - 1``.
Arguments:
group (ProcessGroup, optional): The process group to work on
Returns:
The rank of the process group
-1, if not part of the group
"""
if _rank_not_in_group(group):
return -1
_check_default_pg()
if group == GroupMember.WORLD:
return _default_pg.rank()
return _get_group_rank(group, _default_pg.rank())
def get_world_size(group=group.WORLD):
"""
Returns the number of processes in the current process group
Arguments:
group (ProcessGroup, optional): The process group to work on
Returns:
The world size of the process group
-1, if not part of the group
"""
if _rank_not_in_group(group):
return -1
return _get_group_size(group)
def isend(tensor,
dst,
group=group.WORLD,
tag=0):
"""
Sends a tensor asynchronously.
Arguments:
tensor (Tensor): Tensor to send.
dst (int): Destination rank.
group (ProcessGroup, optional): The process group to work on
tag (int, optional): Tag to match send with remote recv
Returns:
A distributed request object.
None, if not part of the group
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
return
if group == GroupMember.WORLD:
_check_default_pg()
return _default_pg.send([tensor], dst, tag)
else:
group_dst_rank = _get_group_rank(group, dst)
return group.send([tensor], group_dst_rank, tag)
def irecv(tensor,
src,
group=group.WORLD,
tag=0):
"""
Receives a tensor asynchronously.
Arguments:
tensor (Tensor): Tensor to fill with received data.
src (int): Source rank.
group (ProcessGroup, optional): The process group to work on
tag (int, optional): Tag to match recv with remote send
Returns:
A distributed request object.
None, if not part of the group
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
return
if group == GroupMember.WORLD:
_check_default_pg()
return _default_pg.recv([tensor], src, tag)
else:
group_src_rank = _get_group_rank(group, src)
return group.recv([tensor], group_src_rank, tag)
def send(tensor,
dst,
group=group.WORLD,
tag=0):
"""
Sends a tensor synchronously.
Arguments:
tensor (Tensor): Tensor to send.
dst (int): Destination rank.
group (ProcessGroup, optional): The process group to work on
tag (int, optional): Tag to match send with remote recv
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
return
if group == GroupMember.WORLD:
_check_default_pg()
_default_pg.send([tensor], dst, tag).wait()
else:
group_dst_rank = _get_group_rank(group, dst)
group.send([tensor], group_dst_rank, tag).wait()
def recv(tensor,
src=None,
group=group.WORLD,
tag=0):
"""
Receives a tensor synchronously.
Arguments:
tensor (Tensor): Tensor to fill with received data.
src (int, optional): Source rank. Will receive from any
process if unspecified.
group (ProcessGroup, optional): The process group to work on
tag (int, optional): Tag to match recv with remote send
Returns:
Sender rank
-1, if not part of the group
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
return -1
if group == GroupMember.WORLD:
_check_default_pg()
pg = _default_pg
else:
pg = group
if src is None:
work = pg.recv_anysource([tensor], tag)
work.wait()
src_rank = work.source_rank()
if group == GroupMember.WORLD:
return src_rank
else:
return _get_global_rank(pg, src_rank)
else:
if group == GroupMember.WORLD:
pg.recv([tensor], src, tag).wait()
else:
group_src_rank = _get_group_rank(pg, src)
pg.recv([tensor], group_src_rank, tag).wait()
return src
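# Point-to-point usage sketch (illustrative; assumes two ranks in the default
# group):
#
#     if get_rank() == 0:
#         send(torch.ones(4), dst=1, tag=7)         # blocks until sent
#     else:
#         buf = torch.zeros(4)
#         sender = recv(buf, src=None, tag=7)       # returns the source rank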
def broadcast_multigpu(tensor_list,
src,
group=group.WORLD,
async_op=False,
src_tensor=0):
"""
Broadcasts the tensor to the whole group with multiple GPU tensors
per node.
``tensor`` must have the same number of elements in all the GPUs from
    all processes participating in the collective. Each tensor in the list must
    be on a different GPU.
    Only the nccl and gloo backends are currently supported;
    tensors should only be GPU tensors.
Arguments:
tensor_list (List[Tensor]): Tensors that participate in the collective
operation. if ``src`` is the rank, then ``src_tensor``th element of
``tensor_list`` (``tensor_list[src_tensor]``) will be broadcasted
to all other tensors (on different GPUs) in the src process and
all tensors in ``tensor_list`` of other non-src processes.
You also need to make sure that ``len(tensor_list)`` is the same
for all the distributed processes calling this function.
src (int): Source rank.
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
src_tensor (int, optional): Source tensor rank within ``tensor_list``
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
if _rank_not_in_group(group):
return
opts = BroadcastOptions()
opts.rootRank = src
opts.rootTensor = src_tensor
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.broadcast(tensor_list, opts)
else:
group_src_rank = _get_group_rank(group, src)
opts.rootRank = group_src_rank
work = group.broadcast(tensor_list, opts)
if async_op:
return work
else:
work.wait()
def broadcast(tensor,
src,
group=group.WORLD,
async_op=False):
"""
Broadcasts the tensor to the whole group.
``tensor`` must have the same number of elements in all processes
participating in the collective.
Arguments:
tensor (Tensor): Data to be sent if ``src`` is the rank of current
process, and tensor to be used to save received data otherwise.
src (int): Source rank.
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
return
opts = BroadcastOptions()
opts.rootRank = src
opts.rootTensor = 0
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.broadcast([tensor], opts)
else:
group_src_rank = _get_group_rank(group, src)
opts.rootRank = group_src_rank
work = group.broadcast([tensor], opts)
if async_op:
return work
else:
work.wait()
def all_reduce_multigpu(tensor_list,
op=ReduceOp.SUM,
group=group.WORLD,
async_op=False):
r"""
Reduces the tensor data across all machines in such a way that all get
the final result. This function reduces a number of tensors on every node,
while each tensor resides on different GPUs.
Therefore, the input tensor in the tensor list needs to be GPU tensors.
Also, each tensor in the tensor list needs to reside on a different GPU.
    After the call, each ``tensor`` in ``tensor_list`` is going to be bitwise
    identical in all processes.
    Only the nccl and gloo backends are currently supported;
    tensors should only be GPU tensors.
    Arguments:
        tensor_list (List[Tensor]): List of input and output tensors of
the collective. The function operates in-place and requires that
each tensor to be a GPU tensor on different GPUs.
You also need to make sure that ``len(tensor_list)`` is the same for
all the distributed processes calling this function.
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
if _rank_not_in_group(group):
return
opts = AllreduceOptions()
opts.reduceOp = op
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.allreduce(tensor_list, opts)
else:
work = group.allreduce(tensor_list, opts)
if async_op:
return work
else:
work.wait()
def all_reduce(tensor,
op=ReduceOp.SUM,
group=group.WORLD,
async_op=False):
"""
Reduces the tensor data across all machines in such a way that all get
the final result.
After the call ``tensor`` is going to be bitwise identical in all processes.
Arguments:
tensor (Tensor): Input and output of the collective. The function
operates in-place.
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
return
opts = AllreduceOptions()
opts.reduceOp = op
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.allreduce([tensor], opts)
else:
work = group.allreduce([tensor], opts)
if async_op:
return work
else:
work.wait()
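# Usage sketch for all_reduce (illustrative): every rank contributes its tensor
# and, after the call, every rank holds the element-wise reduction in-place.
#
#     t = torch.ones(3) * get_rank()
#     all_reduce(t, op=ReduceOp.SUM)         # each element of t is now
#                                            # 0 + 1 + ... + (world_size - 1)
#     handle = all_reduce(t, async_op=True)  # async form returns a work handle
#     handle.wait()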
def reduce_multigpu(tensor_list,
dst,
op=ReduceOp.SUM,
group=group.WORLD,
async_op=False,
dst_tensor=0):
"""
Reduces the tensor data on multiple GPUs across all machines. Each tensor
in ``tensor_list`` should reside on a separate GPU
Only the GPU of ``tensor_list[dst_tensor]`` on the process with rank ``dst``
is going to receive the final result.
    Only the nccl backend is currently supported;
    tensors should only be GPU tensors.
Arguments:
tensor_list (List[Tensor]): Input and output GPU tensors of the
collective. The function operates in-place.
You also need to make sure that ``len(tensor_list)`` is the same for
all the distributed processes calling this function.
dst (int): Destination rank
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
dst_tensor (int, optional): Destination tensor rank within
``tensor_list``
Returns:
Async work handle, if async_op is set to True.
None, otherwise
"""
if _rank_not_in_group(group):
return
opts = ReduceOptions()
opts.reduceOp = op
opts.rootRank = dst
opts.rootTensor = dst_tensor
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.reduce(tensor_list, opts)
else:
group_dst_rank = _get_group_rank(group, dst)
opts.rootRank = group_dst_rank
work = group.reduce(tensor_list, opts)
if async_op:
return work
else:
work.wait()
def reduce(tensor,
dst,
op=ReduceOp.SUM,
group=group.WORLD,
async_op=False):
"""
Reduces the tensor data across all machines.
Only the process with rank ``dst`` is going to receive the final result.
Arguments:
tensor (Tensor): Input and output of the collective. The function
operates in-place.
dst (int): Destination rank
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
return
opts = ReduceOptions()
opts.reduceOp = op
opts.rootRank = dst
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.reduce([tensor], opts)
else:
group_dst_rank = _get_group_rank(group, dst)
opts.rootRank = group_dst_rank
work = group.reduce([tensor], opts)
if async_op:
return work
else:
work.wait()
def all_gather_multigpu(output_tensor_lists,
input_tensor_list,
group=group.WORLD,
async_op=False):
"""
Gathers tensors from the whole group in a list.
Each tensor in ``tensor_list`` should reside on a separate GPU
    Only the nccl backend is currently supported;
    tensors should only be GPU tensors.
Arguments:
output_tensor_lists (List[List[Tensor]]): Output lists. It should
contain correctly-sized tensors on each GPU to be used for output of
the collective.
e.g. ``output_tensor_lists[i]`` contains the all_gather
result that resides on the GPU of ``input_tensor_list[i]``.
Note that each element of ``output_tensor_lists[i]`` has the size of
``world_size * len(input_tensor_list)``, since the function all
gathers the result from every single GPU in the group. To interpret
each element of ``output_tensor_list[i]``, note that
            ``input_tensor_list[j]`` of rank k will appear in
            ``output_tensor_list[i][k * world_size + j]``
Also note that ``len(output_tensor_lists)``, and the size of each
element in ``output_tensor_lists`` (each element is a list,
therefore ``len(output_tensor_lists[i])``) need to be the same
for all the distributed processes calling this function.
input_tensor_list (List[Tensor]): List of tensors(on different GPUs) to
be broadcast from current process.
Note that ``len(input_tensor_list)`` needs to be the same for
all the distributed processes calling this function.
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
if _rank_not_in_group(group):
return
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.allgather(output_tensor_lists, input_tensor_list)
else:
work = group.allgather(output_tensor_lists, input_tensor_list)
if async_op:
return work
else:
work.wait()
def all_gather(tensor_list,
tensor,
group=group.WORLD,
async_op=False):
"""
Gathers tensors from the whole group in a list.
Arguments:
tensor_list (list[Tensor]): Output list. It should contain
correctly-sized tensors to be used for output of the collective.
tensor (Tensor): Tensor to be broadcast from current process.
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
_check_tensor_list(tensor_list, "tensor_list")
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
return
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.allgather([tensor_list], [tensor])
else:
work = group.allgather([tensor_list], [tensor])
if async_op:
return work
else:
work.wait()
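# Usage sketch for all_gather (illustrative): the caller pre-allocates one
# correctly-sized output tensor per rank; afterwards every rank holds every
# rank's input.
#
#     outs = [torch.zeros(2) for _ in range(get_world_size())]
#     all_gather(outs, torch.ones(2) * get_rank())
#     # outs[k] now equals rank k's input tensor on every process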
def gather(tensor,
gather_list,
dst,
group=group.WORLD,
async_op=False):
"""
Gathers a list of tensors in a single process.
Arguments:
tensor (Tensor): Input tensor.
gather_list (list[Tensor]): List of appropriately-sized tensors to
use for received data. Required only in the receiving process.
dst (int): Destination rank. Required in all processes except the one
                   that is receiving the data.
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
_check_single_tensor(tensor, "tensor")
_check_tensor_list(gather_list, "gather_list")
if _rank_not_in_group(group):
return
my_rank = get_rank()
if dst == my_rank:
if gather_list is None:
raise RuntimeError("gather_list is a required argument in gather "
"destination")
input_tensors = [tensor]
output_tensors = [gather_list]
else:
if gather_list:
raise RuntimeError("non-empty gather_list can be given only "
"to gather destination")
input_tensors = [tensor]
output_tensors = []
opts = GatherOptions()
opts.rootRank = dst
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.gather(output_tensors, input_tensors, opts)
else:
group_dst_rank = _get_group_rank(group, dst)
opts.rootRank = group_dst_rank
work = group.gather(output_tensors, input_tensors, opts)
if async_op:
return work
else:
work.wait()
def scatter(tensor,
scatter_list,
src,
group=group.WORLD,
async_op=False):
"""
Scatters a list of tensors to all processes in a group.
Each process will receive exactly one tensor and store its data in the
``tensor`` argument.
Arguments:
tensor (Tensor): Output tensor.
scatter_list (list[Tensor]): List of tensors to scatter. Required only
in the process that is sending the data.
src (int): Source rank. Required in all processes except the one that
is sending the data.
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
_check_single_tensor(tensor, "tensor")
_check_tensor_list(scatter_list, "scatter_list")
if _rank_not_in_group(group):
return
my_rank = get_rank()
if src == my_rank:
if scatter_list is None:
raise RuntimeError("scatter_list is a required argument in "
"scatter source")
input_tensors = [scatter_list]
output_tensors = [tensor]
else:
if scatter_list:
            raise RuntimeError("non-empty scatter_list can be given only "
                               "to scatter source")
input_tensors = []
output_tensors = [tensor]
opts = ScatterOptions()
opts.rootRank = src
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.scatter(output_tensors, input_tensors, opts)
else:
group_src_rank = _get_group_rank(group, src)
opts.rootRank = group_src_rank
work = group.scatter(output_tensors, input_tensors, opts)
if async_op:
return work
else:
work.wait()
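# Usage sketch for gather/scatter (illustrative): only the root rank supplies
# the list argument; every other rank passes an empty list.
#
#     out = torch.zeros(2)
#     if get_rank() == 0:
#         chunks = [torch.ones(2) * r for r in range(get_world_size())]
#         scatter(out, scatter_list=chunks, src=0)
#     else:
#         scatter(out, scatter_list=[], src=0)
#     # every rank r now has out == r * torch.ones(2)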
def barrier(group=group.WORLD,
async_op=False):
"""
Synchronizes all processes.
    This collective blocks processes until the whole group enters this function
    (if async_op is False), or until wait() is called on the returned async work
    handle (if async_op is True).
Arguments:
group (ProcessGroup, optional): The process group to work on
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
if _rank_not_in_group(group):
return
if group == GroupMember.WORLD:
_check_default_pg()
work = _default_pg.barrier()
else:
work = group.barrier()
if async_op:
return work
else:
work.wait()
def new_group(ranks=None, timeout=_default_pg_timeout):
"""
Creates a new distributed group.
This function requires that all processes in the main group (i.e. all
processes that are part of the distributed job) enter this function, even
if they are not going to be members of the group. Additionally, groups
should be created in the same order in all processes.
Arguments:
ranks (list[int]): List of ranks of group members.
timeout (timedelta, optional): Timeout for operations executed against
the process group. Default value equals 30 minutes.
This is only applicable for the ``gloo`` backend.
Returns:
A handle of distributed group that can be given to collective calls.
"""
_check_default_pg()
global _pg_group_ranks
global _group_count
global _pg_names
group_name = str(_group_count)
_group_count += 1
if group_name in _pg_names.values():
raise RuntimeError("The specified group name has already been "
"created, please use a different group name")
default_backend, _ = _pg_map[_default_pg]
global_rank = _default_pg.rank()
global_world_size = _default_pg.size()
# checks the input ranks
if ranks is not None:
input_ranks = list(ranks)
group_world_size = len(ranks)
if group_world_size > global_world_size:
            raise RuntimeError("the new group's world size should be less than or "
"equal to the world size set by "
"init_process_group")
# check ranks' sanity
for rank in ranks:
if rank < 0 or rank >= global_world_size:
                raise RuntimeError("The new group's rank should be within the "
                                   "world_size set by init_process_group")
if global_rank in ranks:
group_rank = ranks.index(global_rank)
else:
group_rank = None
else:
input_ranks = []
ranks = list(range(global_world_size))
group_world_size = global_world_size
group_rank = global_rank
if default_backend == Backend.MPI:
in_group = global_rank in ranks
pg = _new_process_group_helper(group_world_size,
group_rank,
input_ranks,
in_group,
group_name,
timeout=timeout)
else:
# Release ranks not in the group
if global_rank not in ranks:
return GroupMember.NON_GROUP_MEMBER
if default_backend != Backend.MPI:
pg = _new_process_group_helper(group_world_size,
group_rank,
input_ranks,
True,
group_name,
timeout=timeout)
# Create the global rank to group rank mapping
_pg_group_ranks[pg] = {}
if default_backend == Backend.MPI:
_pg_group_ranks[pg] = pg.group_ranks()
else:
for rank in range(global_world_size):
if rank in ranks:
_pg_group_ranks[pg][rank] = ranks.index(rank)
return pg
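# Usage sketch for new_group (illustrative): every process in the job must call
# it, even processes that will not be members; non-members get back
# GroupMember.NON_GROUP_MEMBER and collectives on that group skip them.
#
#     g = new_group(ranks=[0, 1])
#     t = torch.zeros(1)
#     all_reduce(t, group=g)   # no-op on ranks outside [0, 1]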
|
|
from __future__ import unicode_literals, division, absolute_import
from collections import defaultdict
import logging
import re
from datetime import datetime
from sqlalchemy.orm import relationship
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.schema import Table, ForeignKey
from sqlalchemy import Column, Integer, DateTime, Unicode, Index
from flexget import db_schema, options, plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.options import ParseExtrasAction, get_parser
from flexget.utils.sqlalchemy_utils import table_schema, get_index_by_name
from flexget.utils.tools import console, strip_html
from flexget.manager import Session
log = logging.getLogger('archive')
SCHEMA_VER = 0
Base = db_schema.versioned_base('archive', SCHEMA_VER)
archive_tags_table = Table('archive_entry_tags', Base.metadata,
Column('entry_id', Integer, ForeignKey('archive_entry.id')),
Column('tag_id', Integer, ForeignKey('archive_tag.id')),
Index('ix_archive_tags', 'entry_id', 'tag_id'))
archive_sources_table = Table('archive_entry_sources', Base.metadata,
Column('entry_id', Integer, ForeignKey('archive_entry.id')),
Column('source_id', Integer, ForeignKey('archive_source.id')),
Index('ix_archive_sources', 'entry_id', 'source_id'))
class ArchiveEntry(Base):
__tablename__ = 'archive_entry'
__table_args__ = (Index('ix_archive_title_url', 'title', 'url'),)
id = Column(Integer, primary_key=True)
title = Column(Unicode, index=True)
url = Column(Unicode, index=True)
description = Column(Unicode)
task = Column('feed', Unicode) # DEPRECATED, but SQLite does not support drop column
added = Column(DateTime, index=True)
tags = relationship("ArchiveTag", secondary=archive_tags_table)
sources = relationship("ArchiveSource", secondary=archive_sources_table, backref='archive_entries')
def __init__(self):
self.added = datetime.now()
def __str__(self):
return '<ArchiveEntry(title=%s,url=%s,task=%s,added=%s)>' %\
(self.title, self.url, self.task, self.added.strftime('%Y-%m-%d %H:%M'))
class ArchiveTag(Base):
__tablename__ = 'archive_tag'
id = Column(Integer, primary_key=True)
name = Column(Unicode, index=True)
def __init__(self, name):
self.name = name
def __str__(self):
return '<ArchiveTag(id=%s,name=%s)>' % (self.id, self.name)
class ArchiveSource(Base):
__tablename__ = 'archive_source'
id = Column(Integer, primary_key=True)
name = Column(Unicode, index=True)
def __init__(self, name):
self.name = name
def __str__(self):
return '<ArchiveSource(id=%s,name=%s)>' % (self.id, self.name)
def get_source(name, session):
"""
:param string name: Source name
:param session: SQLAlchemy session
:return: ArchiveSource from db or new one
"""
try:
return session.query(ArchiveSource).filter(ArchiveSource.name == name).one()
except NoResultFound:
source = ArchiveSource(name)
return source
def get_tag(name, session):
"""
:param string name: Tag name
:param session: SQLAlchemy session
:return: ArchiveTag from db or new one
"""
try:
return session.query(ArchiveTag).filter(ArchiveTag.name == name).one()
except NoResultFound:
        tag = ArchiveTag(name)
        return tag
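# Note (added): neither helper above adds the freshly created row to the
# session itself; the new ArchiveSource/ArchiveTag is persisted through the
# default save-update cascade once it is appended to an ArchiveEntry
# relationship (as done in Archive.on_task_exit below).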
@db_schema.upgrade('archive')
def upgrade(ver, session):
if ver is None:
# get rid of old index
aet = table_schema('archive_entry', session)
old_index = get_index_by_name(aet, 'archive_feed_title')
if old_index is not None:
log.info('Dropping legacy index (may take a while) ...')
old_index.drop()
# create new index by title, url
new_index = get_index_by_name(Base.metadata.tables['archive_entry'], 'ix_archive_title_url')
if new_index:
log.info('Creating new index (may take a while) ...')
new_index.create(bind=session.connection())
else:
# maybe removed from the model by later migrations?
log.error('Unable to create index `ix_archive_title_url`, removed from the model?')
# TODO: nag about this ?
# This is safe as long as we don't delete the model completely :)
# But generally never use Declarative Models in migrate!
if session.query(ArchiveEntry).first():
log.critical('----------------------------------------------')
log.critical('You should run `--archive consolidate` ')
log.critical('one time when you have time, it may take hours')
log.critical('----------------------------------------------')
ver = 0
return ver
class Archive(object):
"""
    Archives all new items into the database where they can later be searched and injected.
    Stores the entries in the state they are in at the exit phase, so that any task cleanup of the
    title etc. is persisted to the database. This may, however, cause them to behave unexpectedly
    when injected back into the original task.
"""
schema = {
'oneOf': [
{'type': 'boolean'},
{'type': 'array', 'items': {'type': 'string'}}
]
}
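    # Illustrative note (not in the original plugin): per the schema above the
    # task config is either a bare boolean, e.g. ``archive: yes``, or a list of
    # tag names, e.g. ``archive: [tv, hd]``; the listed names become ArchiveTag
    # rows attached to every archived entry in on_task_exit below.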
def on_task_exit(self, task, config):
"""Add new entries into archive. We use exit phase in case the task corrects title or url via some plugins."""
if isinstance(config, bool):
tag_names = []
else:
tag_names = config
tags = []
for tag_name in set(tag_names):
tags.append(get_tag(tag_name, task.session))
count = 0
processed = []
for entry in task.entries + task.rejected + task.failed:
            # An entry may appear in more than one of these lists (not entirely sure), so skip duplicates.
if entry in processed:
continue
else:
processed.append(entry)
ae = task.session.query(ArchiveEntry).\
filter(ArchiveEntry.title == entry['title']).\
filter(ArchiveEntry.url == entry['url']).first()
if ae:
# add (missing) sources
source = get_source(task.name, task.session)
                if source not in ae.sources:
log.debug('Adding `%s` into `%s` sources' % (task.name, ae))
ae.sources.append(source)
# add (missing) tags
for tag_name in tag_names:
atag = get_tag(tag_name, task.session)
                    if atag not in ae.tags:
log.debug('Adding tag %s into %s' % (tag_name, ae))
ae.tags.append(atag)
else:
# create new archive entry
ae = ArchiveEntry()
ae.title = entry['title']
ae.url = entry['url']
if 'description' in entry:
ae.description = entry['description']
ae.task = task.name
ae.sources.append(get_source(task.name, task.session))
if tags:
# note, we're extending empty list
ae.tags.extend(tags)
log.debug('Adding `%s` with %i tags to archive' % (ae, len(tags)))
task.session.add(ae)
count += 1
if count:
log.verbose('Added %i new entries to archive' % count)
def on_task_abort(self, task, config):
"""
        Archive even on task abort, except if the abort happened before the session
        was started.
"""
if task.session is not None:
self.on_task_exit(task, config)
class UrlrewriteArchive(object):
"""
    Provides the capability to rewrite urls from the archive or perform searches with discover.
"""
entry_map = {'title': 'title',
'url': 'url',
'description': 'description'}
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('boolean')
root.accept('list').accept('text')
return root
def search(self, entry, config=None):
"""Search plugin API method"""
session = Session()
entries = set()
try:
for query in entry.get('search_strings', [entry['title']]):
log.debug('looking for `%s` config: %s' % (query, config))
for archive_entry in search(session, query, desc=True):
log.debug('rewrite search result: %s' % archive_entry)
entry = Entry()
entry.update_using_map(self.entry_map, archive_entry, ignore_none=True)
if entry.isvalid():
entries.add(entry)
finally:
session.close()
log.debug('found %i entries' % len(entries))
return entries
def consolidate():
"""
Converts previous archive data model to new one.
"""
session = Session()
try:
log.verbose('Checking archive size ...')
count = session.query(ArchiveEntry).count()
        log.verbose('Found %i items to migrate; this can safely be aborted with CTRL-C.' % count)
# consolidate old data
from progressbar import ProgressBar, Percentage, Bar, ETA
widgets = ['Process - ', ETA(), ' ', Percentage(), ' ', Bar(left='[', right=']')]
bar = ProgressBar(widgets=widgets, maxval=count).start()
# id's for duplicates
duplicates = []
for index, orig in enumerate(session.query(ArchiveEntry).yield_per(5)):
bar.update(index)
# item already processed
if orig.id in duplicates:
continue
# item already migrated
if orig.sources:
                log.info('Database looks like it has already been consolidated, '
                         'item %s already has sources ...' % orig.title)
session.rollback()
return
# add legacy task to the sources list
orig.sources.append(get_source(orig.task, session))
# remove task, deprecated .. well, let's still keep it ..
#orig.task = None
for dupe in session.query(ArchiveEntry).\
filter(ArchiveEntry.id != orig.id).\
filter(ArchiveEntry.title == orig.title).\
filter(ArchiveEntry.url == orig.url).all():
orig.sources.append(get_source(dupe.task, session))
duplicates.append(dupe.id)
if duplicates:
log.info('Consolidated %i items, removing duplicates ...' % len(duplicates))
for id in duplicates:
session.query(ArchiveEntry).filter(ArchiveEntry.id == id).delete()
session.commit()
        log.info('Completed! This does NOT need to be run again.')
except KeyboardInterrupt:
session.rollback()
log.critical('Aborted, no changes saved')
finally:
session.close()
def tag_source(source_name, tag_names=None):
"""
    Tags all archived entries within a source with the supplied tags
:param string source_name: Source name
:param list tag_names: List of tag names to add
"""
    if not tag_names:
return
session = Session()
try:
# check that source exists
source = session.query(ArchiveSource).filter(ArchiveSource.name == source_name).first()
if not source:
            log.critical('Source `%s` does not exist' % source_name)
srcs = ', '.join([s.name for s in session.query(ArchiveSource).order_by(ArchiveSource.name)])
if srcs:
log.info('Known sources: %s' % srcs)
return
# construct tags list
tags = []
for tag_name in tag_names:
tags.append(get_tag(tag_name, session))
# tag 'em
log.verbose('Please wait while adding tags %s ...' % (', '.join(tag_names)))
for a in session.query(ArchiveEntry).\
filter(ArchiveEntry.sources.any(name=source_name)).yield_per(5):
a.tags.extend(tags)
finally:
session.commit()
session.close()
# API function, was also used from the webui .. needs to be rethought
def search(session, text, tags=None, sources=None, desc=False):
"""
Search from the archive.
    :param Session session: SQLAlchemy session, should not be closed while iterating the results.
    :param string text: Search text; spaces and dots are effectively ignored (treated as wildcards).
    :param list tags: Optional list of acceptable tags
    :param list sources: Optional list of acceptable sources
    :param bool desc: Sort results in descending order
    :return: ArchiveEntries matching the query
"""
keyword = unicode(text).replace(' ', '%').replace('.', '%')
    # escape regexp specials in the text, then let spaces and dots match any single character
normalized_re = re.escape(text.replace('.', ' ')).replace('\\ ', ' ').replace(' ', '.')
find_re = re.compile(normalized_re, re.IGNORECASE)
query = session.query(ArchiveEntry).filter(ArchiveEntry.title.like('%' + keyword + '%'))
if tags:
query = query.filter(ArchiveEntry.tags.any(ArchiveTag.name.in_(tags)))
if sources:
query = query.filter(ArchiveEntry.sources.any(ArchiveSource.name.in_(sources)))
if desc:
query = query.order_by(ArchiveEntry.added.desc())
else:
query = query.order_by(ArchiveEntry.added.asc())
for a in query.yield_per(5):
if find_re.match(a.title):
yield a
else:
            log.trace('title %s is too wide a match' % a.title)
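def _example_search_usage():
    # Illustrative sketch only (not part of the original plugin): drives the
    # `search` generator above from a fresh Session and prints the matches.
    # The query string and the tag name are made up.
    session = Session()
    try:
        for ae in search(session, 'some show s01e01', tags=['tv'], desc=True):
            console('%s - %s' % (ae.title, ae.url))
    finally:
        session.close()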
def cli_search(options):
search_term = ' '.join(options.keywords)
tags = options.tags
sources = options.sources
def print_ae(ae):
diff = datetime.now() - ae.added
console('ID: %-6s | Title: %s\nAdded: %s (%d days ago)\nURL: %s' %
(ae.id, ae.title, ae.added, diff.days, ae.url))
source_names = ', '.join([s.name for s in ae.sources])
tag_names = ', '.join([t.name for t in ae.tags])
console('Source(s): %s | Tag(s): %s' % (source_names or 'N/A', tag_names or 'N/A'))
if ae.description:
console('Description: %s' % strip_html(ae.description))
console('---')
session = Session()
try:
console('Searching: %s' % search_term)
if tags:
console('Tags: %s' % ', '.join(tags))
if sources:
console('Sources: %s' % ', '.join(sources))
console('Please wait...')
console('')
results = False
for ae in search(session, search_term, tags=tags, sources=sources):
print_ae(ae)
results = True
if not results:
console('No results found.')
finally:
session.close()
def cli_inject(manager, options):
log.debug('Finding inject content')
inject_entries = defaultdict(list)
session = Session()
try:
for id in options.ids:
archive_entry = session.query(ArchiveEntry).get(id)
# not found
if not archive_entry:
log.critical('There\'s no archived item with ID `%s`' % id)
continue
            # skip if none of the entry's source tasks exist anymore
if not any(source.name in manager.tasks for source in archive_entry.sources):
                log.error('None of the sources (%s) exist anymore, cannot inject `%s` from the archive!' %
(', '.join([s.name for s in archive_entry.sources]), archive_entry.title))
continue
# update list of tasks to be injected
for source in archive_entry.sources:
inject_entries[source.name].append(archive_entry)
finally:
session.close()
for task_name in inject_entries:
entries = []
for inject_entry in inject_entries[task_name]:
log.info('Injecting from archive `%s`' % inject_entry.title)
entry = Entry(inject_entry.title, inject_entry.url)
if inject_entry.description:
entry['description'] = inject_entry.description
if options.immortal:
log.debug('Injecting as immortal')
entry['immortal'] = True
entry['accepted_by'] = 'archive inject'
entry.accept('injected')
entries.append(entry)
manager.scheduler.execute(options={'inject': entries, 'tasks': [task_name]})
with manager.acquire_lock():
manager.scheduler.start(run_schedules=False)
manager.shutdown()
def do_cli(manager, options):
action = options.archive_action
if action == 'tag-source':
tag_source(options.source, tag_names=options.tags)
elif action == 'consolidate':
consolidate()
elif action == 'search':
cli_search(options)
elif action == 'inject':
cli_inject(manager, options)
@event('plugin.register')
def register_plugin():
plugin.register(Archive, 'archive', api_ver=2)
plugin.register(UrlrewriteArchive, 'flexget_archive', groups=['search'], api_ver=2)
@event('options.register')
def register_parser_arguments():
archive_parser = options.register_command('archive', do_cli, help='search and manipulate the archive database')
archive_parser.add_subparsers(title='Actions', metavar='<action>', dest='archive_action')
# Default usage shows the positional arguments after the optional ones, override usage to fix it
search_parser = archive_parser.add_subparser('search', help='search from the archive',
usage='%(prog)s [-h] <keyword> [<keyword> ...] [optional arguments]')
search_parser.add_argument('keywords', metavar='<keyword>', nargs='+', help='keyword(s) to search for')
search_parser.add_argument('--tags', metavar='TAG', nargs='+', default=[], help='tag(s) to search within')
search_parser.add_argument('--sources', metavar='SOURCE', nargs='+', default=[], help='source(s) to search within')
inject_parser = archive_parser.add_subparser('inject', help='inject entries from the archive back into tasks')
inject_parser.add_argument('ids', nargs='+', type=int, metavar='ID', help='archive ID of an item to inject')
inject_parser.add_argument('--immortal', action='store_true', help='injected entries will not be able to be '
'rejected by any plugins')
exec_group = inject_parser.add_argument_group('execute arguments')
exec_group.add_argument('execute_options', action=ParseExtrasAction, parser=get_parser('execute'))
tag_parser = archive_parser.add_subparser('tag-source', help='tag all archived entries within a given source')
tag_parser.add_argument('source', metavar='<source>', help='the source whose entries you would like to tag')
tag_parser.add_argument('tags', nargs='+', metavar='<tag>',
help='the tag(s) you would like to apply to the entries')
archive_parser.add_subparser('consolidate', help='migrate old archive data to new model, may take a long time')
|
|
"""
Reference implementation for sending data to the "mobile" part of DSS.
Opens a video file/stream and sends it to 'mobile_streaming' the same way
it is implemented on an Android system.
Requires FFmpeg (with libx264 and libfdk_aac).
Might use FFprobe to send metadata.
"""
import sys
import re
import socket
import time
import tempfile
import queue
import pymongo
from os.path import join
try:
import _python_base
except ImportError:
from . import _python_base
from dss import storage
from dss.config import config
from dss.tools import ffmpeg, process, thread
from dss.mobile.handler import MediaHandler
from dss.mobile.enum import DataContent, ContentType
from dss.mobile.processing.data import DataProc
class Watcher(thread.Thread):
status = False
def __init__(self, cmd, *args, **kw):
super(Watcher, self).__init__(*args, **kw)
self.cmd = cmd
self.proc = None
self.cond = thread.Condition()
def run(self):
self.status = True
self.name = 'Process Watcher'
try:
self.watch_proc()
except Exception:
self.status = False
raise
def watch_proc(self):
with process.Popen(self.cmd, stderr=process.PIPE,
universal_newlines=True) as self.proc:
while self.proc.poll() is None:
line = self.proc.stderr.readline().rstrip()
content = None
try:
data = re.findall(r'(\w+)=\s*(\S+)\s', line)
if len(data) > 2:
content = dict(data)
except ValueError:
pass
# TODO: use `content` to show percentage of processing.
with self.cond:
self.cond.notify_all()
print('Input video has finished.')
def stop(self):
try:
self.proc.kill()
except Exception:
pass
self.join()
class MockMediaHandler(MediaHandler):
def __init__(self, socket):
self.request = socket
self.setup()
class Sender(thread.Thread):
def __init__(self, addr, *args, **kw):
super(Sender, self).__init__(*args, **kw)
self.queue = queue.Queue()
self.running = False
self.socket = None
self.handler = None
self.addr = addr
self.cond = thread.Condition()
def run(self):
self.running = True
self.socket = socket.socket()
self.socket.connect(self.addr)
self.handler = MockMediaHandler(self.socket)
while self.running:
data = self.queue.get()
if data is None:
break
type, data, kw = data
self.handler.write_data(type, data, **kw)
with self.cond:
self.cond.notify_all()
def insert(self, type, value, **kw):
self.queue.put((type, value, kw))
def stop(self):
self.running = False
        # Note: Queue.empty() was a no-op here; putting the None sentinel below
        # is enough to unblock the consumer, which then exits its loop.
        self.queue.put(None)
self.join()
def read_loop(video_file, audio_file, sender, watcher):
with open(video_file, 'rb') as fv, open(audio_file, 'rb') as fa:
while watcher.status:
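            # Note (added): ffmpeg keeps appending to both files while the
            # watcher reports the process as alive, so read() returns whatever
            # new bytes are available and b'' when nothing new has been written.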
v = fv.read()
if v:
sender.insert(DataContent.video, v)
a = fa.read()
if a:
sender.insert(DataContent.audio, a)
def main():
client = pymongo.MongoClient()
database_name = 'dss_script'
collection_name = 'mobile_send'
db = storage.KeyValueStorage(collection_name, client[database_name])
try:
file = sys.argv[1]
except IndexError:
print('Missing file/stream name as first argument.')
print('Usage:\n\t', './mobile_send', 'FILE_NAME', '[SERVER_IP_ADDRESS]')
sys.exit(-1)
try:
addr = sys.argv[2]
except IndexError:
addr = None
tmp = tempfile.mkdtemp()
video_file = join(tmp, 'video.ts')
audio_file = join(tmp, 'audio.ts')
cmd = ffmpeg.cmd_outputs(
'-re', file, '-f mpegts',
['-an -c:v libx264', '-vn -c:a libfdk_aac'],
[video_file, audio_file],
add_probe=False,
)
#print(' '.join(cmd))
if addr is None:
addr = config['local']['addr']
port = config.getint('local', 'tcp_port')
sender = Sender((addr, port)).start()
watcher = Watcher(cmd).start()
try:
stream_id = db.stream_id
except AttributeError:
stream_id = ''
sender.insert(ContentType.meta, {'id': stream_id}, as_metadata=True)
with sender.cond:
sender.cond.wait()
type, data = sender.handler.read_data()
type, data = DataProc.decode_data(data)
if 'id' in data:
db.stream_id = data['id']
print('Transmission started')
print('ID:', db.stream_id)
time.sleep(2)
try:
# Run loop on other thread because it will block!
_ = thread.Thread(
read_loop,
            args=(video_file, audio_file, sender, watcher)  # match read_loop's (video, audio) order
).start()
with watcher.cond:
watcher.cond.wait()
except KeyboardInterrupt:
watcher.stop()
sender.stop()
print('Exiting...')
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2013 Paul Tagliamonte <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from inspect import getargspec, formatargspec
from hy.models import replace_hy_obj, wrap_value
from hy.models.expression import HyExpression
from hy.models.string import HyString
from hy.errors import HyTypeError, HyMacroExpansionError
from collections import defaultdict
CORE_MACROS = [
"hy.core.bootstrap",
]
EXTRA_MACROS = [
"hy.core.macros",
]
_hy_macros = defaultdict(dict)
_hy_reader = defaultdict(dict)
def macro(name):
"""Decorator to define a macro called `name`.
This stores the macro `name` in the namespace for the module where it is
defined.
If the module where it is defined is in `hy.core`, then the macro is stored
in the default `None` namespace.
This function is called from the `defmacro` special form in the compiler.
"""
def _(fn):
module_name = fn.__module__
if module_name.startswith("hy.core"):
module_name = None
_hy_macros[module_name][name] = fn
return fn
return _
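def _example_macro_registration():
    # Illustrative sketch only (not part of the original module): registers a
    # made-up macro with the decorator above and shows where it ends up.
    @macro("my-macro")
    def my_macro():
        return HyString("hello")
    # Modules outside hy.core are keyed by their own name; hy.core modules
    # share the default None namespace.
    return _hy_macros[my_macro.__module__]["my-macro"] is my_macro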
def reader(name):
"""Decorator to define a reader macro called `name`.
This stores the macro `name` in the namespace for the module where it is
defined.
If the module where it is defined is in `hy.core`, then the macro is stored
in the default `None` namespace.
This function is called from the `defreader` special form in the compiler.
"""
def _(fn):
module_name = fn.__module__
if module_name.startswith("hy.core"):
module_name = None
_hy_reader[module_name][name] = fn
return fn
return _
def require(source_module, target_module):
"""Load the macros from `source_module` in the namespace of
`target_module`.
This function is called from the `require` special form in the compiler.
"""
macros = _hy_macros[source_module]
refs = _hy_macros[target_module]
for name, macro in macros.items():
refs[name] = macro
readers = _hy_reader[source_module]
reader_refs = _hy_reader[target_module]
for name, reader in readers.items():
reader_refs[name] = reader
def load_macros(module_name):
"""Load the hy builtin macros for module `module_name`.
Modules from `hy.core` can only use the macros from CORE_MACROS.
Other modules get the macros from CORE_MACROS and EXTRA_MACROS.
"""
def _import(module, module_name=module_name):
"__import__ a module, avoiding recursions"
if module != module_name:
__import__(module)
for module in CORE_MACROS:
_import(module)
if module_name.startswith("hy.core"):
return
for module in EXTRA_MACROS:
_import(module)
def make_empty_fn_copy(fn):
argspec = getargspec(fn)
formatted_args = formatargspec(*argspec)
fn_str = 'lambda {}: None'.format(
formatted_args.lstrip('(').rstrip(')'))
empty_fn = eval(fn_str)
return empty_fn
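# Note (added): for a macro defined as ``def m(a, b=1): ...`` the helper above
# builds and evals ``lambda a, b=1: None``, a do-nothing callable with the
# same signature; macroexpand_1 calls it purely to validate the arity of a
# macro invocation.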
def macroexpand(tree, module_name):
"""Expand the toplevel macros for the `tree`.
Load the macros from the given `module_name`, then expand the (top-level)
macros in `tree` until it stops changing.
"""
load_macros(module_name)
old = None
while old != tree:
old = tree
tree = macroexpand_1(tree, module_name)
return tree
def macroexpand_1(tree, module_name):
"""Expand the toplevel macro from `tree` once, in the context of
`module_name`."""
if isinstance(tree, HyExpression):
if tree == []:
return tree
fn = tree[0]
if fn in ("quote", "quasiquote"):
return tree
ntree = HyExpression(tree[:])
ntree.replace(tree)
if isinstance(fn, HyString):
m = _hy_macros[module_name].get(fn)
if m is None:
m = _hy_macros[None].get(fn)
if m is not None:
try:
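                    # Note (added): the empty copy is called first so that an
                    # arity mismatch surfaces as a TypeError here and is turned
                    # into a HyMacroExpansionError before the macro body runs.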
m_copy = make_empty_fn_copy(m)
m_copy(*ntree[1:])
except TypeError as e:
msg = "expanding `" + str(tree[0]) + "': "
msg += str(e).replace("<lambda>()", "", 1).strip()
raise HyMacroExpansionError(tree, msg)
try:
obj = wrap_value(m(*ntree[1:]))
except HyTypeError as e:
if e.expression is None:
e.expression = tree
raise
except Exception as e:
msg = "expanding `" + str(tree[0]) + "': " + repr(e)
raise HyMacroExpansionError(tree, msg)
replace_hy_obj(obj, tree)
return obj
return ntree
return tree
def reader_macroexpand(char, tree, module_name):
"""Expand the reader macro "char" with argument `tree`."""
load_macros(module_name)
reader_macro = _hy_reader[module_name].get(char)
if reader_macro is None:
try:
reader_macro = _hy_reader[None][char]
except KeyError:
raise HyTypeError(
char,
"`{0}' is not a defined reader macro.".format(char)
)
expr = reader_macro(tree)
return replace_hy_obj(wrap_value(expr), tree)
|
|
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for autotagging functionality.
"""
import os
import shutil
import re
import copy
import _common
from _common import unittest
from beets import autotag
from beets.autotag import match
from beets.library import Item
from beets.util import plurality
from beets.autotag import AlbumInfo, TrackInfo
from beets import config
class PluralityTest(unittest.TestCase):
def test_plurality_consensus(self):
objs = [1, 1, 1, 1]
obj, freq = plurality(objs)
self.assertEqual(obj, 1)
self.assertEqual(freq, 4)
def test_plurality_near_consensus(self):
objs = [1, 1, 2, 1]
obj, freq = plurality(objs)
self.assertEqual(obj, 1)
self.assertEqual(freq, 3)
def test_plurality_conflict(self):
objs = [1, 1, 2, 2, 3]
obj, freq = plurality(objs)
self.assert_(obj in (1, 2))
self.assertEqual(freq, 2)
def test_plurality_empty_sequence_raises_error(self):
with self.assertRaises(ValueError):
plurality([])
def test_current_metadata_finds_pluralities(self):
items = [Item({'artist': 'The Beetles', 'album': 'The White Album'}),
Item({'artist': 'The Beatles', 'album': 'The White Album'}),
Item({'artist': 'The Beatles', 'album': 'Teh White Album'})]
l_artist, l_album, artist_consensus = match.current_metadata(items)
self.assertEqual(l_artist, 'The Beatles')
self.assertEqual(l_album, 'The White Album')
self.assertFalse(artist_consensus)
def test_current_metadata_artist_consensus(self):
items = [Item({'artist': 'The Beatles', 'album': 'The White Album'}),
Item({'artist': 'The Beatles', 'album': 'The White Album'}),
Item({'artist': 'The Beatles', 'album': 'Teh White Album'})]
l_artist, l_album, artist_consensus = match.current_metadata(items)
self.assertEqual(l_artist, 'The Beatles')
self.assertEqual(l_album, 'The White Album')
self.assertTrue(artist_consensus)
def test_albumartist_consensus(self):
items = [Item({'artist': 'tartist1', 'album': 'album',
'albumartist': 'aartist'}),
Item({'artist': 'tartist2', 'album': 'album',
'albumartist': 'aartist'}),
Item({'artist': 'tartist3', 'album': 'album',
'albumartist': 'aartist'})]
l_artist, l_album, artist_consensus = match.current_metadata(items)
self.assertEqual(l_artist, 'aartist')
self.assertFalse(artist_consensus)
def _make_item(title, track, artist=u'some artist'):
return Item({
'title': title, 'track': track,
'artist': artist, 'album': u'some album',
'length': 1,
'mb_trackid': '', 'mb_albumid': '', 'mb_artistid': '',
})
def _make_trackinfo():
return [
TrackInfo(u'one', None, u'some artist', length=1, index=1),
TrackInfo(u'two', None, u'some artist', length=1, index=2),
TrackInfo(u'three', None, u'some artist', length=1, index=3),
]
class TrackDistanceTest(unittest.TestCase):
def test_identical_tracks(self):
item = _make_item(u'one', 1)
info = _make_trackinfo()[0]
dist = match.track_distance(item, info, incl_artist=True)
self.assertEqual(dist, 0.0)
def test_different_title(self):
item = _make_item(u'foo', 1)
info = _make_trackinfo()[0]
dist = match.track_distance(item, info, incl_artist=True)
self.assertNotEqual(dist, 0.0)
def test_different_artist(self):
item = _make_item(u'one', 1)
item.artist = u'foo'
info = _make_trackinfo()[0]
dist = match.track_distance(item, info, incl_artist=True)
self.assertNotEqual(dist, 0.0)
def test_various_artists_tolerated(self):
item = _make_item(u'one', 1)
item.artist = u'Various Artists'
info = _make_trackinfo()[0]
dist = match.track_distance(item, info, incl_artist=True)
self.assertEqual(dist, 0.0)
class AlbumDistanceTest(unittest.TestCase):
def _mapping(self, items, info):
out = {}
for i, t in zip(items, info.tracks):
out[i] = t
return out
def _dist(self, items, info):
return match.distance(items, info, self._mapping(items, info))
def test_identical_albums(self):
items = []
items.append(_make_item(u'one', 1))
items.append(_make_item(u'two', 2))
items.append(_make_item(u'three', 3))
info = AlbumInfo(
artist = u'some artist',
album = u'some album',
tracks = _make_trackinfo(),
va = False,
album_id = None, artist_id = None,
)
self.assertEqual(self._dist(items, info), 0)
def test_incomplete_album(self):
items = []
items.append(_make_item(u'one', 1))
items.append(_make_item(u'three', 3))
info = AlbumInfo(
artist = u'some artist',
album = u'some album',
tracks = _make_trackinfo(),
va = False,
album_id = None, artist_id = None,
)
dist = self._dist(items, info)
self.assertNotEqual(dist, 0)
# Make sure the distance is not too great
self.assertTrue(dist < 0.2)
def test_global_artists_differ(self):
items = []
items.append(_make_item(u'one', 1))
items.append(_make_item(u'two', 2))
items.append(_make_item(u'three', 3))
info = AlbumInfo(
artist = u'someone else',
album = u'some album',
tracks = _make_trackinfo(),
va = False,
album_id = None, artist_id = None,
)
self.assertNotEqual(self._dist(items, info), 0)
def test_comp_track_artists_match(self):
items = []
items.append(_make_item(u'one', 1))
items.append(_make_item(u'two', 2))
items.append(_make_item(u'three', 3))
info = AlbumInfo(
artist = u'should be ignored',
album = u'some album',
tracks = _make_trackinfo(),
va = True,
album_id = None, artist_id = None,
)
self.assertEqual(self._dist(items, info), 0)
def test_comp_no_track_artists(self):
# Some VA releases don't have track artists (incomplete metadata).
items = []
items.append(_make_item(u'one', 1))
items.append(_make_item(u'two', 2))
items.append(_make_item(u'three', 3))
info = AlbumInfo(
artist = u'should be ignored',
album = u'some album',
tracks = _make_trackinfo(),
va = True,
album_id = None, artist_id = None,
)
info.tracks[0].artist = None
info.tracks[1].artist = None
info.tracks[2].artist = None
self.assertEqual(self._dist(items, info), 0)
def test_comp_track_artists_do_not_match(self):
items = []
items.append(_make_item(u'one', 1))
items.append(_make_item(u'two', 2, u'someone else'))
items.append(_make_item(u'three', 3))
info = AlbumInfo(
artist = u'some artist',
album = u'some album',
tracks = _make_trackinfo(),
va = True,
album_id = None, artist_id = None,
)
self.assertNotEqual(self._dist(items, info), 0)
def test_tracks_out_of_order(self):
items = []
items.append(_make_item(u'one', 1))
items.append(_make_item(u'three', 2))
items.append(_make_item(u'two', 3))
info = AlbumInfo(
artist = u'some artist',
album = u'some album',
tracks = _make_trackinfo(),
va = False,
album_id = None, artist_id = None,
)
dist = self._dist(items, info)
self.assertTrue(0 < dist < 0.2)
def test_two_medium_release(self):
items = []
items.append(_make_item(u'one', 1))
items.append(_make_item(u'two', 2))
items.append(_make_item(u'three', 3))
info = AlbumInfo(
artist = u'some artist',
album = u'some album',
tracks = _make_trackinfo(),
va = False,
album_id = None, artist_id = None,
)
info.tracks[0].medium_index = 1
info.tracks[1].medium_index = 2
info.tracks[2].medium_index = 1
dist = self._dist(items, info)
self.assertEqual(dist, 0)
def test_per_medium_track_numbers(self):
items = []
items.append(_make_item(u'one', 1))
items.append(_make_item(u'two', 2))
items.append(_make_item(u'three', 1))
info = AlbumInfo(
artist = u'some artist',
album = u'some album',
tracks = _make_trackinfo(),
va = False,
album_id = None, artist_id = None,
)
info.tracks[0].medium_index = 1
info.tracks[1].medium_index = 2
info.tracks[2].medium_index = 1
dist = self._dist(items, info)
self.assertEqual(dist, 0)
def _mkmp3(path):
shutil.copyfile(os.path.join(_common.RSRC, 'min.mp3'), path)
class AlbumsInDirTest(_common.TestCase):
def setUp(self):
super(AlbumsInDirTest, self).setUp()
# create a directory structure for testing
self.base = os.path.abspath(os.path.join(self.temp_dir, 'tempdir'))
os.mkdir(self.base)
os.mkdir(os.path.join(self.base, 'album1'))
os.mkdir(os.path.join(self.base, 'album2'))
os.mkdir(os.path.join(self.base, 'more'))
os.mkdir(os.path.join(self.base, 'more', 'album3'))
os.mkdir(os.path.join(self.base, 'more', 'album4'))
_mkmp3(os.path.join(self.base, 'album1', 'album1song1.mp3'))
_mkmp3(os.path.join(self.base, 'album1', 'album1song2.mp3'))
_mkmp3(os.path.join(self.base, 'album2', 'album2song.mp3'))
_mkmp3(os.path.join(self.base, 'more', 'album3', 'album3song.mp3'))
_mkmp3(os.path.join(self.base, 'more', 'album4', 'album4song.mp3'))
def test_finds_all_albums(self):
albums = list(autotag.albums_in_dir(self.base))
self.assertEqual(len(albums), 4)
def test_separates_contents(self):
found = []
for _, album in autotag.albums_in_dir(self.base):
found.append(re.search(r'album(.)song', album[0].path).group(1))
self.assertTrue('1' in found)
self.assertTrue('2' in found)
self.assertTrue('3' in found)
self.assertTrue('4' in found)
def test_finds_multiple_songs(self):
for _, album in autotag.albums_in_dir(self.base):
n = re.search(r'album(.)song', album[0].path).group(1)
if n == '1':
self.assertEqual(len(album), 2)
else:
self.assertEqual(len(album), 1)
class MultiDiscAlbumsInDirTest(_common.TestCase):
def setUp(self):
super(MultiDiscAlbumsInDirTest, self).setUp()
self.base = os.path.abspath(os.path.join(self.temp_dir, 'tempdir'))
os.mkdir(self.base)
self.dirs = [
# Nested album, multiple subdirs.
# Also, false positive marker in root dir, and subtitle for disc 3.
os.path.join(self.base, 'ABCD1234'),
os.path.join(self.base, 'ABCD1234', 'cd 1'),
os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus'),
# Nested album, single subdir.
# Also, punctuation between marker and disc number.
os.path.join(self.base, 'album'),
os.path.join(self.base, 'album', 'cd _ 1'),
# Flattened album, case typo.
# Also, false positive marker in parent dir.
os.path.join(self.base, 'artist [CD5]'),
os.path.join(self.base, 'artist [CD5]', 'CAT disc 1'),
os.path.join(self.base, 'artist [CD5]', 'CAt disc 2'),
# Single disc album, sorted between CAT discs.
os.path.join(self.base, 'artist [CD5]', 'CATS'),
]
self.files = [
os.path.join(self.base, 'ABCD1234', 'cd 1', 'song1.mp3'),
os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus', 'song2.mp3'),
os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus', 'song3.mp3'),
os.path.join(self.base, 'album', 'cd _ 1', 'song4.mp3'),
os.path.join(self.base, 'artist [CD5]', 'CAT disc 1', 'song5.mp3'),
os.path.join(self.base, 'artist [CD5]', 'CAt disc 2', 'song6.mp3'),
os.path.join(self.base, 'artist [CD5]', 'CATS', 'song7.mp3'),
]
for path in self.dirs:
os.mkdir(path)
for path in self.files:
_mkmp3(path)
def test_coalesce_nested_album_multiple_subdirs(self):
albums = list(autotag.albums_in_dir(self.base))
self.assertEquals(len(albums), 4)
root, items = albums[0]
self.assertEquals(root, self.dirs[0:3])
self.assertEquals(len(items), 3)
def test_coalesce_nested_album_single_subdir(self):
albums = list(autotag.albums_in_dir(self.base))
root, items = albums[1]
self.assertEquals(root, self.dirs[3:5])
self.assertEquals(len(items), 1)
def test_coalesce_flattened_album_case_typo(self):
albums = list(autotag.albums_in_dir(self.base))
root, items = albums[2]
self.assertEquals(root, self.dirs[6:8])
self.assertEquals(len(items), 2)
def test_single_disc_album(self):
albums = list(autotag.albums_in_dir(self.base))
root, items = albums[3]
self.assertEquals(root, self.dirs[8:])
self.assertEquals(len(items), 1)
def test_do_not_yield_empty_album(self):
# Remove all the MP3s.
for path in self.files:
os.remove(path)
albums = list(autotag.albums_in_dir(self.base))
self.assertEquals(len(albums), 0)
class AssignmentTest(unittest.TestCase):
def item(self, title, track):
return Item({
'title': title, 'track': track,
'mb_trackid': '', 'mb_albumid': '', 'mb_artistid': '',
})
def test_reorder_when_track_numbers_incorrect(self):
items = []
items.append(self.item(u'one', 1))
items.append(self.item(u'three', 2))
items.append(self.item(u'two', 3))
trackinfo = []
trackinfo.append(TrackInfo(u'one', None))
trackinfo.append(TrackInfo(u'two', None))
trackinfo.append(TrackInfo(u'three', None))
mapping, extra_items, extra_tracks = \
match.assign_items(items, trackinfo)
self.assertEqual(extra_items, [])
self.assertEqual(extra_tracks, [])
self.assertEqual(mapping, {
items[0]: trackinfo[0],
items[1]: trackinfo[2],
items[2]: trackinfo[1],
})
def test_order_works_with_invalid_track_numbers(self):
items = []
items.append(self.item(u'one', 1))
items.append(self.item(u'three', 1))
items.append(self.item(u'two', 1))
trackinfo = []
trackinfo.append(TrackInfo(u'one', None))
trackinfo.append(TrackInfo(u'two', None))
trackinfo.append(TrackInfo(u'three', None))
mapping, extra_items, extra_tracks = \
match.assign_items(items, trackinfo)
self.assertEqual(extra_items, [])
self.assertEqual(extra_tracks, [])
self.assertEqual(mapping, {
items[0]: trackinfo[0],
items[1]: trackinfo[2],
items[2]: trackinfo[1],
})
def test_order_works_with_missing_tracks(self):
items = []
items.append(self.item(u'one', 1))
items.append(self.item(u'three', 3))
trackinfo = []
trackinfo.append(TrackInfo(u'one', None))
trackinfo.append(TrackInfo(u'two', None))
trackinfo.append(TrackInfo(u'three', None))
mapping, extra_items, extra_tracks = \
match.assign_items(items, trackinfo)
self.assertEqual(extra_items, [])
self.assertEqual(extra_tracks, [trackinfo[1]])
self.assertEqual(mapping, {
items[0]: trackinfo[0],
items[1]: trackinfo[2],
})
def test_order_works_with_extra_tracks(self):
items = []
items.append(self.item(u'one', 1))
items.append(self.item(u'two', 2))
items.append(self.item(u'three', 3))
trackinfo = []
trackinfo.append(TrackInfo(u'one', None))
trackinfo.append(TrackInfo(u'three', None))
mapping, extra_items, extra_tracks = \
match.assign_items(items, trackinfo)
self.assertEqual(extra_items, [items[1]])
self.assertEqual(extra_tracks, [])
self.assertEqual(mapping, {
items[0]: trackinfo[0],
items[2]: trackinfo[1],
})
def test_order_works_when_track_names_are_entirely_wrong(self):
# A real-world test case contributed by a user.
def item(i, length):
return Item({
'artist': u'ben harper',
'album': u'burn to shine',
'title': u'ben harper - Burn to Shine ' + str(i),
'track': i,
'length': length,
'mb_trackid': '', 'mb_albumid': '', 'mb_artistid': '',
})
items = []
items.append(item(1, 241.37243007106997))
items.append(item(2, 342.27781704375036))
items.append(item(3, 245.95070222338137))
items.append(item(4, 472.87662515485437))
items.append(item(5, 279.1759535763187))
items.append(item(6, 270.33333768012))
items.append(item(7, 247.83435613222923))
items.append(item(8, 216.54504531525072))
items.append(item(9, 225.72775379800484))
items.append(item(10, 317.7643606963552))
items.append(item(11, 243.57001238834192))
items.append(item(12, 186.45916150485752))
def info(index, title, length):
return TrackInfo(title, None, length=length, index=index)
trackinfo = []
trackinfo.append(info(1, u'Alone', 238.893))
trackinfo.append(info(2, u'The Woman in You', 341.44))
trackinfo.append(info(3, u'Less', 245.59999999999999))
trackinfo.append(info(4, u'Two Hands of a Prayer', 470.49299999999999))
trackinfo.append(info(5, u'Please Bleed', 277.86599999999999))
trackinfo.append(info(6, u'Suzie Blue', 269.30599999999998))
trackinfo.append(info(7, u'Steal My Kisses', 245.36000000000001))
trackinfo.append(info(8, u'Burn to Shine', 214.90600000000001))
trackinfo.append(info(9, u'Show Me a Little Shame', 224.09299999999999))
trackinfo.append(info(10, u'Forgiven', 317.19999999999999))
trackinfo.append(info(11, u'Beloved One', 243.733))
trackinfo.append(info(12, u'In the Lord\'s Arms', 186.13300000000001))
mapping, extra_items, extra_tracks = \
match.assign_items(items, trackinfo)
self.assertEqual(extra_items, [])
self.assertEqual(extra_tracks, [])
for item, info in mapping.iteritems():
self.assertEqual(items.index(item), trackinfo.index(info))
class ApplyTestUtil(object):
def _apply(self, info=None, per_disc_numbering=False):
info = info or self.info
mapping = {}
for i, t in zip(self.items, info.tracks):
mapping[i] = t
config['per_disc_numbering'] = per_disc_numbering
autotag.apply_metadata(info, mapping)
class ApplyTest(_common.TestCase, ApplyTestUtil):
def setUp(self):
super(ApplyTest, self).setUp()
self.items = []
self.items.append(Item({}))
self.items.append(Item({}))
trackinfo = []
trackinfo.append(TrackInfo(
u'oneNew', 'dfa939ec-118c-4d0f-84a0-60f3d1e6522c', medium=1,
medium_index=1, artist_credit='trackArtistCredit',
artist_sort='trackArtistSort', index=1,
))
trackinfo.append(TrackInfo(u'twoNew',
'40130ed1-a27c-42fd-a328-1ebefb6caef4',
medium=2, medium_index=1, index=2))
self.info = AlbumInfo(
tracks = trackinfo,
artist = u'artistNew',
album = u'albumNew',
album_id = '7edb51cb-77d6-4416-a23c-3a8c2994a2c7',
artist_id = 'a6623d39-2d8e-4f70-8242-0a9553b91e50',
artist_credit = u'albumArtistCredit',
artist_sort = u'albumArtistSort',
albumtype = u'album',
va = False,
mediums = 2,
)
def test_titles_applied(self):
self._apply()
self.assertEqual(self.items[0].title, 'oneNew')
self.assertEqual(self.items[1].title, 'twoNew')
def test_album_and_artist_applied_to_all(self):
self._apply()
self.assertEqual(self.items[0].album, 'albumNew')
self.assertEqual(self.items[1].album, 'albumNew')
self.assertEqual(self.items[0].artist, 'artistNew')
self.assertEqual(self.items[1].artist, 'artistNew')
def test_track_index_applied(self):
self._apply()
self.assertEqual(self.items[0].track, 1)
self.assertEqual(self.items[1].track, 2)
def test_track_total_applied(self):
self._apply()
self.assertEqual(self.items[0].tracktotal, 2)
self.assertEqual(self.items[1].tracktotal, 2)
def test_disc_index_applied(self):
self._apply()
self.assertEqual(self.items[0].disc, 1)
self.assertEqual(self.items[1].disc, 2)
def test_disc_total_applied(self):
self._apply()
self.assertEqual(self.items[0].disctotal, 2)
self.assertEqual(self.items[1].disctotal, 2)
def test_per_disc_numbering(self):
self._apply(per_disc_numbering=True)
self.assertEqual(self.items[0].track, 1)
self.assertEqual(self.items[1].track, 1)
def test_mb_trackid_applied(self):
self._apply()
self.assertEqual(self.items[0].mb_trackid,
'dfa939ec-118c-4d0f-84a0-60f3d1e6522c')
self.assertEqual(self.items[1].mb_trackid,
'40130ed1-a27c-42fd-a328-1ebefb6caef4')
def test_mb_albumid_and_artistid_applied(self):
self._apply()
for item in self.items:
self.assertEqual(item.mb_albumid,
'7edb51cb-77d6-4416-a23c-3a8c2994a2c7')
self.assertEqual(item.mb_artistid,
'a6623d39-2d8e-4f70-8242-0a9553b91e50')
def test_albumtype_applied(self):
self._apply()
self.assertEqual(self.items[0].albumtype, 'album')
self.assertEqual(self.items[1].albumtype, 'album')
def test_album_artist_overrides_empty_track_artist(self):
my_info = copy.deepcopy(self.info)
self._apply(info=my_info)
self.assertEqual(self.items[0].artist, 'artistNew')
self.assertEqual(self.items[0].artist, 'artistNew')
def test_album_artist_overriden_by_nonempty_track_artist(self):
my_info = copy.deepcopy(self.info)
my_info.tracks[0].artist = 'artist1!'
my_info.tracks[1].artist = 'artist2!'
self._apply(info=my_info)
self.assertEqual(self.items[0].artist, 'artist1!')
self.assertEqual(self.items[1].artist, 'artist2!')
def test_artist_credit_applied(self):
self._apply()
self.assertEqual(self.items[0].albumartist_credit, 'albumArtistCredit')
self.assertEqual(self.items[0].artist_credit, 'trackArtistCredit')
self.assertEqual(self.items[1].albumartist_credit, 'albumArtistCredit')
self.assertEqual(self.items[1].artist_credit, 'albumArtistCredit')
def test_artist_sort_applied(self):
self._apply()
self.assertEqual(self.items[0].albumartist_sort, 'albumArtistSort')
self.assertEqual(self.items[0].artist_sort, 'trackArtistSort')
self.assertEqual(self.items[1].albumartist_sort, 'albumArtistSort')
self.assertEqual(self.items[1].artist_sort, 'albumArtistSort')
class ApplyCompilationTest(_common.TestCase, ApplyTestUtil):
def setUp(self):
super(ApplyCompilationTest, self).setUp()
self.items = []
self.items.append(Item({}))
self.items.append(Item({}))
trackinfo = []
trackinfo.append(TrackInfo(
u'oneNew',
'dfa939ec-118c-4d0f-84a0-60f3d1e6522c',
u'artistOneNew',
'a05686fc-9db2-4c23-b99e-77f5db3e5282',
index=1,
))
trackinfo.append(TrackInfo(
u'twoNew',
'40130ed1-a27c-42fd-a328-1ebefb6caef4',
u'artistTwoNew',
'80b3cf5e-18fe-4c59-98c7-e5bb87210710',
index=2,
))
self.info = AlbumInfo(
tracks = trackinfo,
artist = u'variousNew',
album = u'albumNew',
album_id = '3b69ea40-39b8-487f-8818-04b6eff8c21a',
artist_id = '89ad4ac3-39f7-470e-963a-56509c546377',
albumtype = u'compilation',
va = False,
)
def test_album_and_track_artists_separate(self):
self._apply()
self.assertEqual(self.items[0].artist, 'artistOneNew')
self.assertEqual(self.items[1].artist, 'artistTwoNew')
self.assertEqual(self.items[0].albumartist, 'variousNew')
self.assertEqual(self.items[1].albumartist, 'variousNew')
def test_mb_albumartistid_applied(self):
self._apply()
self.assertEqual(self.items[0].mb_albumartistid,
'89ad4ac3-39f7-470e-963a-56509c546377')
self.assertEqual(self.items[1].mb_albumartistid,
'89ad4ac3-39f7-470e-963a-56509c546377')
self.assertEqual(self.items[0].mb_artistid,
'a05686fc-9db2-4c23-b99e-77f5db3e5282')
self.assertEqual(self.items[1].mb_artistid,
'80b3cf5e-18fe-4c59-98c7-e5bb87210710')
def test_va_flag_cleared_does_not_set_comp(self):
self._apply()
self.assertFalse(self.items[0].comp)
self.assertFalse(self.items[1].comp)
def test_va_flag_sets_comp(self):
va_info = copy.deepcopy(self.info)
va_info.va = True
self._apply(info=va_info)
self.assertTrue(self.items[0].comp)
self.assertTrue(self.items[1].comp)
class StringDistanceTest(unittest.TestCase):
def test_equal_strings(self):
dist = match.string_dist(u'Some String', u'Some String')
self.assertEqual(dist, 0.0)
def test_different_strings(self):
dist = match.string_dist(u'Some String', u'Totally Different')
self.assertNotEqual(dist, 0.0)
def test_punctuation_ignored(self):
dist = match.string_dist(u'Some String', u'Some.String!')
self.assertEqual(dist, 0.0)
def test_case_ignored(self):
dist = match.string_dist(u'Some String', u'sOME sTring')
self.assertEqual(dist, 0.0)
def test_leading_the_has_lower_weight(self):
dist1 = match.string_dist(u'XXX Band Name', u'Band Name')
dist2 = match.string_dist(u'The Band Name', u'Band Name')
self.assert_(dist2 < dist1)
def test_parens_have_lower_weight(self):
dist1 = match.string_dist(u'One .Two.', u'One')
dist2 = match.string_dist(u'One (Two)', u'One')
self.assert_(dist2 < dist1)
def test_brackets_have_lower_weight(self):
dist1 = match.string_dist(u'One .Two.', u'One')
dist2 = match.string_dist(u'One [Two]', u'One')
self.assert_(dist2 < dist1)
def test_ep_label_has_zero_weight(self):
dist = match.string_dist(u'My Song (EP)', u'My Song')
self.assertEqual(dist, 0.0)
def test_featured_has_lower_weight(self):
dist1 = match.string_dist(u'My Song blah Someone', u'My Song')
dist2 = match.string_dist(u'My Song feat Someone', u'My Song')
self.assert_(dist2 < dist1)
def test_postfix_the(self):
dist = match.string_dist(u'The Song Title', u'Song Title, The')
self.assertEqual(dist, 0.0)
def test_postfix_a(self):
dist = match.string_dist(u'A Song Title', u'Song Title, A')
self.assertEqual(dist, 0.0)
def test_postfix_an(self):
dist = match.string_dist(u'An Album Title', u'Album Title, An')
self.assertEqual(dist, 0.0)
def test_empty_strings(self):
dist = match.string_dist(u'', u'')
self.assertEqual(dist, 0.0)
def test_solo_pattern(self):
# Just make sure these don't crash.
match.string_dist(u'The ', u'')
match.string_dist(u'(EP)', u'(EP)')
match.string_dist(u', An', u'')
def test_heuristic_does_not_harm_distance(self):
dist = match.string_dist(u'Untitled', u'[Untitled]')
self.assertEqual(dist, 0.0)
def test_ampersand_expansion(self):
dist = match.string_dist(u'And', u'&')
self.assertEqual(dist, 0.0)
def test_accented_characters(self):
dist = match.string_dist(u'\xe9\xe1\xf1', u'ean')
self.assertEqual(dist, 0.0)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
import json
from django.shortcuts import get_object_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.utils.feedgenerator import Atom1Feed, rfc3339_date
from django.contrib.contenttypes.models import ContentType
from django.contrib.syndication.views import Feed, add_domain
from django.contrib.sites.models import Site
from django.utils.encoding import force_str
from django.utils import datetime_safe
from django.views.generic import View
from django.http import HttpResponse, Http404
from django.urls import reverse
from actstream.models import Action, model_stream, user_stream, any_stream
class AbstractActivityStream:
"""
Abstract base class for all stream rendering.
Supports hooks for fetching streams and formatting actions.
"""
def get_stream(self, *args, **kwargs):
"""
Returns a stream method to use.
"""
raise NotImplementedError
def get_object(self, *args, **kwargs):
"""
        Returns the object (e.g. user or actor) that the stream is for.
"""
raise NotImplementedError
def items(self, *args, **kwargs):
"""
Returns a queryset of Actions to use based on the stream method and object.
"""
return self.get_stream()(self.get_object(*args, **kwargs))
def get_uri(self, action, obj=None, date=None):
"""
Returns an RFC3987 IRI ID for the given object, action and date.
"""
if date is None:
date = action.timestamp
date = datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:{},{}:{}'.format(Site.objects.get_current().domain, date,
self.get_url(action, obj, False))
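    # Illustrative example (not in the original source): with a current Site
    # domain of 'example.com', an action timestamped on 2013-06-01 and a detail
    # URL of '/activity/detail/42/', get_uri() returns
    # 'tag:example.com,2013-06-01:/activity/detail/42/'. Domain, date and pk
    # are made-up values.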
def get_url(self, action, obj=None, domain=True):
"""
        Returns an RFC3987 IRI for an HTML representation of the given object and action.
If domain is true, the current site's domain will be added.
"""
if not obj:
url = reverse('actstream_detail', None, (action.pk,))
elif hasattr(obj, 'get_absolute_url'):
url = obj.get_absolute_url()
else:
ctype = ContentType.objects.get_for_model(obj)
url = reverse('actstream_actor', None, (ctype.pk, obj.pk))
if domain:
return add_domain(Site.objects.get_current().domain, url)
return url
def format(self, action):
"""
Returns a formatted dictionary for the given action.
"""
item = {
'id': self.get_uri(action),
'url': self.get_url(action),
'verb': action.verb,
'published': rfc3339_date(action.timestamp),
'actor': self.format_actor(action),
'title': str(action),
}
if action.description:
item['content'] = action.description
if action.target:
item['target'] = self.format_target(action)
if action.action_object:
item['object'] = self.format_action_object(action)
return item
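    # Illustrative example (not in the original source): for a hypothetical
    # "user followed group" action, format() returns a dict shaped roughly like
    #   {'id': 'tag:example.com,2013-06-01:/activity/detail/42/',
    #    'url': 'http://example.com/activity/detail/42/',
    #    'verb': 'followed',
    #    'published': '2013-06-01T12:00:00Z',
    #    'actor': {...},
    #    'title': 'user followed group 1 hour ago',
    #    'target': {...}}
    # with 'content' and 'object' added only when the action carries them.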
def format_item(self, action, item_type='actor'):
"""
Returns a formatted dictionary for an individual item based on the action and item_type.
"""
obj = getattr(action, item_type)
return {
'id': self.get_uri(action, obj),
'url': self.get_url(action, obj),
'objectType': ContentType.objects.get_for_model(obj).name,
'displayName': str(obj)
}
def format_actor(self, action):
"""
Returns a formatted dictionary for the actor of the action.
"""
return self.format_item(action)
def format_target(self, action):
"""
Returns a formatted dictionary for the target of the action.
"""
return self.format_item(action, 'target')
def format_action_object(self, action):
"""
Returns a formatted dictionary for the action object of the action.
"""
return self.format_item(action, 'action_object')
class ActivityStreamsAtomFeed(Atom1Feed):
"""
Feed rendering class for the v1.0 Atom Activity Stream Spec
"""
def root_attributes(self):
attrs = super(ActivityStreamsAtomFeed, self).root_attributes()
attrs['xmlns:activity'] = 'http://activitystrea.ms/spec/1.0/'
return attrs
def add_root_elements(self, handler):
super(ActivityStreamsAtomFeed, self).add_root_elements(handler)
def quick_elem(self, handler, key, value):
if key == 'link':
handler.addQuickElement(key, None, {
'href': value, 'type': 'text/html', 'rel': 'alternate'})
else:
handler.addQuickElement(key, value)
def item_quick_handler(self, handler, name, item):
handler.startElement(name, {})
for key, value in item.items():
self.quick_elem(handler, key, value)
handler.endElement(name)
def add_item_elements(self, handler, item):
item.pop('unique_id')
actor = item.pop('actor')
target = item.pop('target', None)
action_object = item.pop('action_object', None)
content = item.pop('content', None)
if content:
handler.addQuickElement('content', content, {'type': 'html'})
for key, value in item.items():
if value:
self.quick_elem(handler, key, value)
self.item_quick_handler(handler, 'author', actor)
if action_object:
self.item_quick_handler(handler, 'activity:object', action_object)
if target:
self.item_quick_handler(handler, 'activity:target', target)
class ActivityStreamsBaseFeed(AbstractActivityStream, Feed):
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, action):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
Add the 'content' field of the 'Entry' item, to be used by the custom
feed generator.
"""
item = self.format(action)
item.pop('title', None)
item['uri'] = item.pop('url')
item['activity:verb'] = item.pop('verb')
return item
def format_item(self, action, item_type='actor'):
        name = 'name' if item_type == 'actor' else 'title'
item = super(ActivityStreamsBaseFeed, self).format_item(action, item_type)
item[name] = item.pop('displayName')
item['activity:object-type'] = item.pop('objectType')
item.pop('url')
return item
def item_link(self, action):
return self.get_url(action)
def item_description(self, action):
if action.description:
return force_str(action.description)
def items(self, obj):
return self.get_stream()(obj)[:30]
class JSONActivityFeed(AbstractActivityStream, View):
"""
    View that generates feeds compatible with the v1.0 JSON Activity Stream spec
"""
def dispatch(self, request, *args, **kwargs):
return HttpResponse(self.serialize(request, *args, **kwargs),
content_type='application/json')
def serialize(self, request, *args, **kwargs):
items = self.items(request, *args, **kwargs)
return json.dumps({
'totalItems': len(items),
'items': [self.format(action) for action in items]
}, indent=4 if 'pretty' in request.GET or 'pretty' in request.POST else None)
class ModelActivityMixin:
def get_object(self, request, content_type_id):
return get_object_or_404(ContentType, pk=content_type_id).model_class()
def get_stream(self):
return model_stream
class ObjectActivityMixin:
def get_object(self, request, content_type_id, object_id):
ct = get_object_or_404(ContentType, pk=content_type_id)
try:
obj = ct.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
raise Http404('No %s matches the given query.' % ct.model_class()._meta.object_name)
return obj
def get_stream(self):
return any_stream
class StreamKwargsMixin:
def items(self, request, *args, **kwargs):
return self.get_stream()(
self.get_object(request, *args, **kwargs),
**self.get_stream_kwargs(request)
)
class UserActivityMixin:
def get_object(self, request):
if request.user.is_authenticated:
return request.user
def get_stream(self):
return user_stream
def get_stream_kwargs(self, request):
stream_kwargs = {}
if 'with_user_activity' in request.GET:
stream_kwargs['with_user_activity'] = request.GET['with_user_activity'].lower() == 'true'
return stream_kwargs
class CustomStreamMixin:
name = None
def get_object(self):
return
def get_stream(self):
return getattr(Action.objects, self.name)
def items(self, *args, **kwargs):
return self.get_stream()(*args[1:], **kwargs)
class ModelActivityFeed(ModelActivityMixin, ActivityStreamsBaseFeed):
def title(self, model):
return 'Activity feed from %s' % model.__name__
def link(self, model):
ctype = ContentType.objects.get_for_model(model)
return reverse('actstream_model', None, (ctype.pk,))
def description(self, model):
return 'Public activities of %s' % model.__name__
class ObjectActivityFeed(ObjectActivityMixin, ActivityStreamsBaseFeed):
def title(self, obj):
return 'Activity for %s' % obj
def link(self, obj):
return self.get_url(obj)
def description(self, obj):
return 'Activity for %s' % obj
class UserActivityFeed(UserActivityMixin, ActivityStreamsBaseFeed):
def title(self, user):
return 'Activity feed for your followed actors'
def link(self, user):
if not user:
return reverse('actstream')
if hasattr(user, 'get_absolute_url'):
return user.get_absolute_url()
ctype = ContentType.objects.get_for_model(user)
return reverse('actstream_actor', None, (ctype.pk, user.pk))
def description(self, user):
return 'Public activities of actors you follow'
class AtomUserActivityFeed(UserActivityFeed):
"""
Atom feed of Activity for a given user (where actions are those that the given user follows).
"""
feed_type = ActivityStreamsAtomFeed
subtitle = UserActivityFeed.description
class AtomModelActivityFeed(ModelActivityFeed):
"""
Atom feed of Activity for a given model (where actions involve the given model as any of the entities).
"""
feed_type = ActivityStreamsAtomFeed
subtitle = ModelActivityFeed.description
class AtomObjectActivityFeed(ObjectActivityFeed):
"""
Atom feed of Activity for a given object (where actions involve the given object as any of the entities).
"""
feed_type = ActivityStreamsAtomFeed
subtitle = ObjectActivityFeed.description
class UserJSONActivityFeed(UserActivityMixin, StreamKwargsMixin, JSONActivityFeed):
"""
JSON feed of Activity for a given user (where the actions are those of the actors that the given user follows).
"""
pass
class ModelJSONActivityFeed(ModelActivityMixin, JSONActivityFeed):
"""
JSON feed of Activity for a given model (where actions involve the given model as any of the entities).
"""
pass
class ObjectJSONActivityFeed(ObjectActivityMixin, JSONActivityFeed):
"""
JSON feed of Activity for a given object (where actions involve the given object as any of the entities).
"""
pass
class CustomJSONActivityFeed(CustomStreamMixin, JSONActivityFeed):
"""
JSON feed of Activity for a custom stream. self.name should be the name of the
custom stream as defined in the Manager; arguments may be passed either in the URL
or when calling as_view(...).
"""
pass
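# A minimal sketch of using CustomJSONActivityFeed (all names are illustrative
# assumptions): register a custom stream on the Action manager following the
# project's custom-stream convention, activate it via the
# ACTSTREAM_SETTINGS['MANAGER'] setting, then point a feed subclass at it by name.
#
#     from actstream.managers import ActionManager, stream
#
#     class MyActionManager(ActionManager):
#         @stream
#         def mystream(self, obj, **kwargs):
#             return self.filter(verb='posted')
#
#     class MyStreamJSONFeed(CustomJSONActivityFeed):
#         name = 'mystream'
#
# Any extra arguments captured by the URL pattern or passed to as_view() are
# forwarded to the stream, as CustomStreamMixin.items() above shows.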
|
|
'''tzinfo timezone information for Europe/Zurich.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Zurich(DstTzInfo):
'''Europe/Zurich timezone definition. See datetime.tzinfo for details'''
zone = 'Europe/Zurich'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1940,11,1,23,0,0),
d(1940,12,30,22,0,0),
d(1941,5,4,1,0,0),
d(1941,10,4,22,0,0),
d(1942,5,3,1,0,0),
d(1942,10,3,22,0,0),
d(1981,3,29,1,0,0),
d(1981,9,27,1,0,0),
d(1982,3,28,1,0,0),
d(1982,9,26,1,0,0),
d(1983,3,27,1,0,0),
d(1983,9,25,1,0,0),
d(1984,3,25,1,0,0),
d(1984,9,30,1,0,0),
d(1985,3,31,1,0,0),
d(1985,9,29,1,0,0),
d(1986,3,30,1,0,0),
d(1986,9,28,1,0,0),
d(1987,3,29,1,0,0),
d(1987,9,27,1,0,0),
d(1988,3,27,1,0,0),
d(1988,9,25,1,0,0),
d(1989,3,26,1,0,0),
d(1989,9,24,1,0,0),
d(1990,3,25,1,0,0),
d(1990,9,30,1,0,0),
d(1991,3,31,1,0,0),
d(1991,9,29,1,0,0),
d(1992,3,29,1,0,0),
d(1992,9,27,1,0,0),
d(1993,3,28,1,0,0),
d(1993,9,26,1,0,0),
d(1994,3,27,1,0,0),
d(1994,9,25,1,0,0),
d(1995,3,26,1,0,0),
d(1995,9,24,1,0,0),
d(1996,3,31,1,0,0),
d(1996,10,27,1,0,0),
d(1997,3,30,1,0,0),
d(1997,10,26,1,0,0),
d(1998,3,29,1,0,0),
d(1998,10,25,1,0,0),
d(1999,3,28,1,0,0),
d(1999,10,31,1,0,0),
d(2000,3,26,1,0,0),
d(2000,10,29,1,0,0),
d(2001,3,25,1,0,0),
d(2001,10,28,1,0,0),
d(2002,3,31,1,0,0),
d(2002,10,27,1,0,0),
d(2003,3,30,1,0,0),
d(2003,10,26,1,0,0),
d(2004,3,28,1,0,0),
d(2004,10,31,1,0,0),
d(2005,3,27,1,0,0),
d(2005,10,30,1,0,0),
d(2006,3,26,1,0,0),
d(2006,10,29,1,0,0),
d(2007,3,25,1,0,0),
d(2007,10,28,1,0,0),
d(2008,3,30,1,0,0),
d(2008,10,26,1,0,0),
d(2009,3,29,1,0,0),
d(2009,10,25,1,0,0),
d(2010,3,28,1,0,0),
d(2010,10,31,1,0,0),
d(2011,3,27,1,0,0),
d(2011,10,30,1,0,0),
d(2012,3,25,1,0,0),
d(2012,10,28,1,0,0),
d(2013,3,31,1,0,0),
d(2013,10,27,1,0,0),
d(2014,3,30,1,0,0),
d(2014,10,26,1,0,0),
d(2015,3,29,1,0,0),
d(2015,10,25,1,0,0),
d(2016,3,27,1,0,0),
d(2016,10,30,1,0,0),
d(2017,3,26,1,0,0),
d(2017,10,29,1,0,0),
d(2018,3,25,1,0,0),
d(2018,10,28,1,0,0),
d(2019,3,31,1,0,0),
d(2019,10,27,1,0,0),
d(2020,3,29,1,0,0),
d(2020,10,25,1,0,0),
d(2021,3,28,1,0,0),
d(2021,10,31,1,0,0),
d(2022,3,27,1,0,0),
d(2022,10,30,1,0,0),
d(2023,3,26,1,0,0),
d(2023,10,29,1,0,0),
d(2024,3,31,1,0,0),
d(2024,10,27,1,0,0),
d(2025,3,30,1,0,0),
d(2025,10,26,1,0,0),
d(2026,3,29,1,0,0),
d(2026,10,25,1,0,0),
d(2027,3,28,1,0,0),
d(2027,10,31,1,0,0),
d(2028,3,26,1,0,0),
d(2028,10,29,1,0,0),
d(2029,3,25,1,0,0),
d(2029,10,28,1,0,0),
d(2030,3,31,1,0,0),
d(2030,10,27,1,0,0),
d(2031,3,30,1,0,0),
d(2031,10,26,1,0,0),
d(2032,3,28,1,0,0),
d(2032,10,31,1,0,0),
d(2033,3,27,1,0,0),
d(2033,10,30,1,0,0),
d(2034,3,26,1,0,0),
d(2034,10,29,1,0,0),
d(2035,3,25,1,0,0),
d(2035,10,28,1,0,0),
d(2036,3,30,1,0,0),
d(2036,10,26,1,0,0),
d(2037,3,29,1,0,0),
d(2037,10,25,1,0,0),
]
_transition_info = [
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
]
Zurich = Zurich()
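# A short usage sketch (illustrative, not part of the generated data above).
# Each entry in _utc_transition_times pairs with the ttinfo at the same index in
# _transition_info, and pytz applies them when a naive datetime is localized:
#
#     from datetime import datetime
#     import pytz
#
#     zurich = pytz.timezone('Europe/Zurich')
#     summer = zurich.localize(datetime(2021, 7, 1, 12, 0))   # CEST, UTC+02:00
#     winter = zurich.localize(datetime(2021, 1, 1, 12, 0))   # CET,  UTC+01:00
#     print(summer.tzname(), winter.tzname())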
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RoleAssignmentsOperations:
"""RoleAssignmentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.authorization.v2015_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_for_resource(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.RoleAssignmentListResult"]:
"""Gets role assignments for a resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource.
:type resource_type: str
:param resource_name: The name of the resource to get role assignments for.
:type resource_name: str
:param filter: The filter to apply on the operation. Use $filter=atScope() to return all role
assignments at or above the scope. Use $filter=principalId eq {id} to return all role
assignments at, above or below the scope for the specified principal.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RoleAssignmentListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_for_resource.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RoleAssignmentListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_for_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/roleAssignments'} # type: ignore
def list_for_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.RoleAssignmentListResult"]:
"""Gets role assignments for a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param filter: The filter to apply on the operation. Use $filter=atScope() to return all role
assignments at or above the scope. Use $filter=principalId eq {id} to return all role
assignments at, above or below the scope for the specified principal.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RoleAssignmentListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_for_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RoleAssignmentListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/roleAssignments'} # type: ignore
async def delete(
self,
scope: str,
role_assignment_name: str,
**kwargs: Any
) -> Optional["_models.RoleAssignment"]:
"""Deletes a role assignment.
:param scope: The scope of the role assignment to delete.
:type scope: str
:param role_assignment_name: The name of the role assignment to delete.
:type role_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.RoleAssignment"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-07-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleAssignmentName': self._serialize.url("role_assignment_name", role_assignment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RoleAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}'} # type: ignore
async def create(
self,
scope: str,
role_assignment_name: str,
parameters: "_models.RoleAssignmentCreateParameters",
**kwargs: Any
) -> "_models.RoleAssignment":
"""Creates a role assignment.
:param scope: The scope of the role assignment to create. The scope can be any REST resource
instance. For example, use '/subscriptions/{subscription-id}/' for a subscription,
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for a resource group,
and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider}/{resource-type}/{resource-name}'
for a resource.
:type scope: str
:param role_assignment_name: The name of the role assignment to create. It can be any valid
GUID.
:type role_assignment_name: str
:param parameters: Parameters for the role assignment.
:type parameters: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleAssignmentName': self._serialize.url("role_assignment_name", role_assignment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RoleAssignmentCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RoleAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}'} # type: ignore
async def get(
self,
scope: str,
role_assignment_name: str,
**kwargs: Any
) -> "_models.RoleAssignment":
"""Get the specified role assignment.
:param scope: The scope of the role assignment.
:type scope: str
:param role_assignment_name: The name of the role assignment to get.
:type role_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleAssignmentName': self._serialize.url("role_assignment_name", role_assignment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RoleAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}'} # type: ignore
async def delete_by_id(
self,
role_assignment_id: str,
**kwargs: Any
) -> Optional["_models.RoleAssignment"]:
"""Deletes a role assignment.
:param role_assignment_id: The fully qualified ID of the role assignment, including the scope,
resource name and resource type. Use the format,
/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}. Example:
/subscriptions/{subId}/resourcegroups/{rgname}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}.
:type role_assignment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.RoleAssignment"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-07-01"
accept = "application/json"
# Construct URL
url = self.delete_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RoleAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_by_id.metadata = {'url': '/{roleAssignmentId}'} # type: ignore
async def create_by_id(
self,
role_assignment_id: str,
parameters: "_models.RoleAssignmentCreateParameters",
**kwargs: Any
) -> "_models.RoleAssignment":
"""Creates a role assignment by ID.
:param role_assignment_id: The fully qualified ID of the role assignment, including the scope,
resource name and resource type. Use the format,
/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}. Example:
/subscriptions/{subId}/resourcegroups/{rgname}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}.
:type role_assignment_id: str
:param parameters: Parameters for the role assignment.
:type parameters: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RoleAssignmentCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RoleAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_by_id.metadata = {'url': '/{roleAssignmentId}'} # type: ignore
async def get_by_id(
self,
role_assignment_id: str,
**kwargs: Any
) -> "_models.RoleAssignment":
"""Gets a role assignment by ID.
:param role_assignment_id: The fully qualified ID of the role assignment, including the scope,
resource name and resource type. Use the format,
/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}. Example:
/subscriptions/{subId}/resourcegroups/{rgname}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}.
:type role_assignment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-07-01"
accept = "application/json"
# Construct URL
url = self.get_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RoleAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': '/{roleAssignmentId}'} # type: ignore
def list(
self,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.RoleAssignmentListResult"]:
"""Gets all role assignments for the subscription.
:param filter: The filter to apply on the operation. Use $filter=atScope() to return all role
assignments at or above the scope. Use $filter=principalId eq {id} to return all role
assignments at, above or below the scope for the specified principal.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RoleAssignmentListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RoleAssignmentListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/roleAssignments'} # type: ignore
def list_for_scope(
self,
scope: str,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.RoleAssignmentListResult"]:
"""Gets role assignments for a scope.
:param scope: The scope of the role assignments.
:type scope: str
:param filter: The filter to apply on the operation. Use $filter=atScope() to return all role
assignments at or above the scope. Use $filter=principalId eq {id} to return all role
assignments at, above or below the scope for the specified principal.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RoleAssignmentListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_for_scope.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RoleAssignmentListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_for_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignments'} # type: ignore
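# A minimal usage sketch (an illustrative assumption, not generated code): these
# operations are normally reached through the async management client, which
# attaches a RoleAssignmentsOperations instance as `role_assignments`. The
# $filter syntax matches the docstrings above.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.authorization.aio import AuthorizationManagementClient
#
#     async def print_role_assignments(subscription_id: str) -> None:
#         async with DefaultAzureCredential() as credential:
#             async with AuthorizationManagementClient(credential, subscription_id) as client:
#                 async for assignment in client.role_assignments.list(filter="atScope()"):
#                     print(assignment.name)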
|
|
from __future__ import print_function, division
import copy
from collections import defaultdict
from sympy.core.containers import Dict
from sympy.core.compatibility import is_sequence, as_int, range
from sympy.core.logic import fuzzy_and
from sympy.core.singleton import S
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.utilities.iterables import uniq
from .matrices import MatrixBase, ShapeError, a2idx
from .dense import Matrix
import collections
class SparseMatrix(MatrixBase):
"""
A sparse matrix (a matrix with a large number of zero elements).
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(2, 2, range(4))
Matrix([
[0, 1],
[2, 3]])
>>> SparseMatrix(2, 2, {(1, 1): 2})
Matrix([
[0, 0],
[0, 2]])
See Also
========
sympy.matrices.dense.Matrix
"""
def __init__(self, *args):
if len(args) == 1 and isinstance(args[0], SparseMatrix):
self.rows = args[0].rows
self.cols = args[0].cols
self._smat = dict(args[0]._smat)
return
self._smat = {}
if len(args) == 3:
self.rows = as_int(args[0])
self.cols = as_int(args[1])
if callable(args[2]):
op = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(
op(self._sympify(i), self._sympify(j)))
if value:
self._smat[(i, j)] = value
elif isinstance(args[2], (dict, Dict)):
# manual copy, copy.deepcopy() doesn't work
for key in args[2].keys():
v = args[2][key]
if v:
self._smat[key] = self._sympify(v)
elif is_sequence(args[2]):
if len(args[2]) != self.rows*self.cols:
raise ValueError(
'List length (%s) != rows*columns (%s)' %
(len(args[2]), self.rows*self.cols))
flat_list = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(flat_list[i*self.cols + j])
if value:
self._smat[(i, j)] = value
else:
# handle full matrix forms with _handle_creation_inputs
r, c, _list = Matrix._handle_creation_inputs(*args)
self.rows = r
self.cols = c
for i in range(self.rows):
for j in range(self.cols):
value = _list[self.cols*i + j]
if value:
self._smat[(i, j)] = value
def __getitem__(self, key):
if isinstance(key, tuple):
i, j = key
try:
i, j = self.key2ij(key)
return self._smat.get((i, j), S.Zero)
except (TypeError, IndexError):
if isinstance(i, slice):
# XXX remove list() when PY2 support is dropped
i = list(range(self.rows))[i]
elif is_sequence(i):
pass
else:
if i >= self.rows:
raise IndexError('Row index out of bounds')
i = [i]
if isinstance(j, slice):
# XXX remove list() when PY2 support is dropped
j = list(range(self.cols))[j]
elif is_sequence(j):
pass
else:
if j >= self.cols:
raise IndexError('Col index out of bounds')
j = [j]
return self.extract(i, j)
# check for single arg, like M[:] or M[3]
if isinstance(key, slice):
lo, hi = key.indices(len(self))[:2]
L = []
for i in range(lo, hi):
m, n = divmod(i, self.cols)
L.append(self._smat.get((m, n), S.Zero))
return L
i, j = divmod(a2idx(key, len(self)), self.cols)
return self._smat.get((i, j), S.Zero)
def __setitem__(self, key, value):
raise NotImplementedError()
def copy(self):
return self._new(self.rows, self.cols, self._smat)
@property
def is_Identity(self):
if not self.is_square:
return False
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
return len(self._smat) == self.rows
def tolist(self):
"""Convert this sparse matrix into a list of nested Python lists.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.tolist()
[[1, 2], [3, 4]]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> SparseMatrix(ones(0, 3)).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
I, J = self.shape
return [[self[i, j] for j in range(J)] for i in range(I)]
def row(self, i):
"""Returns column i from self as a row vector.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.row(0)
Matrix([[1, 2]])
See Also
========
col
row_list
"""
return self[i,:]
def col(self, j):
"""Returns column j from self as a column vector.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.col(0)
Matrix([
[1],
[3]])
See Also
========
row
col_list
"""
return self[:, j]
def row_list(self):
"""Returns a row-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.RL
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
See Also
========
row_op
col_list
"""
return [tuple(k + (self[k],)) for k in
sorted(list(self._smat.keys()), key=lambda k: list(k))]
RL = property(row_list, None, None, "Alternate faster representation")
def col_list(self):
"""Returns a column-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
See Also
========
col_op
row_list
"""
return [tuple(k + (self[k],)) for k in sorted(list(self._smat.keys()), key=lambda k: list(reversed(k)))]
CL = property(col_list, None, None, "Alternate faster representation")
def _eval_trace(self):
"""Calculate the trace of a square matrix.
Examples
========
>>> from sympy.matrices import eye
>>> eye(3).trace()
3
"""
trace = S.Zero
for i in range(self.cols):
trace += self._smat.get((i, i), 0)
return trace
def _eval_transpose(self):
"""Returns the transposed SparseMatrix of this SparseMatrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.T
Matrix([
[1, 3],
[2, 4]])
"""
tran = self.zeros(self.cols, self.rows)
for key, value in self._smat.items():
key = key[1], key[0] # reverse
tran._smat[key] = value
return tran
def _eval_conjugate(self):
"""Return the by-element conjugation.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I)))
>>> a
Matrix([
[1, 2 + I],
[3, 4],
[I, -I]])
>>> a.C
Matrix([
[ 1, 2 - I],
[ 3, 4],
[-I, I]])
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
conj = self.copy()
for key, value in self._smat.items():
conj._smat[key] = value.conjugate()
return conj
def multiply(self, other):
"""Fast multiplication exploiting the sparsity of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> A, B = SparseMatrix(ones(4, 3)), SparseMatrix(ones(3, 4))
>>> A.multiply(B) == 3*ones(4)
True
See Also
========
add
"""
A = self
B = other
# sort B's row_list into list of rows
Blist = [[] for i in range(B.rows)]
for i, j, v in B.row_list():
Blist[i].append((j, v))
Cdict = defaultdict(int)
for k, j, Akj in A.row_list():
for n, Bjn in Blist[j]:
temp = Akj*Bjn
Cdict[k, n] += temp
rv = self.zeros(A.rows, B.cols)
rv._smat = dict([(k, v) for k, v in Cdict.items() if v])
return rv
def scalar_multiply(self, scalar):
"Scalar element-wise multiplication"
M = self.zeros(*self.shape)
if scalar:
for i in self._smat:
v = scalar*self._smat[i]
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def __mul__(self, other):
"""Multiply self and other, watching for non-matrix entities.
When multiplying by a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, zeros
>>> I = SparseMatrix(eye(3))
>>> I*I == I
True
>>> Z = zeros(3)
>>> I*Z
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> I*2 == 2*I
True
"""
if isinstance(other, SparseMatrix):
return self.multiply(other)
if isinstance(other, MatrixBase):
return other._new(self*self._new(other))
return self.scalar_multiply(other)
__matmul__ = __mul__
def __rmul__(self, other):
"""Return product the same type as other (if a Matrix).
When multiplying be a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import Matrix, SparseMatrix
>>> A = Matrix(2, 2, range(1, 5))
>>> S = SparseMatrix(2, 2, range(2, 6))
>>> A*S == S*A
False
>>> (isinstance(A*S, SparseMatrix) ==
... isinstance(S*A, SparseMatrix) == False)
True
"""
if isinstance(other, MatrixBase):
return other*other._new(self)
return self.scalar_multiply(other)
__rmatmul__ = __rmul__
def __add__(self, other):
"""Add other to self, efficiently if possible.
When adding a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> A = SparseMatrix(eye(3)) + SparseMatrix(eye(3))
>>> B = SparseMatrix(eye(3)) + eye(3)
>>> A
Matrix([
[2, 0, 0],
[0, 2, 0],
[0, 0, 2]])
>>> A == B
True
>>> isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix)
False
"""
if isinstance(other, SparseMatrix):
return self.add(other)
elif isinstance(other, MatrixBase):
return other._new(other + self)
else:
raise NotImplementedError(
"Cannot add %s to %s" %
tuple([c.__class__.__name__ for c in (other, self)]))
def __neg__(self):
"""Negate all elements of self.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> -SparseMatrix(eye(3))
Matrix([
[-1, 0, 0],
[ 0, -1, 0],
[ 0, 0, -1]])
"""
rv = self.copy()
for k, v in rv._smat.items():
rv._smat[k] = -v
return rv
def add(self, other):
"""Add two sparse matrices with dictionary representation.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, ones
>>> SparseMatrix(eye(3)).add(SparseMatrix(ones(3)))
Matrix([
[2, 1, 1],
[1, 2, 1],
[1, 1, 2]])
>>> SparseMatrix(eye(3)).add(-SparseMatrix(eye(3)))
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
Only the non-zero elements are stored, so the resulting dictionary
that is used to represent the sparse matrix is empty:
>>> _._smat
{}
See Also
========
multiply
"""
if not isinstance(other, SparseMatrix):
raise ValueError('only use add with %s, not %s' %
tuple([c.__class__.__name__ for c in (self, other)]))
if self.shape != other.shape:
raise ShapeError()
M = self.copy()
for i, v in other._smat.items():
v = M[i] + v
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def extract(self, rowsList, colsList):
urow = list(uniq(rowsList))
ucol = list(uniq(colsList))
smat = {}
if len(urow)*len(ucol) < len(self._smat):
# there are fewer elements requested than there are elements in the matrix
for i, r in enumerate(urow):
for j, c in enumerate(ucol):
smat[i, j] = self._smat.get((r, c), 0)
else:
# most of the request will be zeros so check all of self's entries,
# keeping only the ones that are desired
for rk, ck in self._smat:
if rk in urow and ck in ucol:
smat[(urow.index(rk), ucol.index(ck))] = self._smat[(rk, ck)]
rv = self._new(len(urow), len(ucol), smat)
# rv is nominally correct but there might be rows/cols
# which require duplication
if len(rowsList) != len(urow):
for i, r in enumerate(rowsList):
i_previous = rowsList.index(r)
if i_previous != i:
rv = rv.row_insert(i, rv.row(i_previous))
if len(colsList) != len(ucol):
for i, c in enumerate(colsList):
i_previous = colsList.index(c)
if i_previous != i:
rv = rv.col_insert(i, rv.col(i_previous))
return rv
extract.__doc__ = MatrixBase.extract.__doc__
@property
def is_hermitian(self):
"""Checks if the matrix is Hermitian.
In a Hermitian matrix element i,j is the complex conjugate of
element j,i.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> from sympy.abc import x
>>> a = SparseMatrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
def cond():
d = self._smat
yield self.is_square
if len(d) <= self.rows:
yield fuzzy_and(
d[i, i].is_real for i, j in d if i == j)
else:
yield fuzzy_and(
d[i, i].is_real for i in range(self.rows) if (i, i) in d)
yield fuzzy_and(
((self[i, j] - self[j, i].conjugate()).is_zero
if (j, i) in d else False) for (i, j) in d)
return fuzzy_and(i for i in cond())
def is_symmetric(self, simplify=True):
"""Return True if self is symmetric.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> M = SparseMatrix(eye(3))
>>> M.is_symmetric()
True
>>> M[0, 2] = 1
>>> M.is_symmetric()
False
"""
if simplify:
return all((k[1], k[0]) in self._smat and
not (self[k] - self[(k[1], k[0])]).simplify()
for k in self._smat)
else:
return all((k[1], k[0]) in self._smat and
self[k] == self[(k[1], k[0])] for k in self._smat)
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import SparseMatrix, Float
>>> from sympy.abc import x, y
>>> A = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
return any(self[key].has(*patterns) for key in self._smat)
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> m = SparseMatrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self.copy()
for k, v in self._smat.items():
fv = f(v)
if fv:
out._smat[k] = fv
else:
out._smat.pop(k, None)
return out
def reshape(self, rows, cols):
"""Reshape matrix while retaining original size.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix(4, 2, range(8))
>>> S.reshape(2, 4)
Matrix([
[0, 1, 2, 3],
[4, 5, 6, 7]])
"""
if len(self) != rows*cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
smat = {}
for k, v in self._smat.items():
i, j = k
n = i*self.cols + j
ii, jj = divmod(n, cols)
smat[(ii, jj)] = self._smat[(i, j)]
return self._new(rows, cols, smat)
def liupc(self):
"""Liu's algorithm, for pre-determination of the Elimination Tree of
the given matrix, used in row-based symbolic Cholesky factorization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.liupc()
([[0], [], [0], [1, 2]], [4, 3, 4, 4])
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
# Algorithm 2.4, p 17 of reference
# get the indices of the elements that are non-zero on or below diag
R = [[] for r in range(self.rows)]
for r, c, _ in self.row_list():
if c <= r:
R[r].append(c)
inf = len(R) # nothing will be this large
parent = [inf]*self.rows
virtual = [inf]*self.rows
for r in range(self.rows):
for c in R[r][:-1]:
while virtual[c] < r:
t = virtual[c]
virtual[c] = r
c = t
if virtual[c] == inf:
parent[c] = virtual[c] = r
return R, parent
def row_structure_symbolic_cholesky(self):
"""Symbolic cholesky factorization, for pre-determination of the
non-zero structure of the Cholesky factorization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.row_structure_symbolic_cholesky()
[[0], [], [0], [1, 2]]
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
R, parent = self.liupc()
inf = len(R) # this acts as infinity
Lrow = copy.deepcopy(R)
for k in range(self.rows):
for j in R[k]:
while j != inf and j != k:
Lrow[k].append(j)
j = parent[j]
Lrow[k] = list(sorted(set(Lrow[k])))
return Lrow
def _cholesky_sparse(self):
"""Algorithm for numeric Cholesky factorization of a sparse matrix."""
Crowstruc = self.row_structure_symbolic_cholesky()
C = self.zeros(self.rows)
for i in range(len(Crowstruc)):
for j in Crowstruc[i]:
if i != j:
C[i, j] = self[i, j]
summ = 0
for p1 in Crowstruc[i]:
if p1 < j:
for p2 in Crowstruc[j]:
if p2 < j:
if p1 == p2:
summ += C[i, p1]*C[j, p1]
else:
break
else:
break
C[i, j] -= summ
C[i, j] /= C[j, j]
else:
C[j, j] = self[j, j]
summ = 0
for k in Crowstruc[j]:
if k < j:
summ += C[j, k]**2
else:
break
C[j, j] -= summ
C[j, j] = sqrt(C[j, j])
return C
def _LDL_sparse(self):
"""Algorithm for numeric LDL factization, exploiting sparse structure.
"""
Lrowstruc = self.row_structure_symbolic_cholesky()
L = self.eye(self.rows)
D = self.zeros(self.rows, self.cols)
for i in range(len(Lrowstruc)):
for j in Lrowstruc[i]:
if i != j:
L[i, j] = self[i, j]
summ = 0
for p1 in Lrowstruc[i]:
if p1 < j:
for p2 in Lrowstruc[j]:
if p2 < j:
if p1 == p2:
summ += L[i, p1]*L[j, p1]*D[p1, p1]
else:
break
else:
break
L[i, j] -= summ
L[i, j] /= D[j, j]
elif i == j:
D[i, i] = self[i, i]
summ = 0
for k in Lrowstruc[i]:
if k < i:
summ += L[i, k]**2*D[k, k]
else:
break
D[i, i] -= summ
return L, D
def _lower_triangular_solve(self, rhs):
"""Fast algorithm for solving a lower-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i > j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows):
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def _upper_triangular_solve(self, rhs):
"""Fast algorithm for solving an upper-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i < j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows - 1, -1, -1):
rows[i].reverse()
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def _diagonal_solve(self, rhs):
"Diagonal solve."
return self._new(self.rows, 1, lambda i, j: rhs[i, 0] / self[i, i])
def _cholesky_solve(self, rhs):
# For speed, the symmetry check below is left commented out; if you run
# into problems, uncomment it to verify that the input matrix is symmetric.
#assert self.is_symmetric()
L = self._cholesky_sparse()
Y = L._lower_triangular_solve(rhs)
rv = L.T._upper_triangular_solve(Y)
return rv
def _LDL_solve(self, rhs):
# For speed, the symmetry check below is left commented out; if you run
# into problems, uncomment it to verify that the input matrix is symmetric.
#assert self.is_symmetric()
L, D = self._LDL_sparse()
Z = L._lower_triangular_solve(rhs)
Y = D._diagonal_solve(Z)
return L.T._upper_triangular_solve(Y)
def cholesky(self):
"""
Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25,15,-5),(15,18,0),(-5,0,11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T == A
True
"""
from sympy.core.numbers import nan, oo
if not self.is_symmetric():
raise ValueError('Cholesky decomposition applies only to '
'symmetric matrices.')
M = self.as_mutable()._cholesky_sparse()
if M.has(nan) or M.has(oo):
raise ValueError('Cholesky decomposition applies only to '
'positive-definite matrices')
return self._new(M)
def LDLdecomposition(self):
"""
Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix
``A``, such that ``L * D * L.T == A``. ``A`` must be a square,
symmetric, positive-definite and non-singular.
This method eliminates the use of square root and ensures that all
the diagonal entries of L are 1.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T == A
True
"""
from sympy.core.numbers import nan, oo
if not self.is_symmetric():
raise ValueError('LDL decomposition applies only to '
'symmetric matrices.')
L, D = self.as_mutable()._LDL_sparse()
if L.has(nan) or L.has(oo) or D.has(nan) or D.has(oo):
raise ValueError('LDL decomposition applies only to '
'positive-definite matrices')
return self._new(L), self._new(D)
def solve_least_squares(self, rhs, method='LDL'):
"""Return the least-square fit to the data.
By default the LDL decomposition is used (method='LDL'); the
Cholesky-based solver can be selected with method='CH'. To find out
which methods of matrix inversion are available, see the docstring
of the .inv() method.
Examples
========
>>> from sympy.matrices import SparseMatrix, Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = SparseMatrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represents coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
t = self.T
return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='LDL'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system.')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method)*rhs
def _eval_inverse(self, **kwargs):
"""Return the matrix inverse using Cholesky or LDL (default)
decomposition as selected with the ``method`` keyword: 'CH' or 'LDL',
respectively.
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix([
... [ 2, -1, 0],
... [-1, 2, -1],
... [ 0, 0, 2]])
>>> A.inv('CH')
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A.inv(method='LDL') # use of 'method=' is optional
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A * _
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
"""
sym = self.is_symmetric()
M = self.as_mutable()
I = M.eye(M.rows)
if not sym:
t = M.T
r1 = M[0, :]
M = t*M
I = t*I
method = kwargs.get('method', 'LDL')
if method == "LDL":
solve = M._LDL_solve
elif method == "CH":
solve = M._cholesky_solve
else:
raise NotImplementedError(
'Method may be "CH" or "LDL", not %s.' % method)
rv = M.hstack(*[solve(I[:, i]) for i in range(I.cols)])
if not sym:
scale = (r1*rv[:, 0])[0, 0]
rv /= scale
return self._new(rv)
def __eq__(self, other):
try:
if self.shape != other.shape:
return False
if isinstance(other, SparseMatrix):
return self._smat == other._smat
elif isinstance(other, MatrixBase):
return self._smat == MutableSparseMatrix(other)._smat
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def as_mutable(self):
"""Returns a mutable version of this matrix.
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return MutableSparseMatrix(self)
def as_immutable(self):
"""Returns an Immutable version of this Matrix."""
from .immutable import ImmutableSparseMatrix
return ImmutableSparseMatrix(self)
def nnz(self):
"""Returns the number of non-zero elements in Matrix."""
return len(self._smat)
@classmethod
def zeros(cls, r, c=None):
"""Return an r x c matrix of zeros, square if c is omitted."""
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return cls(r, c, {})
@classmethod
def eye(cls, n):
"""Return an n x n identity matrix."""
n = as_int(n)
return cls(n, n, dict([((i, i), S.One) for i in range(n)]))
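# A minimal, self-contained sketch (not part of the original module) showing the
# solver entry points defined above. The matrix is the symmetric positive-definite
# example from the cholesky() and LDLdecomposition() docstrings; the __main__
# guard keeps the demo from running on import, and the aliased import avoids
# shadowing the SparseMatrix class defined in this file.
if __name__ == "__main__":
    from sympy import Matrix
    from sympy import SparseMatrix as _DemoSparse

    A = _DemoSparse(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
    b = Matrix([1, 2, 3])
    x_ldl = A.solve(b)               # LDL-based solve, the default method
    x_ch = A.solve(b, method='CH')   # Cholesky-based solve
    assert A*x_ldl == b and A*x_ch == b
    assert A.cholesky() * A.cholesky().T == A   # L*L.T reproduces A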
class MutableSparseMatrix(SparseMatrix, MatrixBase):
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args)
def as_mutable(self):
return self.copy()
def __setitem__(self, key, value):
"""Assign value to position designated by key.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> M = SparseMatrix(2, 2, {})
>>> M[1] = 1; M
Matrix([
[0, 1],
[0, 0]])
>>> M[1, 1] = 2; M
Matrix([
[0, 1],
[0, 2]])
>>> M = SparseMatrix(2, 2, {})
>>> M[:, 1] = [1, 1]; M
Matrix([
[0, 1],
[0, 1]])
>>> M = SparseMatrix(2, 2, {})
>>> M[1, :] = [[1, 1]]; M
Matrix([
[0, 0],
[1, 1]])
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = SparseMatrix(4, 4, {})
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
if value:
self._smat[(i, j)] = value
elif (i, j) in self._smat:
del self._smat[(i, j)]
__hash__ = None
def row_del(self, k):
"""Delete the given row of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.row_del(0)
>>> M
Matrix([[0, 1]])
See Also
========
col_del
"""
newD = {}
k = a2idx(k, self.rows)
for (i, j) in self._smat:
if i == k:
pass
elif i > k:
newD[i - 1, j] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.rows -= 1
def col_del(self, k):
"""Delete the given column of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.col_del(0)
>>> M
Matrix([
[0],
[1]])
See Also
========
row_del
"""
newD = {}
k = a2idx(k, self.cols)
for (i, j) in self._smat:
if j == k:
pass
elif j > k:
newD[i, j - 1] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.cols -= 1
def row_swap(self, i, j):
"""Swap, in place, columns i and j.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix.eye(3); S[2, 1] = 2
>>> S.row_swap(1, 0); S
Matrix([
[0, 1, 0],
[1, 0, 0],
[0, 2, 1]])
"""
if i > j:
i, j = j, i
rows = self.row_list()
temp = []
for ii, jj, v in rows:
if ii == i:
self._smat.pop((ii, jj))
temp.append((jj, v))
elif ii == j:
self._smat.pop((ii, jj))
self._smat[i, jj] = v
elif ii > j:
break
for k, v in temp:
self._smat[j, k] = v
def col_swap(self, i, j):
"""Swap, in place, columns i and j.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix.eye(3); S[2, 1] = 2
>>> S.col_swap(1, 0); S
Matrix([
[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
"""
if i > j:
i, j = j, i
rows = self.col_list()
temp = []
for ii, jj, v in rows:
if jj == i:
self._smat.pop((ii, jj))
temp.append((ii, v))
elif jj == j:
self._smat.pop((ii, jj))
self._smat[ii, i] = v
elif jj > j:
break
for k, v in temp:
self._smat[k, j] = v
def row_join(self, other):
"""Returns B appended after A (column-wise augmenting)::
[A B]
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0)))
>>> A
Matrix([
[1, 0, 1],
[0, 1, 0],
[1, 1, 0]])
>>> B = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
>>> B
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C = A.row_join(B); C
Matrix([
[1, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1]])
>>> C == A.row_join(Matrix(B))
True
Joining at row ends is the same as appending columns at the end
of the matrix:
>>> C == A.col_insert(A.cols, B)
True
"""
A, B = self, other
if not A.rows == B.rows:
raise ShapeError()
A = A.copy()
if not isinstance(B, SparseMatrix):
k = 0
b = B._mat
for i in range(B.rows):
for j in range(B.cols):
v = b[k]
if v:
A._smat[(i, j + A.cols)] = v
k += 1
else:
for (i, j), v in B._smat.items():
A._smat[(i, j + A.cols)] = v
A.cols += B.cols
return A
def col_join(self, other):
"""Returns B augmented beneath A (row-wise joining)::
[A]
[B]
Examples
========
>>> from sympy import SparseMatrix, Matrix, ones
>>> A = SparseMatrix(ones(3))
>>> A
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
>>> B = SparseMatrix.eye(3)
>>> B
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C = A.col_join(B); C
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C == A.col_join(Matrix(B))
True
Joining along columns is the same as appending rows at the end
of the matrix:
>>> C == A.row_insert(A.rows, Matrix(B))
True
"""
A, B = self, other
if not A.cols == B.cols:
raise ShapeError()
A = A.copy()
if not isinstance(B, SparseMatrix):
k = 0
b = B._mat
for i in range(B.rows):
for j in range(B.cols):
v = b[k]
if v:
A._smat[(i + A.rows, j)] = v
k += 1
else:
for (i, j), v in B._smat.items():
A._smat[i + A.rows, j] = v
A.rows += B.rows
return A
def copyin_list(self, key, value):
if not is_sequence(value):
raise TypeError("`value` must be of type list or tuple.")
self.copyin_matrix(key, Matrix(value))
def copyin_matrix(self, key, value):
# include this here because it's not part of BaseMatrix
rlo, rhi, clo, chi = self.key2bounds(key)
shape = value.shape
dr, dc = rhi - rlo, chi - clo
if shape != (dr, dc):
raise ShapeError(
"The Matrix `value` doesn't have the same dimensions "
"as the in sub-Matrix given by `key`.")
if not isinstance(value, SparseMatrix):
for i in range(value.rows):
for j in range(value.cols):
self[i + rlo, j + clo] = value[i, j]
else:
if (rhi - rlo)*(chi - clo) < len(self):
for i in range(rlo, rhi):
for j in range(clo, chi):
self._smat.pop((i, j), None)
else:
for i, j, v in self.row_list():
if rlo <= i < rhi and clo <= j < chi:
self._smat.pop((i, j), None)
for k, v in value._smat.items():
i, j = k
self[i + rlo, j + clo] = value[i, j]
def zip_row_op(self, i, k, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], self[k, j])``.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
row
row_op
col_op
"""
self.row_op(i, lambda v, j: f(v, self[k, j]))
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
row
zip_row_op
col_op
"""
for j in range(self.cols):
v = self._smat.get((i, j), S.Zero)
fv = f(v, j)
if fv:
self._smat[(i, j)] = fv
elif v:
self._smat.pop((i, j))
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i) for i in range(self.rows).
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[1, 0] = -1
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
Matrix([
[ 2, 4, 0],
[-1, 0, 0],
[ 0, 0, 2]])
"""
for i in range(self.rows):
v = self._smat.get((i, j), S.Zero)
fv = f(v, i)
if fv:
self._smat[(i, j)] = fv
elif v:
self._smat.pop((i, j))
def fill(self, value):
"""Fill self with the given value.
Notes
=====
Unless many values are going to be deleted (i.e. set to zero)
this will create a matrix that is slower than a dense matrix in
operations.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.zeros(3); M
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> M.fill(1); M
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
if not value:
self._smat = {}
else:
v = self._sympify(value)
self._smat = dict([((i, j), v)
for i in range(self.rows) for j in range(self.cols)])
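# A small standalone sketch (not part of the original module) exercising the
# in-place editing methods documented above; the expected results follow from
# the fill(), __setitem__, row_swap() and col_del() docstrings. Guarded so it
# only runs when the file is executed directly.
if __name__ == "__main__":
    from sympy import SparseMatrix as _DemoSparse

    M = _DemoSparse.zeros(3)
    M.fill(1)            # every entry becomes an explicit 1
    M[0, 0] = 5          # __setitem__ stores only non-zero entries
    M.row_swap(0, 2)     # in-place row exchange
    M.col_del(1)         # drop the middle column, shifting later columns left
    assert M == _DemoSparse([[1, 1], [1, 1], [5, 1]])
    assert M.nnz() == 6  # six explicitly stored entries remain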
|
|
"""Unit tests for memory-based file-like objects.
StringIO -- for unicode strings
BytesIO -- for bytes
"""
import unittest
from test import support
import io
import _pyio as pyio
import pickle
class MemorySeekTestMixin:
def testInit(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
def testRead(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(buf[:1], bytesIo.read(1))
self.assertEqual(buf[1:5], bytesIo.read(4))
self.assertEqual(buf[5:], bytesIo.read(900))
self.assertEqual(self.EOF, bytesIo.read())
def testReadNoArgs(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(buf, bytesIo.read())
self.assertEqual(self.EOF, bytesIo.read())
def testSeek(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
bytesIo.read(5)
bytesIo.seek(0)
self.assertEqual(buf, bytesIo.read())
bytesIo.seek(3)
self.assertEqual(buf[3:], bytesIo.read())
self.assertRaises(TypeError, bytesIo.seek, 0.0)
def testTell(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(0, bytesIo.tell())
bytesIo.seek(5)
self.assertEqual(5, bytesIo.tell())
bytesIo.seek(10000)
self.assertEqual(10000, bytesIo.tell())
class MemoryTestMixin:
def test_detach(self):
buf = self.ioclass()
self.assertRaises(self.UnsupportedOperation, buf.detach)
def write_ops(self, f, t):
self.assertEqual(f.write(t("blah.")), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(t("Hello.")), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(5), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(t(" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(t("h")), 1)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 1)
def test_write(self):
buf = self.buftype("hello world\n")
memio = self.ioclass(buf)
self.write_ops(memio, self.buftype)
self.assertEqual(memio.getvalue(), buf)
memio = self.ioclass()
self.write_ops(memio, self.buftype)
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.write, None)
memio.close()
self.assertRaises(ValueError, memio.write, self.buftype(""))
def test_writelines(self):
buf = self.buftype("1234567890")
memio = self.ioclass()
self.assertEqual(memio.writelines([buf] * 100), None)
self.assertEqual(memio.getvalue(), buf * 100)
memio.writelines([])
self.assertEqual(memio.getvalue(), buf * 100)
memio = self.ioclass()
self.assertRaises(TypeError, memio.writelines, [buf] + [1])
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.writelines, None)
memio.close()
self.assertRaises(ValueError, memio.writelines, [])
def test_writelines_error(self):
memio = self.ioclass()
def error_gen():
yield self.buftype('spam')
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, memio.writelines, error_gen())
def test_truncate(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertRaises(ValueError, memio.truncate, -1)
memio.seek(6)
self.assertEqual(memio.truncate(), 6)
self.assertEqual(memio.getvalue(), buf[:6])
self.assertEqual(memio.truncate(4), 4)
self.assertEqual(memio.getvalue(), buf[:4])
self.assertEqual(memio.tell(), 6)
memio.seek(0, 2)
memio.write(buf)
self.assertEqual(memio.getvalue(), buf[:4] + buf)
pos = memio.tell()
self.assertEqual(memio.truncate(None), pos)
self.assertEqual(memio.tell(), pos)
self.assertRaises(TypeError, memio.truncate, '0')
memio.close()
self.assertRaises(ValueError, memio.truncate, 0)
def test_init(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
memio = self.ioclass(None)
self.assertEqual(memio.getvalue(), self.EOF)
memio.__init__(buf * 2)
self.assertEqual(memio.getvalue(), buf * 2)
memio.__init__(buf)
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.__init__, [])
def test_read(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.read(0), self.EOF)
self.assertEqual(memio.read(1), buf[:1])
self.assertEqual(memio.read(4), buf[1:5])
self.assertEqual(memio.read(900), buf[5:])
self.assertEqual(memio.read(), self.EOF)
memio.seek(0)
self.assertEqual(memio.read(), buf)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.tell(), 10)
memio.seek(0)
self.assertEqual(memio.read(-1), buf)
memio.seek(0)
self.assertEqual(type(memio.read()), type(buf))
memio.seek(100)
self.assertEqual(type(memio.read()), type(buf))
memio.seek(0)
self.assertEqual(memio.read(None), buf)
self.assertRaises(TypeError, memio.read, '')
memio.close()
self.assertRaises(ValueError, memio.read)
def test_readline(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 2)
self.assertEqual(memio.readline(0), self.EOF)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), self.EOF)
memio.seek(0)
self.assertEqual(memio.readline(5), buf[:5])
self.assertEqual(memio.readline(5), buf[5:10])
self.assertEqual(memio.readline(5), buf[10:15])
memio.seek(0)
self.assertEqual(memio.readline(-1), buf)
memio.seek(0)
self.assertEqual(memio.readline(0), self.EOF)
buf = self.buftype("1234567890\n")
memio = self.ioclass((buf * 3)[:-1])
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf[:-1])
self.assertEqual(memio.readline(), self.EOF)
memio.seek(0)
self.assertEqual(type(memio.readline()), type(buf))
self.assertEqual(memio.readline(), buf)
self.assertRaises(TypeError, memio.readline, '')
memio.close()
self.assertRaises(ValueError, memio.readline)
def test_readlines(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 10)
self.assertEqual(memio.readlines(), [buf] * 10)
memio.seek(5)
self.assertEqual(memio.readlines(), [buf[5:]] + [buf] * 9)
memio.seek(0)
self.assertEqual(memio.readlines(15), [buf] * 2)
memio.seek(0)
self.assertEqual(memio.readlines(-1), [buf] * 10)
memio.seek(0)
self.assertEqual(memio.readlines(0), [buf] * 10)
memio.seek(0)
self.assertEqual(type(memio.readlines()[0]), type(buf))
memio.seek(0)
self.assertEqual(memio.readlines(None), [buf] * 10)
self.assertRaises(TypeError, memio.readlines, '')
memio.close()
self.assertRaises(ValueError, memio.readlines)
def test_iterator(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 10)
self.assertEqual(iter(memio), memio)
self.assertTrue(hasattr(memio, '__iter__'))
self.assertTrue(hasattr(memio, '__next__'))
i = 0
for line in memio:
self.assertEqual(line, buf)
i += 1
self.assertEqual(i, 10)
memio.seek(0)
i = 0
for line in memio:
self.assertEqual(line, buf)
i += 1
self.assertEqual(i, 10)
memio = self.ioclass(buf * 2)
memio.close()
self.assertRaises(ValueError, memio.__next__)
def test_getvalue(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
memio.read()
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(type(memio.getvalue()), type(buf))
memio = self.ioclass(buf * 1000)
self.assertEqual(memio.getvalue()[-3:], self.buftype("890"))
memio = self.ioclass(buf)
memio.close()
self.assertRaises(ValueError, memio.getvalue)
def test_seek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
memio.read(5)
self.assertRaises(ValueError, memio.seek, -1)
self.assertRaises(ValueError, memio.seek, 1, -1)
self.assertRaises(ValueError, memio.seek, 1, 3)
self.assertEqual(memio.seek(0), 0)
self.assertEqual(memio.seek(0, 0), 0)
self.assertEqual(memio.read(), buf)
self.assertEqual(memio.seek(3), 3)
self.assertEqual(memio.seek(0, 1), 3)
self.assertEqual(memio.read(), buf[3:])
self.assertEqual(memio.seek(len(buf)), len(buf))
self.assertEqual(memio.read(), self.EOF)
memio.seek(len(buf) + 1)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.seek(0, 2), len(buf))
self.assertEqual(memio.read(), self.EOF)
memio.close()
self.assertRaises(ValueError, memio.seek, 0)
def test_overseek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.seek(len(buf) + 1), 11)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.tell(), 11)
self.assertEqual(memio.getvalue(), buf)
memio.write(self.EOF)
self.assertEqual(memio.getvalue(), buf)
memio.write(buf)
self.assertEqual(memio.getvalue(), buf + self.buftype('\0') + buf)
def test_tell(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.tell(), 0)
memio.seek(5)
self.assertEqual(memio.tell(), 5)
memio.seek(10000)
self.assertEqual(memio.tell(), 10000)
memio.close()
self.assertRaises(ValueError, memio.tell)
def test_flush(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.flush(), None)
def test_flags(self):
memio = self.ioclass()
self.assertEqual(memio.writable(), True)
self.assertEqual(memio.readable(), True)
self.assertEqual(memio.seekable(), True)
self.assertEqual(memio.isatty(), False)
self.assertEqual(memio.closed, False)
memio.close()
self.assertRaises(ValueError, memio.writable)
self.assertRaises(ValueError, memio.readable)
self.assertRaises(ValueError, memio.seekable)
self.assertRaises(ValueError, memio.isatty)
self.assertEqual(memio.closed, True)
def test_subclassing(self):
buf = self.buftype("1234567890")
def test1():
class MemIO(self.ioclass):
pass
m = MemIO(buf)
return m.getvalue()
def test2():
class MemIO(self.ioclass):
def __init__(me, a, b):
self.ioclass.__init__(me, a)
m = MemIO(buf, None)
return m.getvalue()
self.assertEqual(test1(), buf)
self.assertEqual(test2(), buf)
def test_instance_dict_leak(self):
# Test case for issue #6242.
# This will be caught by regrtest.py -R if this leaks.
for _ in range(100):
memio = self.ioclass()
memio.foo = 1
def test_pickling(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
memio.foo = 42
memio.seek(2)
class PickleTestMemIO(self.ioclass):
def __init__(me, initvalue, foo):
self.ioclass.__init__(me, initvalue)
me.foo = foo
# __getnewargs__ is undefined on purpose. This checks that PEP 307
# is used to provide pickling support.
# Pickle expects the class to be on the module level. Here we use a
# little hack to allow the PickleTestMemIO class to derive from
# self.ioclass without having to define all combinations explicitly on
# the module-level.
import __main__
PickleTestMemIO.__module__ = '__main__'
__main__.PickleTestMemIO = PickleTestMemIO
submemio = PickleTestMemIO(buf, 80)
submemio.seek(2)
# We only support pickle protocol 2 and onward since we use extended
# __reduce__ API of PEP 307 to provide pickling support.
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for obj in (memio, submemio):
obj2 = pickle.loads(pickle.dumps(obj, protocol=proto))
self.assertEqual(obj.getvalue(), obj2.getvalue())
self.assertEqual(obj.__class__, obj2.__class__)
self.assertEqual(obj.foo, obj2.foo)
self.assertEqual(obj.tell(), obj2.tell())
obj2.close()
self.assertRaises(ValueError, pickle.dumps, obj2, proto)
del __main__.PickleTestMemIO
class BytesIOMixin:
def test_getbuffer(self):
memio = self.ioclass(b"1234567890")
buf = memio.getbuffer()
self.assertEqual(bytes(buf), b"1234567890")
memio.seek(5)
buf = memio.getbuffer()
self.assertEqual(bytes(buf), b"1234567890")
# Trying to change the size of the BytesIO while a buffer is exported
# raises a BufferError.
self.assertRaises(BufferError, memio.write, b'x' * 100)
self.assertRaises(BufferError, memio.truncate)
# Mutating the buffer updates the BytesIO
buf[3:6] = b"abc"
self.assertEqual(bytes(buf), b"123abc7890")
self.assertEqual(memio.getvalue(), b"123abc7890")
# After the buffer gets released, we can resize the BytesIO again
del buf
support.gc_collect()
memio.truncate()
class PyBytesIOTest(MemoryTestMixin, MemorySeekTestMixin,
BytesIOMixin, unittest.TestCase):
UnsupportedOperation = pyio.UnsupportedOperation
@staticmethod
def buftype(s):
return s.encode("ascii")
ioclass = pyio.BytesIO
EOF = b""
def test_read1(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertRaises(TypeError, memio.read1)
self.assertEqual(memio.read(), buf)
def test_readinto(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
b = bytearray(b"hello")
self.assertEqual(memio.readinto(b), 5)
self.assertEqual(b, b"12345")
self.assertEqual(memio.readinto(b), 5)
self.assertEqual(b, b"67890")
self.assertEqual(memio.readinto(b), 0)
self.assertEqual(b, b"67890")
b = bytearray(b"hello world")
memio.seek(0)
self.assertEqual(memio.readinto(b), 10)
self.assertEqual(b, b"1234567890d")
b = bytearray(b"")
memio.seek(0)
self.assertEqual(memio.readinto(b), 0)
self.assertEqual(b, b"")
self.assertRaises(TypeError, memio.readinto, '')
import array
a = array.array('b', b"hello world")
memio = self.ioclass(buf)
memio.readinto(a)
self.assertEqual(a.tobytes(), b"1234567890d")
memio.close()
self.assertRaises(ValueError, memio.readinto, b)
memio = self.ioclass(b"123")
b = bytearray()
memio.seek(42)
memio.readinto(b)
self.assertEqual(b, b"")
def test_relative_seek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.seek(-1, 1), 0)
self.assertEqual(memio.seek(3, 1), 3)
self.assertEqual(memio.seek(-4, 1), 0)
self.assertEqual(memio.seek(-1, 2), 9)
self.assertEqual(memio.seek(1, 1), 10)
self.assertEqual(memio.seek(1, 2), 11)
memio.seek(-3, 2)
self.assertEqual(memio.read(), buf[-3:])
memio.seek(0)
memio.seek(1, 1)
self.assertEqual(memio.read(), buf[1:])
def test_unicode(self):
memio = self.ioclass()
self.assertRaises(TypeError, self.ioclass, "1234567890")
self.assertRaises(TypeError, memio.write, "1234567890")
self.assertRaises(TypeError, memio.writelines, ["1234567890"])
def test_bytes_array(self):
buf = b"1234567890"
import array
a = array.array('b', list(buf))
memio = self.ioclass(a)
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(a), 10)
self.assertEqual(memio.getvalue(), buf)
def test_issue5449(self):
buf = self.buftype("1234567890")
self.ioclass(initial_bytes=buf)
self.assertRaises(TypeError, self.ioclass, buf, foo=None)
class TextIOTestMixin:
def test_newlines_property(self):
memio = self.ioclass(newline=None)
# The C StringIO decodes newlines in write() calls, but the Python
# implementation only does so when reading. This function forces them to
# be decoded for testing.
def force_decode():
memio.seek(0)
memio.read()
self.assertEqual(memio.newlines, None)
memio.write("a\n")
force_decode()
self.assertEqual(memio.newlines, "\n")
memio.write("b\r\n")
force_decode()
self.assertEqual(memio.newlines, ("\n", "\r\n"))
memio.write("c\rd")
force_decode()
self.assertEqual(memio.newlines, ("\r", "\n", "\r\n"))
def test_relative_seek(self):
memio = self.ioclass()
self.assertRaises(IOError, memio.seek, -1, 1)
self.assertRaises(IOError, memio.seek, 3, 1)
self.assertRaises(IOError, memio.seek, -3, 1)
self.assertRaises(IOError, memio.seek, -1, 2)
self.assertRaises(IOError, memio.seek, 1, 1)
self.assertRaises(IOError, memio.seek, 1, 2)
def test_textio_properties(self):
memio = self.ioclass()
# These are just dummy values but we nevertheless check them for fear
# of unexpected breakage.
self.assertIsNone(memio.encoding)
self.assertIsNone(memio.errors)
self.assertFalse(memio.line_buffering)
def test_newline_none(self):
# newline=None
memio = self.ioclass("a\nb\r\nc\rd", newline=None)
self.assertEqual(list(memio), ["a\n", "b\n", "c\n", "d"])
memio.seek(0)
self.assertEqual(memio.read(1), "a")
self.assertEqual(memio.read(2), "\nb")
self.assertEqual(memio.read(2), "\nc")
self.assertEqual(memio.read(1), "\n")
memio = self.ioclass(newline=None)
self.assertEqual(2, memio.write("a\n"))
self.assertEqual(3, memio.write("b\r\n"))
self.assertEqual(3, memio.write("c\rd"))
memio.seek(0)
self.assertEqual(memio.read(), "a\nb\nc\nd")
memio = self.ioclass("a\r\nb", newline=None)
self.assertEqual(memio.read(3), "a\nb")
def test_newline_empty(self):
# newline=""
memio = self.ioclass("a\nb\r\nc\rd", newline="")
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
memio.seek(0)
self.assertEqual(memio.read(4), "a\nb\r")
self.assertEqual(memio.read(2), "\nc")
self.assertEqual(memio.read(1), "\r")
memio = self.ioclass(newline="")
self.assertEqual(2, memio.write("a\n"))
self.assertEqual(2, memio.write("b\r"))
self.assertEqual(2, memio.write("\nc"))
self.assertEqual(2, memio.write("\rd"))
memio.seek(0)
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
def test_newline_lf(self):
# newline="\n"
memio = self.ioclass("a\nb\r\nc\rd")
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
def test_newline_cr(self):
# newline="\r"
memio = self.ioclass("a\nb\r\nc\rd", newline="\r")
self.assertEqual(memio.read(), "a\rb\r\rc\rd")
memio.seek(0)
self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"])
def test_newline_crlf(self):
# newline="\r\n"
memio = self.ioclass("a\nb\r\nc\rd", newline="\r\n")
self.assertEqual(memio.read(), "a\r\nb\r\r\nc\rd")
memio.seek(0)
self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"])
def test_issue5265(self):
# StringIO can duplicate newlines in universal newlines mode
memio = self.ioclass("a\r\nb\r\n", newline=None)
self.assertEqual(memio.read(5), "a\nb\n")
def test_newline_argument(self):
self.assertRaises(TypeError, self.ioclass, newline=b"\n")
self.assertRaises(ValueError, self.ioclass, newline="error")
# These should not raise an error
for newline in (None, "", "\n", "\r", "\r\n"):
self.ioclass(newline=newline)
class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin,
TextIOTestMixin, unittest.TestCase):
buftype = str
ioclass = pyio.StringIO
UnsupportedOperation = pyio.UnsupportedOperation
EOF = ""
class PyStringIOPickleTest(TextIOTestMixin, unittest.TestCase):
"""Test if pickle restores properly the internal state of StringIO.
"""
buftype = str
UnsupportedOperation = pyio.UnsupportedOperation
EOF = ""
class ioclass(pyio.StringIO):
def __new__(cls, *args, **kwargs):
return pickle.loads(pickle.dumps(pyio.StringIO(*args, **kwargs)))
def __init__(self, *args, **kwargs):
pass
class CBytesIOTest(PyBytesIOTest):
ioclass = io.BytesIO
UnsupportedOperation = io.UnsupportedOperation
def test_getstate(self):
memio = self.ioclass()
state = memio.__getstate__()
self.assertEqual(len(state), 3)
bytearray(state[0]) # Check if state[0] supports the buffer interface.
self.assertIsInstance(state[1], int)
self.assertTrue(isinstance(state[2], dict) or state[2] is None)
memio.close()
self.assertRaises(ValueError, memio.__getstate__)
def test_setstate(self):
# This checks whether __setstate__ does proper input validation.
memio = self.ioclass()
memio.__setstate__((b"no error", 0, None))
memio.__setstate__((bytearray(b"no error"), 0, None))
memio.__setstate__((b"no error", 0, {'spam': 3}))
self.assertRaises(ValueError, memio.__setstate__, (b"", -1, None))
self.assertRaises(TypeError, memio.__setstate__, ("unicode", 0, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", 0.0, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", 0, 0))
self.assertRaises(TypeError, memio.__setstate__, (b"len-test", 0))
self.assertRaises(TypeError, memio.__setstate__)
self.assertRaises(TypeError, memio.__setstate__, 0)
memio.close()
self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None))
check_sizeof = support.check_sizeof
@support.cpython_only
def test_sizeof(self):
basesize = support.calcobjsize('P2nN2Pn')
check = self.check_sizeof
self.assertEqual(object.__sizeof__(io.BytesIO()), basesize)
check(io.BytesIO(), basesize )
check(io.BytesIO(b'a'), basesize + 1 + 1 )
check(io.BytesIO(b'a' * 1000), basesize + 1000 + 1 )
class CStringIOTest(PyStringIOTest):
ioclass = io.StringIO
UnsupportedOperation = io.UnsupportedOperation
# XXX: For the Python version of io.StringIO, this is highly
# dependent on the encoding used for the underlying buffer.
def test_widechar(self):
buf = self.buftype("\U0002030a\U00020347")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(buf), len(buf))
self.assertEqual(memio.tell(), len(buf))
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(buf), len(buf))
self.assertEqual(memio.tell(), len(buf) * 2)
self.assertEqual(memio.getvalue(), buf + buf)
def test_getstate(self):
memio = self.ioclass()
state = memio.__getstate__()
self.assertEqual(len(state), 4)
self.assertIsInstance(state[0], str)
self.assertIsInstance(state[1], str)
self.assertIsInstance(state[2], int)
self.assertTrue(isinstance(state[3], dict) or state[3] is None)
memio.close()
self.assertRaises(ValueError, memio.__getstate__)
def test_setstate(self):
# This checks whether __setstate__ does proper input validation.
memio = self.ioclass()
memio.__setstate__(("no error", "\n", 0, None))
memio.__setstate__(("no error", "", 0, {'spam': 3}))
self.assertRaises(ValueError, memio.__setstate__, ("", "f", 0, None))
self.assertRaises(ValueError, memio.__setstate__, ("", "", -1, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", "", 0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", b"", 0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", "", 0.0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", "", 0, 0))
self.assertRaises(TypeError, memio.__setstate__, ("len-test", 0))
self.assertRaises(TypeError, memio.__setstate__)
self.assertRaises(TypeError, memio.__setstate__, 0)
memio.close()
self.assertRaises(ValueError, memio.__setstate__, ("closed", "", 0, None))
class CStringIOPickleTest(PyStringIOPickleTest):
UnsupportedOperation = io.UnsupportedOperation
class ioclass(io.StringIO):
def __new__(cls, *args, **kwargs):
return pickle.loads(pickle.dumps(io.StringIO(*args, **kwargs)))
def __init__(self, *args, **kwargs):
pass
def test_main():
tests = [PyBytesIOTest, PyStringIOTest, CBytesIOTest, CStringIOTest,
PyStringIOPickleTest, CStringIOPickleTest]
support.run_unittest(*tests)
if __name__ == '__main__':
test_main()
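# A tiny standalone illustration (separate from the tests above) of the behaviour
# test_pickling relies on: pickling an io.BytesIO with protocol 2 or higher
# preserves both the buffer contents and the stream position.
if __name__ == '__main__':
    import io
    import pickle

    original = io.BytesIO(b"1234567890")
    original.seek(3)
    clone = pickle.loads(pickle.dumps(original, protocol=2))
    assert clone.getvalue() == b"1234567890"
    assert clone.tell() == 3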
|
|
import json
import logging
from core import ffCommand
from core import ffLocation
from core import getDeviceStatusDict
from core import getDeviceViewsList
from core import getRoutineViewsDict
from core import reinstallRoutinesFromConfig
from core.api.alexa import alexaHandler
from core.api.ifttt import iftttHandler
from core.api.locative import locativeHandler
from core.api.ha_bridge import ha_bridge_handler
from core.api.ha_bridge import ha_bridge_push_config
from core.firefly import app
from flask import request
#FIX
from core.database.device_db import reinstallDevices
from core.database.device_db import reinstall_indigo
from core.database.device_db import DeviceViews
device_views = DeviceViews()
@app.route('/')
def baseView():
return "This is the root page"
@app.route('/API/alexa', methods=['POST'])
def apiAlexa():
r = request.get_json(force=True)
return alexaHandler(r)
@app.route('/API/ifttt', methods=['POST'])
def apiIFTTT():
r = request.get_json(force=True)
return iftttHandler(r)
@app.route('/API/locative', methods=['POST'])
def locativeAPI():
locativeHandler(request)
return str(True)
@app.route('/API/habridge/command', methods=['POST'])
def ha_bridge_command():
r = request.get_json(force=True)
return ha_bridge_handler(r)
@app.route('/support/habridge/config', methods=['POST','GET'])
def ha_bridge_config():
return ha_bridge_push_config()
@app.route('/API/mode')
def apiMode():
return ffLocation.mode
@app.route('/support/reinstall_routines')
def supportReinstallRoutines():
config_file = '/opt/firefly_system/config/routine.json'
reinstallRoutinesFromConfig(config_file)
return 'Routines Reinstalled'
@app.route('/API/core/views/routine')
def apiCoreViewRoutine():
routine_list = getRoutineViewsDict()
return_data = {}
for r in routine_list:
if r.get('icon') is None:
continue
rID = r.get('id')
return_data[rID] = {'id': rID, 'icon': r.get('icon')}
logging.debug(str(return_data))
return json.dumps(return_data, sort_keys=True)
@app.route('/API/core/views/devices')
def apiCoreViewDevices():
#devices = getDeviceViewsList()
devices = device_views.deviceViewsList
return_data = {'devices': devices}
device_type_list = []
for d in devices:
d_type = d.get('type')
if d_type not in device_type_list:
device_type_list.append(d_type)
device_types = [
{
'index': 0,
'type': 'all',
'title': 'all devices'
}
]
device_index = 1
for d in sorted(device_type_list):
device_types.append({
'index': device_index,
'type': str(d),
'title': str(d)
})
device_index += 1
return_data['types'] = device_types
return json.dumps(return_data, sort_keys=True)
@app.route('/API/core/status/devices/all')
def apiCoreStatusDevicesAll():
device_status = device_views.deviceStatusDict
#device_status = getDeviceStatusDict()
return_data = {'devices': device_status}
device_type_list = []
for name, d in device_status.iteritems():
if d.get('views'):
d_type = d.get('views').get('type')
if d_type not in device_type_list:
device_type_list.append(d_type)
device_types = [
{
'index': 0,
'type': 'all',
'title': 'all devices'
}
]
device_index = 1
for d in sorted(device_type_list):
device_types.append({
'index': device_index,
'type': str(d),
'title': str(d)
})
device_index += 1
return_data['types'] = device_types
return json.dumps(return_data, sort_keys=True)
@app.route('/API/command', methods=['POST'])
def apiCommand():
c = request.get_json(force=True)
logging.critical(str(c))
command = c.get('command')
logging.critical(command)
device = c.get('device')
force = c.get('force')
routine = c.get('routine')
source = 'web: /API/command'
if routine:
ffCommand(device, command, routine=routine, force=force, source=source)
else:
ffCommand(device, command, source=source)
# Refresh device views on change on UI
device_views.refreshViews()
return "OK"
#rough hacks.. Fix soon
@app.route('/support/reinstall_devices')
def apiReinstallDevices():
reinstallDevices()
return "OK"
@app.route('/support/reinstall_indigo')
def apiSupportReinstallIndigo():
reinstall_indigo()
return "OK"
@app.route('/reinstall_apps')
def apiReinstallApps():
from core import appsDB
from sys import modules
from collections import OrderedDict
import pickle
appsDB.remove({})
with open('/opt/firefly_system/config/apps.json') as coreAppConfig:
appList = json.load(coreAppConfig)
for packageName, module in appList.iteritems():
for moduleName in module:
package_full_path = 'apps.' + str(packageName) + '.' + str(moduleName)
app_package_config = '/opt/firefly_system/config/app_config/' + str(packageName) + '/config.json'
logging.critical(app_package_config)
with open(str(app_package_config)) as app_package_config_file:
app_package_config_data = json.load(app_package_config_file, object_pairs_hook=OrderedDict).get(moduleName) #json.load(app_package_config_file).get(moduleName)
logging.critical(app_package_config_data)
package = __import__(package_full_path, globals={}, locals={}, fromlist=[str(packageName)], level=-1)
reload(modules[package_full_path])
for install in app_package_config_data.get('installs'):
aObj = package.App(install)
aObjBin = pickle.dumps(aObj)
a = {}
a['id'] = aObj.id
a['ffObject'] = aObjBin
a['name'] = install.get('name')
a['listen'] = aObj.listen
appsDB.insert(a)
return "OK"
|
|
"""Schema migration helpers."""
import logging
from sqlalchemy import ForeignKeyConstraint, MetaData, Table, text
from sqlalchemy.engine import reflection
from sqlalchemy.exc import InternalError, OperationalError, SQLAlchemyError
from sqlalchemy.schema import AddConstraint, DropConstraint
from .const import DOMAIN
from .models import SCHEMA_VERSION, TABLE_STATES, Base, SchemaChanges
from .util import session_scope
_LOGGER = logging.getLogger(__name__)
def migrate_schema(instance):
"""Check if the schema needs to be upgraded."""
with session_scope(session=instance.get_session()) as session:
res = (
session.query(SchemaChanges)
.order_by(SchemaChanges.change_id.desc())
.first()
)
current_version = getattr(res, "schema_version", None)
if current_version is None:
current_version = _inspect_schema_version(instance.engine, session)
_LOGGER.debug(
"No schema version found. Inspected version: %s", current_version
)
if current_version == SCHEMA_VERSION:
return
_LOGGER.warning(
"Database is about to upgrade. Schema version: %s", current_version
)
with instance.hass.timeout.freeze(DOMAIN):
for version in range(current_version, SCHEMA_VERSION):
new_version = version + 1
_LOGGER.info("Upgrading recorder db schema to version %s", new_version)
_apply_update(instance.engine, new_version, current_version)
session.add(SchemaChanges(schema_version=new_version))
_LOGGER.info("Upgrade to version %s done", new_version)
def _create_index(engine, table_name, index_name):
"""Create an index for the specified table.
The index name should match the name given for the index
within the table definition described in the models
"""
table = Table(table_name, Base.metadata)
_LOGGER.debug("Looking up index %s for table %s", index_name, table_name)
# Look up the index object by name from the table in the models
index_list = [idx for idx in table.indexes if idx.name == index_name]
if not index_list:
_LOGGER.debug("The index %s no longer exists", index_name)
return
index = index_list[0]
_LOGGER.debug("Creating %s index", index_name)
_LOGGER.warning(
"Adding index `%s` to database. Note: this can take several "
"minutes on large databases and slow computers. Please "
"be patient!",
index_name,
)
try:
index.create(engine)
except OperationalError as err:
lower_err_str = str(err).lower()
if "already exists" not in lower_err_str and "duplicate" not in lower_err_str:
raise
_LOGGER.warning(
"Index %s already exists on %s, continuing", index_name, table_name
)
except InternalError as err:
if "duplicate" not in str(err).lower():
raise
_LOGGER.warning(
"Index %s already exists on %s, continuing", index_name, table_name
)
_LOGGER.debug("Finished creating %s", index_name)
def _drop_index(engine, table_name, index_name):
"""Drop an index from a specified table.
There is no universal way to do something like `DROP INDEX IF EXISTS`
so we will simply execute the DROP command and ignore any exceptions
WARNING: Due to some engines (MySQL at least) being unable to use bind
parameters in a DROP INDEX statement (at least via SQLAlchemy), the query
string here is generated from the method parameters without sanitizing.
DO NOT USE THIS FUNCTION IN ANY OPERATION THAT TAKES USER INPUT.
"""
_LOGGER.debug("Dropping index %s from table %s", index_name, table_name)
success = False
# Engines like DB2/Oracle
try:
engine.execute(text(f"DROP INDEX {index_name}"))
except SQLAlchemyError:
pass
else:
success = True
# Engines like SQLite, SQL Server
if not success:
try:
engine.execute(
text(
"DROP INDEX {table}.{index}".format(
index=index_name, table=table_name
)
)
)
except SQLAlchemyError:
pass
else:
success = True
if not success:
# Engines like MySQL, MS Access
try:
engine.execute(
text(
"DROP INDEX {index} ON {table}".format(
index=index_name, table=table_name
)
)
)
except SQLAlchemyError:
pass
else:
success = True
if success:
_LOGGER.debug(
"Finished dropping index %s from table %s", index_name, table_name
)
else:
if index_name == "ix_states_context_parent_id":
# Was only there on nightly so we do not want
# to generate log noise or issues about it.
return
_LOGGER.warning(
"Failed to drop index %s from table %s. Schema "
"Migration will continue; this is not a "
"critical operation",
index_name,
table_name,
)
def _add_columns(engine, table_name, columns_def):
"""Add columns to a table."""
_LOGGER.warning(
"Adding columns %s to table %s. Note: this can take several "
"minutes on large databases and slow computers. Please "
"be patient!",
", ".join(column.split(" ")[0] for column in columns_def),
table_name,
)
columns_def = [f"ADD {col_def}" for col_def in columns_def]
try:
engine.execute(
text(
"ALTER TABLE {table} {columns_def}".format(
table=table_name, columns_def=", ".join(columns_def)
)
)
)
return
except (InternalError, OperationalError):
# Some engines support adding all columns at once,
# this error is when they don't
_LOGGER.info("Unable to use quick column add. Adding 1 by 1")
for column_def in columns_def:
try:
engine.execute(
text(
"ALTER TABLE {table} {column_def}".format(
table=table_name, column_def=column_def
)
)
)
except (InternalError, OperationalError) as err:
if "duplicate" not in str(err).lower():
raise
_LOGGER.warning(
"Column %s already exists on %s, continuing",
column_def.split(" ")[1],
table_name,
)
def _update_states_table_with_foreign_key_options(engine):
"""Add the options to foreign key constraints."""
inspector = reflection.Inspector.from_engine(engine)
alters = []
for foreign_key in inspector.get_foreign_keys(TABLE_STATES):
if foreign_key["name"] and not foreign_key["options"]:
alters.append(
{
"old_fk": ForeignKeyConstraint((), (), name=foreign_key["name"]),
"columns": foreign_key["constrained_columns"],
}
)
if not alters:
return
states_key_constraints = Base.metadata.tables[TABLE_STATES].foreign_key_constraints
old_states_table = Table( # noqa: F841 pylint: disable=unused-variable
TABLE_STATES, MetaData(), *[alter["old_fk"] for alter in alters]
)
for alter in alters:
try:
engine.execute(DropConstraint(alter["old_fk"]))
for fkc in states_key_constraints:
if fkc.column_keys == alter["columns"]:
engine.execute(AddConstraint(fkc))
except (InternalError, OperationalError):
_LOGGER.exception(
"Could not update foreign options in %s table", TABLE_STATES
)
def _apply_update(engine, new_version, old_version):
"""Perform operations to bring schema up to date."""
if new_version == 1:
_create_index(engine, "events", "ix_events_time_fired")
elif new_version == 2:
# Create compound start/end index for recorder_runs
_create_index(engine, "recorder_runs", "ix_recorder_runs_start_end")
# Create indexes for states
_create_index(engine, "states", "ix_states_last_updated")
elif new_version == 3:
# There used to be a new index here, but it was removed in version 4.
pass
elif new_version == 4:
# Queries were rewritten in this schema release. Most indexes from
# earlier versions of the schema are no longer needed.
if old_version == 3:
# Remove index that was added in version 3
_drop_index(engine, "states", "ix_states_created_domain")
if old_version == 2:
# Remove index that was added in version 2
_drop_index(engine, "states", "ix_states_entity_id_created")
# Remove indexes that were added in version 0
_drop_index(engine, "states", "states__state_changes")
_drop_index(engine, "states", "states__significant_changes")
_drop_index(engine, "states", "ix_states_entity_id_created")
_create_index(engine, "states", "ix_states_entity_id_last_updated")
elif new_version == 5:
# Create supporting index for States.event_id foreign key
_create_index(engine, "states", "ix_states_event_id")
elif new_version == 6:
_add_columns(
engine,
"events",
["context_id CHARACTER(36)", "context_user_id CHARACTER(36)"],
)
_create_index(engine, "events", "ix_events_context_id")
_create_index(engine, "events", "ix_events_context_user_id")
_add_columns(
engine,
"states",
["context_id CHARACTER(36)", "context_user_id CHARACTER(36)"],
)
_create_index(engine, "states", "ix_states_context_id")
_create_index(engine, "states", "ix_states_context_user_id")
elif new_version == 7:
_create_index(engine, "states", "ix_states_entity_id")
elif new_version == 8:
_add_columns(engine, "events", ["context_parent_id CHARACTER(36)"])
_add_columns(engine, "states", ["old_state_id INTEGER"])
_create_index(engine, "events", "ix_events_context_parent_id")
elif new_version == 9:
# We now get the context from events with a join
# since it's always there on state_changed events
#
# Ideally we would drop the columns from the states
# table as well but sqlite doesn't support that
# and we would have to move to something like
# sqlalchemy alembic to make that work
#
_drop_index(engine, "states", "ix_states_context_id")
_drop_index(engine, "states", "ix_states_context_user_id")
# This index won't be there if they were not running
# nightly but we don't treat that as a critical issue
_drop_index(engine, "states", "ix_states_context_parent_id")
# Redundant keys on composite index:
# We already have ix_states_entity_id_last_updated
_drop_index(engine, "states", "ix_states_entity_id")
_create_index(engine, "events", "ix_events_event_type_time_fired")
_drop_index(engine, "events", "ix_events_event_type")
elif new_version == 10:
_update_states_table_with_foreign_key_options(engine)
else:
raise ValueError(f"No schema migration defined for version {new_version}")
def _inspect_schema_version(engine, session):
"""Determine the schema version by inspecting the db structure.
When the schema version is not present in the db, either db was just
created with the correct schema, or this is a db created before schema
versions were tracked. For now, we'll test if the changes for schema
version 1 are present to make the determination. Eventually this logic
can be removed and we can assume a new db is being created.
"""
inspector = reflection.Inspector.from_engine(engine)
indexes = inspector.get_indexes("events")
for index in indexes:
if index["column_names"] == ["time_fired"]:
# Schema addition from version 1 detected. New DB.
session.add(SchemaChanges(schema_version=SCHEMA_VERSION))
return SCHEMA_VERSION
# Version 1 schema changes not found, this db needs to be migrated.
current_version = SchemaChanges(schema_version=0)
session.add(current_version)
return current_version.schema_version
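# A standalone sketch (not part of this module) of the fallback idea behind
# _drop_index(): because there is no portable "DROP INDEX IF EXISTS", several
# dialect-specific statements are attempted and SQLAlchemyError is swallowed
# until one succeeds. Uses the SQLAlchemy 1.x engine.execute(text(...)) style
# seen above and an in-memory SQLite database purely for illustration.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy import text as _text
    from sqlalchemy.exc import SQLAlchemyError

    demo_engine = create_engine("sqlite://")
    demo_engine.execute(_text("CREATE TABLE states (state_id INTEGER)"))
    demo_engine.execute(_text("CREATE INDEX ix_states_state_id ON states (state_id)"))

    for statement in (
        "DROP INDEX ix_states_state_id",            # DB2/Oracle style
        "DROP INDEX states.ix_states_state_id",     # SQLite/SQL Server style
        "DROP INDEX ix_states_state_id ON states",  # MySQL style
    ):
        try:
            demo_engine.execute(_text(statement))
        except SQLAlchemyError:
            continue
        break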
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking code."""
from decimal import Decimal
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.messages import COIN
from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
chain_transaction,
)
# default limits
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
# custom limits for node1
MAX_ANCESTORS_CUSTOM = 5
MAX_DESCENDANTS_CUSTOM = 10
assert MAX_DESCENDANTS_CUSTOM >= MAX_ANCESTORS_CUSTOM
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
[
"-maxorphantx=1000",
"[email protected]", # immediate tx relay
],
[
"-maxorphantx=1000",
"-limitancestorcount={}".format(MAX_ANCESTORS_CUSTOM),
"-limitdescendantcount={}".format(MAX_DESCENDANTS_CUSTOM),
],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some blocks and have them mature.
peer_inv_store = self.nodes[0].add_p2p_connection(P2PTxInvStore()) # keep track of invs
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
assert 'ancestorcount' not in utxo[0]
assert 'ancestorsize' not in utxo[0]
assert 'ancestorfees' not in utxo[0]
fee = Decimal("0.0001")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
witness_chain = []
ancestor_vsize = 0
ancestor_fees = Decimal(0)
for i in range(MAX_ANCESTORS):
(txid, sent_value) = chain_transaction(self.nodes[0], [txid], [0], value, fee, 1)
value = sent_value
chain.append(txid)
# We need the wtxids to check P2P announcements
witnesstx = self.nodes[0].gettransaction(txid=txid, verbose=True)['decoded']
witness_chain.append(witnesstx['hash'])
# Check that listunspent ancestor{count, size, fees} yield the correct results
wallet_unspent = self.nodes[0].listunspent(minconf=0)
this_unspent = next(utxo_info for utxo_info in wallet_unspent if utxo_info['txid'] == txid)
assert_equal(this_unspent['ancestorcount'], i + 1)
ancestor_vsize += self.nodes[0].getrawtransaction(txid=txid, verbose=True)['vsize']
assert_equal(this_unspent['ancestorsize'], ancestor_vsize)
ancestor_fees -= self.nodes[0].gettransaction(txid=txid)['fee']
assert_equal(this_unspent['ancestorfees'], ancestor_fees * COIN)
# Wait until mempool transactions have passed initial broadcast (sent inv and received getdata)
# Otherwise, getrawmempool may be inconsistent with getmempoolentry if unbroadcast changes in between
peer_inv_store.wait_for_broadcast(witness_chain)
# Check mempool has MAX_ANCESTORS transactions in it, and descendant and ancestor
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
assert_equal(len(mempool), MAX_ANCESTORS)
descendant_count = 1
descendant_fees = 0
descendant_vsize = 0
assert_equal(ancestor_vsize, sum([mempool[tx]['vsize'] for tx in mempool]))
ancestor_count = MAX_ANCESTORS
assert_equal(ancestor_fees, sum([mempool[tx]['fee'] for tx in mempool]))
descendants = []
ancestors = list(chain)
for x in reversed(chain):
# Check that getmempoolentry is consistent with getrawmempool
entry = self.nodes[0].getmempoolentry(x)
assert_equal(entry, mempool[x])
# Check that the descendant calculations are correct
assert_equal(entry['descendantcount'], descendant_count)
descendant_fees += entry['fee']
assert_equal(entry['modifiedfee'], entry['fee'])
assert_equal(entry['fees']['base'], entry['fee'])
assert_equal(entry['fees']['modified'], entry['modifiedfee'])
assert_equal(entry['descendantfees'], descendant_fees * COIN)
assert_equal(entry['fees']['descendant'], descendant_fees)
descendant_vsize += entry['vsize']
assert_equal(entry['descendantsize'], descendant_vsize)
descendant_count += 1
# Check that ancestor calculations are correct
assert_equal(entry['ancestorcount'], ancestor_count)
assert_equal(entry['ancestorfees'], ancestor_fees * COIN)
assert_equal(entry['ancestorsize'], ancestor_vsize)
ancestor_vsize -= entry['vsize']
ancestor_fees -= entry['fee']
ancestor_count -= 1
# Check that parent/child list is correct
assert_equal(entry['spentby'], descendants[-1:])
assert_equal(entry['depends'], ancestors[-2:-1])
# Check that getmempooldescendants is correct
assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
# Check getmempooldescendants verbose output is correct
for descendant, dinfo in self.nodes[0].getmempooldescendants(x, True).items():
assert_equal(dinfo['depends'], [chain[chain.index(descendant)-1]])
if dinfo['descendantcount'] > 1:
assert_equal(dinfo['spentby'], [chain[chain.index(descendant)+1]])
else:
assert_equal(dinfo['spentby'], [])
descendants.append(x)
# Check that getmempoolancestors is correct
ancestors.remove(x)
assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
# Check that getmempoolancestors verbose output is correct
for ancestor, ainfo in self.nodes[0].getmempoolancestors(x, True).items():
assert_equal(ainfo['spentby'], [chain[chain.index(ancestor)+1]])
if ainfo['ancestorcount'] > 1:
assert_equal(ainfo['depends'], [chain[chain.index(ancestor)-1]])
else:
assert_equal(ainfo['depends'], [])
# Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
assert_equal(len(v_ancestors), len(chain)-1)
for x in v_ancestors.keys():
assert_equal(mempool[x], v_ancestors[x])
assert chain[-1] not in v_ancestors.keys()
v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
assert_equal(len(v_descendants), len(chain)-1)
for x in v_descendants.keys():
assert_equal(mempool[x], v_descendants[x])
assert chain[0] not in v_descendants.keys()
# Check that ancestor modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000)
ancestor_fees = 0
for x in chain:
entry = self.nodes[0].getmempoolentry(x)
ancestor_fees += entry['fee']
assert_equal(entry['fees']['ancestor'], ancestor_fees + Decimal('0.00001'))
assert_equal(entry['ancestorfees'], ancestor_fees * COIN + 1000)
# Undo the prioritisetransaction for later tests
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000)
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000)
descendant_fees = 0
for x in reversed(chain):
entry = self.nodes[0].getmempoolentry(x)
descendant_fees += entry['fee']
assert_equal(entry['fees']['descendant'], descendant_fees + Decimal('0.00001'))
assert_equal(entry['descendantfees'], descendant_fees * COIN + 1000)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain", chain_transaction, self.nodes[0], [txid], [vout], value, fee, 1)
# Check that prioritising a tx before it's added to the mempool works
# First clear the mempool by mining a block.
self.generate(self.nodes[0], 1)
self.sync_blocks()
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Prioritise a transaction that has been mined, then add it back to the
# mempool by using invalidateblock.
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=2000)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Keep node1's tip synced with node0
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
# Now check that the transaction is in the mempool, with the right modified fee
descendant_fees = 0
for x in reversed(chain):
entry = self.nodes[0].getmempoolentry(x)
descendant_fees += entry['fee']
if (x == chain[-1]):
assert_equal(entry['modifiedfee'], entry['fee'] + Decimal("0.00002"))
assert_equal(entry['fees']['modified'], entry['fee'] + Decimal("0.00002"))
assert_equal(entry['descendantfees'], descendant_fees * COIN + 2000)
assert_equal(entry['fees']['descendant'], descendant_fees + Decimal("0.00002"))
# Check that node1's mempool is as expected (-> custom ancestor limit)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
assert_equal(len(mempool1), MAX_ANCESTORS_CUSTOM)
assert set(mempool1).issubset(set(mempool0))
for tx in chain[:MAX_ANCESTORS_CUSTOM]:
assert tx in mempool1
# TODO: more detailed check of node1's mempool (fees etc.)
# check transaction unbroadcast info (should be false if in both mempools)
mempool = self.nodes[0].getrawmempool(True)
for tx in mempool:
assert_equal(mempool[tx]['unbroadcast'], False)
# TODO: test ancestor size limits
# Now test descendant chain limits
txid = utxo[1]['txid']
value = utxo[1]['amount']
vout = utxo[1]['vout']
transaction_package = []
tx_children = []
# First create one parent tx with 10 children
(txid, sent_value) = chain_transaction(self.nodes[0], [txid], [vout], value, fee, 10)
parent_transaction = txid
for i in range(10):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
# Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
chain = [] # save sent txs for the purpose of checking node1's mempool later (see below)
for _ in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = chain_transaction(self.nodes[0], [utxo['txid']], [utxo['vout']], utxo['amount'], fee, 10)
chain.append(txid)
if utxo['txid'] == parent_transaction:
tx_children.append(txid)
for j in range(10):
transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
mempool = self.nodes[0].getrawmempool(True)
assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
assert_equal(sorted(mempool[parent_transaction]['spentby']), sorted(tx_children))
for child in tx_children:
assert_equal(mempool[child]['depends'], [parent_transaction])
# Sending one more chained transaction will fail
utxo = transaction_package.pop(0)
assert_raises_rpc_error(-26, "too-long-mempool-chain", chain_transaction, self.nodes[0], [utxo['txid']], [utxo['vout']], utxo['amount'], fee, 10)
# Check that node1's mempool is as expected, containing:
# - txs from previous ancestor test (-> custom ancestor limit)
# - parent tx for descendant test
# - txs chained off parent tx (-> custom descendant limit)
self.wait_until(lambda: len(self.nodes[1].getrawmempool()) ==
MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM, timeout=10)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
assert set(mempool1).issubset(set(mempool0))
assert parent_transaction in mempool1
for tx in chain[:MAX_DESCENDANTS_CUSTOM]:
assert tx in mempool1
for tx in chain[MAX_DESCENDANTS_CUSTOM:]:
assert tx not in mempool1
# TODO: more detailed check of node1's mempool (fees etc.)
# TODO: test descendant size limits
# Test reorg handling
# First, the basics:
self.generate(self.nodes[0], 1)
self.sync_blocks()
self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
# Now test the case where node1 has a transaction T in its mempool that
# depends on transactions A and B which are in a mined block, and the
# block containing A and B is disconnected, AND B is not accepted back
# into node1's mempool because its ancestor count is too high.
# Create 8 transactions, like so:
# Tx0 -> Tx1 (vout0)
# \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
#
# Mine them in the next block, then generate a new tx8 that spends
# Tx1 and Tx7, and add to node1's mempool, then disconnect the
# last block.
# Create tx0 with 2 outputs
utxo = self.nodes[0].listunspent()
txid = utxo[0]['txid']
value = utxo[0]['amount']
vout = utxo[0]['vout']
send_value = (value - fee) / 2
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
for _ in range(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
tx0_id = txid
value = send_value
# Create tx1
tx1_id, _ = chain_transaction(self.nodes[0], [tx0_id], [0], value, fee, 1)
# Create tx2-7
vout = 1
txid = tx0_id
for _ in range(6):
(txid, sent_value) = chain_transaction(self.nodes[0], [txid], [vout], value, fee, 1)
vout = 0
value = sent_value
# Mine these in a block
self.generate(self.nodes[0], 1)
self.sync_all()
# Now generate tx8, with a big fee
inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
self.sync_mempools()
# Now try to disconnect the tip on each node...
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.sync_blocks()
if __name__ == '__main__':
MempoolPackagesTest().main()
|
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version number extraction from Bazel build labels.
Arbitrary labels can be associated with a build by passing the `--embed_label`
flag. Release management systems can use this to tag the build with information
that can be used to decide the build number/version number of a bundle without
requiring that that transient information be checked into source control.
This script takes two arguments. The first points to a file containing the JSON
representation of a "control" structure. This control structure is a dictionary
with the following keys:
build_info_path: The path to the build info file (`ctx.info_file.path` from
Starlark) that contains the embedded label information.
build_label_pattern: The regular expression that should be matched against the
build label, with possible placeholders corresponding to `capture_groups`.
build_version_pattern: The string (possibly containing placeholders) that
should be used as the value of `CFBundleVersion`.
fallback_build_label: A build label to use when no `--embed_label` was
provided on the build.
capture_groups: A dictionary whose keys correspond to placeholders found in
`build_label_pattern` and whose values are regular expressions that should
be used to match and capture those segments.
short_version_string_pattern: The string (possibly containing placeholders)
that should be used as the value of `CFBundleShortVersionString`. If
omitted, `build_version_pattern` will be used.
The second argument is the path to the output file. The output is written as a
JSON dictionary containing at most two values:
build_version: The string to use for `CFBundleVersion`.
short_version_string: The string to use for `CFBundleShortVersionString`.
This dictionary may be empty if there was no build label found in the build info
file. (This allows the script to complete gracefully in local development, where
the --embed_label flag is often not passed.)
"""
import contextlib
import json
import re
import string
import sys
class VersionToolError(ValueError):
"""Raised for all errors.
Custom ValueError used to allow catching (and logging) just the VersionTool
errors.
"""
def __init__(self, msg):
"""Initializes an error with the given message.
Args:
msg: The message for the error.
"""
ValueError.__init__(self, msg)
class DefaultFormatDict(dict):
"""A dictionary that ignores non-present args when passed to `vformat`.
If a key is requested that is not in the dictionary, then `{key}` is returned,
which effectively ignores formatting placeholders in the `vformat` string that
are not present in the dictionary.
"""
def __missing__(self, key):
return '{%s}' % key
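# For illustration (not part of the original tool): with the formatter used below,
#   string.Formatter().vformat('{major}.{minor}', (), DefaultFormatDict(major='1'))
# returns '1.{minor}' -- the unknown placeholder is left untouched.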
@contextlib.contextmanager
def _testable_open(fp, mode='r'):
"""Opens a file or uses an existing file-like object.
This allows the logic to be written in such a way that it does not care
whether the "paths" it is given in the control structure are paths to files or
file-like objects (such as StringIO) that support testing.
Args:
fp: Either a string representing the path to a file that should be opened,
or an existing file-like object that should be used directly.
mode: The mode with which to open the file, if `fp` is a string.
Yields:
The file-like object to be used in the body of the nested statements.
"""
if hasattr(fp, 'read') and hasattr(fp, 'write'):
yield fp
else:
yield open(fp, mode)
class VersionTool(object):
"""Implements the core functionality of the versioning tool."""
def __init__(self, control):
"""Initializes VersionTool with the given control options.
Args:
control: The dictionary of options used to control the tool. Please see
the moduledoc for a description of the format of this dictionary.
"""
self._build_info_path = control.get('build_info_path')
self._build_label_pattern = control.get('build_label_pattern')
self._build_version_pattern = control.get('build_version_pattern')
self._capture_groups = control.get('capture_groups')
# `or None` on the next line also normalizes an empty string to None.
self._fallback_build_label = control.get('fallback_build_label') or None
# Use the build_version pattern if short_version_string is not specified so
# that they both end up the same.
self._short_version_string_pattern = control.get(
'short_version_string_pattern') or self._build_version_pattern
def run(self):
"""Performs the operations requested by the control struct."""
substitutions = {}
build_label = None
if self._build_label_pattern:
build_label = self._extract_build_label() or self._fallback_build_label
# It's ok if the build label is not present; this is common during local
# development.
if build_label:
# Substitute the placeholders with named capture groups to extract
# the components from the label and add them to the substitutions
# dictionary.
resolved_pattern = self._build_label_pattern
for name, pattern in self._capture_groups.items():
resolved_pattern = resolved_pattern.replace(
"{%s}" % name, "(?P<%s>%s)" % (name, pattern))
match = re.match(resolved_pattern, build_label)
if match:
substitutions = match.groupdict()
else:
raise VersionToolError(
'The build label ("%s") did not match the pattern ("%s").' %
(build_label, resolved_pattern))
# Build the result dictionary by substituting the extracted values for
# the placeholders. Also, verify that all groups have been substituted; it's
# an error if they weren't (unless no --embed_label was provided at all, in
# which case we silently allow it to support local development easily).
result = {}
build_version = self._substitute_and_verify(
self._build_version_pattern, substitutions, 'build_version',
build_label)
if build_version:
result['build_version'] = build_version
short_version_string = self._substitute_and_verify(
self._short_version_string_pattern, substitutions,
'short_version_string', build_label)
if short_version_string:
result['short_version_string'] = short_version_string
return result
def _extract_build_label(self):
"""Extracts and returns the build label from the build info file.
Returns:
The value of the `BUILD_EMBED_LABEL` line in the build info file, or None
if the file did not exist.
"""
if not self._build_info_path:
return None
with _testable_open(self._build_info_path) as build_info_file:
content = build_info_file.read()
match = re.search(r"^BUILD_EMBED_LABEL\s(.*)$", content, re.MULTILINE)
if match:
return match.group(1)
return None
@staticmethod
def _substitute_and_verify(pattern, substitutions, key, build_label):
"""Substitutes placeholders with captured values and verifies completeness.
If no build label was passed via --embed_label, the version pattern is used
only if it contains no placeholders; otherwise None is returned. If a build
label was passed but placeholders remain after substitution, it is an error.
Args:
pattern: The build version or short version string pattern, potentially
containing placeholders, to substitute into.
substitutions: The dictionary of substitutions to make.
key: The name of the result dictionary key being processed, for error
reporting purposes.
build_label: The build label from which values were extracted, for error
reporting purposes.
Returns:
The substituted version string, or None if it still contained
placeholders but no --embed_label was set.
Raises:
VersionToolError if --embed_label was provided but the version string
still contained placeholders after substitution.
"""
version = string.Formatter().vformat(
pattern, (), DefaultFormatDict(**substitutions))
if re.search(r"\{[^}]*\}", version):
if build_label:
raise VersionToolError(
'--embed_label had a non-empty label ("%s") but the version string '
'"%s" ("%s") still contained placeholders after substitution' % (
build_label, key, version))
else:
return None
return version
def _main(control_path, output_path):
"""Called when running the tool from a shell.
Args:
control_path: The path to the control file.
output_path: The path to the file where the output will be written.
"""
with open(control_path) as control_file:
control = json.load(control_file)
tool = VersionTool(control)
try:
version_data = tool.run()
except VersionToolError as e:
# Log tool errors cleanly for build output.
sys.stderr.write('ERROR: %s\n' % e)
sys.exit(1)
with open(output_path, 'w') as output_file:
# Sort the keys to get deterministic ordering of the output JSON.
json.dump(version_data, output_file, sort_keys=True)
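# A minimal sketch of a shell invocation, assuming this script is saved as
# version.py (the file name and both paths are placeholders):
#   python version.py control.json version_info.json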
if __name__ == '__main__':
if len(sys.argv) < 3:
sys.stderr.write(
'ERROR: Path to control file and/or output file not specified.\n')
exit(1)
_main(sys.argv[1], sys.argv[2])
|
|
import os, re, sublime, sublime_plugin, colorsys
# RGBA rgba(RRR, GGG, BBB, A.A)
# rgb(50, 150, 200)
# rgba(50, 150, 200, 0.5)
# ARGB argb(AAA, RRR, GGG, BBB)
# argb(255, 50, 150, 200)
# HEX #RRGGBBAA
# #A98
# #A98F
# #AA9988
# #AA9988FF
# INT 0xAARRGGBB
# 0x5599FF
# 0xFFAA443F
# HSLA hsla(HHH, SSS, LLL, A.A)
# hsl(100, 50, 50)
# hsla(100, 50%, 50%, 0.5)
# Float Array [ R, G, B, A ]
# [0.55555, 0.1, 0.8, 0.5]
# {0.55555F, 0.1F, 0.8F, 1F}
#
cache_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "julooColorHighlightCache/")
float_r = r'\s*(0(?:\.[0-9]+)?|1(?:\.0+)?)[fF]?\s*'
rgb_regex = re.compile(r'a?rgba?\( *(\d{1,3}) *, *(\d{1,3}) *, *(\d{1,3}) *(?:, *(\d{0,3}(?:\.\d+)?) *)?\)')
hsl_regex = re.compile(r'hsla?\( *(\d{1,3}) *, *(\d{1,3})%? *, *(\d{1,3})%? *(?:, *(\d?(?:\.\d+)?) *)?\)')
float_regex = re.compile(r'(?:\{|\[)'+ float_r +','+ float_r +','+ float_r +'(?:,'+ float_r +')?(?:\}|\])')
class Color():
r = 0
g = 0
b = 0
a = 255
valid = True
def __init__(self, color):
if color.find("rgb") == 0:
s = re.search(rgb_regex, color)
(r, g, b, a) = s.groups()
self.r = int('0'+ r)
self.g = int('0'+ g)
self.b = int('0'+ b)
if a == None:
self.a = 255
else:
self.a = int(float('0'+ a) * 255)
elif color.find("argb") == 0:
s = re.search(rgb_regex, color)
(a, r, g, b) = s.groups()
if b == None:
self.valid = False
else:
self.r = int('0'+ r)
self.g = int('0'+ g)
self.b = int('0'+ b)
self.a = int('0'+ a)
elif color.find("hsl") == 0:
s = re.search(hsl_regex, color)
(h, s, l, a) = s.groups()
# colorsys.hls_to_rgb expects (hue, lightness, saturation)
rgb = colorsys.hls_to_rgb(float('0'+ h) / 360, float('0'+ l) / 100, float('0'+ s) / 100)
self.r = int(rgb[0] * 255)
self.g = int(rgb[1] * 255)
self.b = int(rgb[2] * 255)
if a == None:
self.a = 255
else:
self.a = int(float('0'+ a) * 255)
elif color.find("0x") == 0:
length = len(color) - 2
if length == 6:
(a, r, g, b) = ("FF", color[2:4], color[4:6], color[6:8])
elif length == 8:
(a, r, g, b) = (color[2:4], color[4:6], color[6:8], color[8:])
else:
self.valid = False
return
self.r = self._htoi(r)
self.g = self._htoi(g)
self.b = self._htoi(b)
self.a = self._htoi(a)
elif color.find("#") == 0:
length = len(color) - 1
if length == 3 or length == 4:
if length == 3:
(r, g, b, a) = (color[1:2], color[2:3], color[3:4], "F")
else:
(r, g, b, a) = (color[1:2], color[2:3], color[3:4], color[4])
r += r
g += g
b += b
a += a
elif length == 6:
(r, g, b, a) = (color[1:3], color[3:5], color[5:7], "FF")
elif length == 8:
(r, g, b, a) = (color[1:3], color[3:5], color[5:7], color[7:])
else:
self.valid = False
return
self.r = self._htoi(r)
self.g = self._htoi(g)
self.b = self._htoi(b)
self.a = self._htoi(a)
elif color.find("{") == 0 or color.find("[") == 0:
s = re.search(float_regex, color)
(r, g, b, a) = s.groups()
self.r = int(float(r) * 255)
self.g = int(float(g) * 255)
self.b = int(float(b) * 255)
if a == None:
self.a = 255
else:
self.a = int(float(a) * 255)
def _htoi(self, hex):
return int(hex, 16)
def _itoh(self, n):
return (('0'+ hex(n)[2:])[-2:]).upper()
def to_hsl(self):
c = colorsys.rgb_to_hls(self.r / 255, self.g / 255, self.b / 255)
# colorsys.rgb_to_hls returns (hue, lightness, saturation); CSS hsl() wants (hue, saturation, lightness)
hsl = str(round(c[0] * 360)) +", "+ str(round(c[2] * 100)) +"%, "+ str(round(c[1] * 100)) +"%"
if self.a == 255:
return "hsl("+ hsl +")"
else:
return "hsla("+ hsl +", "+ str(round(self.a / 255, 2)) +")"
def to_int(self):
i = "0x"
if self.a != 0:
i += self._itoh(self.a)
return i + self._itoh(self.r) + self._itoh(self.g) + self._itoh(self.b)
def to_hex(self):
h = "#"+ self._itoh(self.r) + self._itoh(self.g) + self._itoh(self.b)
if self.a != 255:
h += self._itoh(self.a)
return h
def to_rgb(self):
rgb = str(self.r) +", "+ str(self.g) +", "+ str(self.b)
if self.a == 255:
return "rgb("+ rgb +")"
else:
if self.a == 0:
a = "0"
else:
a = str(round(self.a / 255, 2))
return "rgba("+ rgb +", "+ a +")"
def sublime_hex(self):
h = "#"+ self._itoh(self.r) + self._itoh(self.g) + self._itoh(self.b)
if self.a == 0:
h += "01"
else:
h += self._itoh(self.a)
return h
def contrasted_hex(self):
if self.r < 10 and self.g < 10 and self.b < 10:
return "#FFFFFFFF"
else:
return "#000000FF"
lastColor = None
lastColorRegion = None
class JulooColorConvert(sublime_plugin.TextCommand):
def run(self, edit, **args):
global lastColorRegion, lastColor
if lastColorRegion != None:
arg = args['to']
if arg == 'hex':
self.view.replace(edit, lastColorRegion, lastColor.to_hex())
elif arg == 'rgb':
self.view.replace(edit, lastColorRegion, lastColor.to_rgb())
elif arg == 'hsl':
self.view.replace(edit, lastColorRegion, lastColor.to_hsl())
else:
self.view.replace(edit, lastColorRegion, lastColor.to_int())
print(arg)
class JulooColorHighlight(sublime_plugin.EventListener):
color_regex = '(#|0x)[0-9a-fA-F]{8}|(#|0x)[0-9a-fA-F]{6}|#[0-9a-fA-F]{3,4}|(?:a?rgba?\( *\d{1,3} *, *\d{1,3} *, *\d{1,3} *(?:, *\d{0,3}(?:\.\d+)? *)?\))|(?:hsla?\( *\d{1,3} *, *\d{1,3}%? *, *\d{1,3}%? *(?:, *\d{0,3}(?:\.\d+)? *)?\))|(?:\{|\\[)'+ float_r +','+ float_r +','+ float_r +'(?:,'+ float_r +')?(?:\}|\\])'
xml_template = """ <dict>
<key>scope</key>
<string>juloo.color</string>
<key>settings</key>
<dict>
<key>background</key>
<string>{0}</string>
<key>foreground</key>
<string>{0}</string>
</dict>
</dict>
<dict>
<key>scope</key>
<string>juloo.colortext</string>
<key>settings</key>
<dict>
<key>fontStyle</key>
<string>normal</string>
<key>background</key>
<string>{0}</string>
<key>foreground</key>
<string>{1}</string>
</dict>
</dict>
"""
tmp_sel = None
def get_xml_path(self, id):
return cache_path + str(id) +".tmTheme"
def get_full_path(self, theme_path):
return os.path.join(sublime.packages_path(), os.path.normpath(theme_path))
def get_colored_region(self, view):
if len(view.sel()) == 1:
sel = view.sel()[0]
sel = sublime.Region(sel.end(), sel.end())
line = view.line(sel)
startPos = max(line.begin(), sel.begin() - 30)
endPos = min(sel.end() + 30, line.end())
m = sublime.Region(startPos, startPos)
max_iteration = 5
while max_iteration > 0:
m = view.find(self.color_regex, m.end())
if m == None or m.end() > endPos:
break
if m.contains(sel):
return m
max_iteration -= 1
return None
def on_close(self, view):
if view.settings().has("old_color_scheme"):
old_color_scheme = view.settings().get("old_color_scheme")
view.settings().set("color_scheme", old_color_scheme)
view.settings().erase("old_color_scheme")
full_path = self.get_full_path(self.get_xml_path(view.id()))
if os.path.exists(full_path):
os.remove(full_path)
def on_selection_modified_async(self, view):
global lastColorRegion, lastColor
if len(view.sel()) == 0 or view.sel()[0] == self.tmp_sel:
return
else:
self.tmp_sel = view.sel()[0]
region = self.get_colored_region(view)
if region == None:
view.erase_status("color_juloo")
view.erase_regions("colorhightlight")
view.erase_regions("texthightlight")
if view.settings().has("old_color_scheme"):
view.settings().erase("old_color_scheme")
view.settings().erase("color_scheme")
full_path = self.get_full_path(self.get_xml_path(view.id()))
if os.path.exists(full_path):
os.remove(full_path)
else:
lastColorRegion = region
color = Color(view.substr(region))
lastColor = color
if color.valid:
status = "[ Color: "+ color.to_hex() +", "+ color.to_rgb() +", "+ color.to_int() +", "+ color.to_hsl() +" ]"
else:
status = "[ Invalid color ]"
view.set_status("color_juloo", status)
if not color.valid:
return
if view.settings().has("old_color_scheme"):
color_scheme = view.settings().get("old_color_scheme")
else:
color_scheme = view.settings().get("color_scheme")
view.settings().set("old_color_scheme", color_scheme)
data = sublime.load_resource(color_scheme)
index = data.find("</array>")
xml = self.xml_template.format(color.sublime_hex(), color.contrasted_hex())
data = data[:index] + xml + data[index:]
if not os.path.exists(self.get_full_path(cache_path)):
os.mkdir(self.get_full_path(cache_path))
f = open(self.get_full_path(self.get_xml_path(view.id())), "wb")
f.write(data.encode("utf-8"))
f.close()
view.settings().set("color_scheme", self.get_xml_path(view.id()).replace(sublime.packages_path(), "Packages"))
view.add_regions("colorhightlight", [region], "juloo.color", "circle", sublime.HIDDEN)
view.add_regions("texthightlight", [region], "juloo.colortext", "", sublime.DRAW_NO_OUTLINE)
|
|
"""xmltramp: Make XML documents easily accessible."""
__version__ = "2.16"
__author__ = "Aaron Swartz"
__credits__ = "Many thanks to pjz, bitsko, and DanC."
__copyright__ = "(C) 2003 Aaron Swartz. GNU GPL 2."
if not hasattr(__builtins__, 'True'): True, False = 1, 0
def isstr(f): return isinstance(f, type('')) or isinstance(f, type(u''))
def islst(f): return isinstance(f, type(())) or isinstance(f, type([]))
empty = {'http://www.w3.org/1999/xhtml': ['img', 'br', 'hr', 'meta', 'link', 'base', 'param', 'input', 'col', 'area']}
def quote(x, elt=True):
if elt and '<' in x and len(x) > 24 and x.find(']]>') == -1: return "<![CDATA["+x+"]]>"
else: x = x.replace('&', '&amp;').replace('<', '&lt;').replace(']]>', ']]&gt;')
if not elt: x = x.replace('"', '&quot;')
return x
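# For example (derived from the rules above): quote('AT&T <rocks>') returns
# 'AT&amp;T &lt;rocks>', while a string longer than 24 characters that contains
# '<' but no ']]>' is wrapped in a CDATA section instead.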
class Element:
def __init__(self, name, attrs=None, children=None, prefixes=None):
if islst(name) and name[0] == None: name = name[1]
if attrs:
na = {}
for k in attrs.keys():
if islst(k) and k[0] == None: na[k[1]] = attrs[k]
else: na[k] = attrs[k]
attrs = na
self._name = name
self._attrs = attrs or {}
self._dir = children or []
prefixes = prefixes or {}
self._prefixes = dict(zip(prefixes.values(), prefixes.keys()))
if prefixes: self._dNS = prefixes.get(None, None)
else: self._dNS = None
def __repr__(self, recursive=0, multiline=0, inprefixes=None):
def qname(name, inprefixes):
if islst(name):
if inprefixes[name[0]] is not None:
return inprefixes[name[0]]+':'+name[1]
else:
return name[1]
else:
return name
def arep(a, inprefixes, addns=1):
out = ''
for p in self._prefixes.keys():
if not p in inprefixes.keys():
if addns: out += ' xmlns'
if addns and self._prefixes[p]: out += ':'+self._prefixes[p]
if addns: out += '="'+quote(p, False)+'"'
inprefixes[p] = self._prefixes[p]
for k in a.keys():
out += ' ' + qname(k, inprefixes)+ '="' + quote(a[k], False) + '"'
return out
inprefixes = inprefixes or {u'http://www.w3.org/XML/1998/namespace':'xml'}
# need to call first to set inprefixes:
attributes = arep(self._attrs, inprefixes, recursive)
out = '<' + qname(self._name, inprefixes) + attributes
if not self._dir and (self._name[0] in empty.keys()
and self._name[1] in empty[self._name[0]]):
out += ' />'
return out
out += '>'
if recursive:
content = 0
for x in self._dir:
if isinstance(x, Element): content = 1
pad = '\n' + ('\t' * recursive)
for x in self._dir:
if multiline and content: out += pad
if isstr(x): out += quote(x)
elif isinstance(x, Element):
out += x.__repr__(recursive+1, multiline, inprefixes.copy())
else:
raise TypeError, "I wasn't expecting "+`x`+"."
if multiline and content: out += '\n' + ('\t' * (recursive-1))
else:
if self._dir: out += '...'
out += '</'+qname(self._name, inprefixes)+'>'
return out
def __unicode__(self):
text = ''
for x in self._dir:
text += unicode(x)
return ' '.join(text.split())
def __str__(self):
return self.__unicode__().encode('utf-8')
def __getattr__(self, n):
if n[0] == '_': raise AttributeError, "Use foo['"+n+"'] to access the child element."
if self._dNS: n = (self._dNS, n)
for x in self._dir:
if isinstance(x, Element) and x._name == n: return x
raise AttributeError, 'No child element named \''+n+"'"
def __hasattr__(self, n):
for x in self._dir:
if isinstance(x, Element) and x._name == n: return True
return False
def __setattr__(self, n, v):
if n[0] == '_': self.__dict__[n] = v
else: self[n] = v
def __getitem__(self, n):
if isinstance(n, type(0)): # d[1] == d._dir[1]
return self._dir[n]
elif isinstance(n, slice(0).__class__):
# numerical slices
if isinstance(n.start, type(0)): return self._dir[n.start:n.stop]
# d['foo':] == all <foo>s
n = n.start
if self._dNS and not islst(n): n = (self._dNS, n)
out = []
for x in self._dir:
if isinstance(x, Element) and x._name == n: out.append(x)
return out
else: # d['foo'] == first <foo>
if self._dNS and not islst(n): n = (self._dNS, n)
for x in self._dir:
if isinstance(x, Element) and x._name == n: return x
raise KeyError
def __setitem__(self, n, v):
if isinstance(n, type(0)): # d[1]
self._dir[n] = v
elif isinstance(n, slice(0).__class__):
# d['foo':] adds a new foo
n = n.start
if self._dNS and not islst(n): n = (self._dNS, n)
nv = Element(n)
self._dir.append(nv)
else: # d["foo"] replaces first <foo> and dels rest
if self._dNS and not islst(n): n = (self._dNS, n)
nv = Element(n); nv._dir.append(v)
replaced = False
todel = []
for i in range(len(self)):
if self[i]._name == n:
if replaced:
todel.append(i)
else:
self[i] = nv
replaced = True
if not replaced: self._dir.append(nv)
for i in todel: del self[i]
def __delitem__(self, n):
if isinstance(n, type(0)): del self._dir[n]
elif isinstance(n, slice(0).__class__):
# delete all <foo>s
n = n.start
if self._dNS and not islst(n): n = (self._dNS, n)
for i in range(len(self)):
if self[i]._name == n: del self[i]
else:
# delete first foo
for i in range(len(self)):
if self[i]._name == n: del self[i]
break
def __call__(self, *_pos, **_set):
if _set:
for k in _set.keys(): self._attrs[k] = _set[k]
if len(_pos) > 1:
for i in range(0, len(_pos), 2):
self._attrs[_pos[i]] = _pos[i+1]
if len(_pos) == 1:
return self._attrs[_pos[0]]
if len(_pos) == 0:
return self._attrs
def __len__(self): return len(self._dir)
class Namespace:
def __init__(self, uri): self.__uri = uri
def __getattr__(self, n): return (self.__uri, n)
def __getitem__(self, n): return (self.__uri, n)
from xml.sax.handler import EntityResolver, DTDHandler, ContentHandler, ErrorHandler
class Seeder(EntityResolver, DTDHandler, ContentHandler, ErrorHandler):
def __init__(self):
self.stack = []
self.ch = ''
self.prefixes = {}
ContentHandler.__init__(self)
def startPrefixMapping(self, prefix, uri):
if not self.prefixes.has_key(prefix): self.prefixes[prefix] = []
self.prefixes[prefix].append(uri)
def endPrefixMapping(self, prefix):
self.prefixes[prefix].pop()
def startElementNS(self, name, qname, attrs):
ch = self.ch; self.ch = ''
if ch and not ch.isspace(): self.stack[-1]._dir.append(ch)
attrs = dict(attrs)
newprefixes = {}
for k in self.prefixes.keys(): newprefixes[k] = self.prefixes[k][-1]
self.stack.append(Element(name, attrs, prefixes=newprefixes.copy()))
def characters(self, ch):
self.ch += ch
def endElementNS(self, name, qname):
ch = self.ch; self.ch = ''
if ch and not ch.isspace(): self.stack[-1]._dir.append(ch)
element = self.stack.pop()
if self.stack:
self.stack[-1]._dir.append(element)
else:
self.result = element
from xml.sax import make_parser
from xml.sax.handler import feature_namespaces
def seed(fileobj):
seeder = Seeder()
parser = make_parser()
parser.setFeature(feature_namespaces, 1)
parser.setContentHandler(seeder)
parser.parse(fileobj)
return seeder.result
def parse(text):
from StringIO import StringIO
return seed(StringIO(text))
def load(url):
import urllib
return seed(urllib.urlopen(url))
def unittest():
parse('<doc>a<baz>f<b>o</b>ob<b>a</b>r</baz>a</doc>').__repr__(1,1) == \
'<doc>\n\ta<baz>\n\t\tf<b>o</b>ob<b>a</b>r\n\t</baz>a\n</doc>'
assert str(parse("<doc />")) == ""
assert str(parse("<doc>I <b>love</b> you.</doc>")) == "I love you."
assert parse("<doc>\nmom\nwow\n</doc>")[0].strip() == "mom\nwow"
assert str(parse('<bing> <bang> <bong>center</bong> </bang> </bing>')) == "center"
assert str(parse('<doc>\xcf\x80</doc>')) == '\xcf\x80'
d = Element('foo', attrs={'foo':'bar'}, children=['hit with a', Element('bar'), Element('bar')])
try:
d._doesnotexist
raise AssertionError("Expected an AttributeError, but found success. Damn.")
except AttributeError: pass
assert d.bar._name == 'bar'
try:
d.doesnotexist
raise AssertionError("Expected an AttributeError, but found success. Damn.")
except AttributeError: pass
assert hasattr(d, 'bar') == True
assert d('foo') == 'bar'
d(silly='yes')
assert d('silly') == 'yes'
assert d() == d._attrs
assert d[0] == 'hit with a'
d[0] = 'ice cream'
assert d[0] == 'ice cream'
del d[0]
assert d[0]._name == "bar"
assert len(d[:]) == len(d._dir)
assert len(d[1:]) == len(d._dir) - 1
assert len(d['bar':]) == 2
d['bar':] = 'baz'
assert len(d['bar':]) == 3
assert d['bar']._name == 'bar'
d = Element('foo')
doc = Namespace("http://example.org/bar")
bbc = Namespace("http://example.org/bbc")
dc = Namespace("http://purl.org/dc/elements/1.1/")
d = parse("""<doc version="2.7182818284590451"
xmlns="http://example.org/bar"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:bbc="http://example.org/bbc">
<author>John Polk and John Palfrey</author>
<dc:creator>John Polk</dc:creator>
<dc:creator>John Palfrey</dc:creator>
<bbc:show bbc:station="4">Buffy</bbc:show>
</doc>""")
assert repr(d) == '<doc version="2.7182818284590451">...</doc>'
assert d.__repr__(1) == '<doc xmlns:bbc="http://example.org/bbc" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns="http://example.org/bar" version="2.7182818284590451"><author>John Polk and John Palfrey</author><dc:creator>John Polk</dc:creator><dc:creator>John Palfrey</dc:creator><bbc:show bbc:station="4">Buffy</bbc:show></doc>'
assert d.__repr__(1,1) == '<doc xmlns:bbc="http://example.org/bbc" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns="http://example.org/bar" version="2.7182818284590451">\n\t<author>John Polk and John Palfrey</author>\n\t<dc:creator>John Polk</dc:creator>\n\t<dc:creator>John Palfrey</dc:creator>\n\t<bbc:show bbc:station="4">Buffy</bbc:show>\n</doc>'
assert repr(parse("<doc xml:lang='en' />")) == '<doc xml:lang="en"></doc>'
assert str(d.author) == str(d['author']) == "John Polk and John Palfrey"
assert d.author._name == doc.author
assert str(d[dc.creator]) == "John Polk"
assert d[dc.creator]._name == dc.creator
assert str(d[dc.creator:][1]) == "John Palfrey"
d[dc.creator] = "Me!!!"
assert str(d[dc.creator]) == "Me!!!"
assert len(d[dc.creator:]) == 1
d[dc.creator:] = "You!!!"
assert len(d[dc.creator:]) == 2
assert d[bbc.show](bbc.station) == "4"
d[bbc.show](bbc.station, "5")
assert d[bbc.show](bbc.station) == "5"
e = Element('e')
e.c = '<img src="foo">'
assert e.__repr__(1) == '<e><c>&lt;img src="foo"></c></e>'
e.c = '2 > 4'
assert e.__repr__(1) == '<e><c>2 > 4</c></e>'
e.c = 'CDATA sections are <em>closed</em> with ]]>.'
assert e.__repr__(1) == '<e><c>CDATA sections are &lt;em>closed&lt;/em> with ]]&gt;.</c></e>'
e.c = parse('<div xmlns="http://www.w3.org/1999/xhtml">i<br /><span></span>love<br />you</div>')
assert e.__repr__(1) == '<e><c><div xmlns="http://www.w3.org/1999/xhtml">i<br /><span></span>love<br />you</div></c></e>'
e = Element('e')
e('c', 'that "sucks"')
assert e.__repr__(1) == '<e c="that &quot;sucks&quot;"></e>'
assert quote("]]>") == "]]&gt;"
assert quote('< dkdkdsd dkd sksdksdfsd fsdfdsf]]> kfdfkg >') == '&lt; dkdkdsd dkd sksdksdfsd fsdfdsf]]&gt; kfdfkg >'
assert parse('<x a="&lt;"></x>').__repr__(1) == '<x a="&lt;"></x>'
assert parse('<a xmlns="http://a"><b xmlns="http://b"/></a>').__repr__(1) == '<a xmlns="http://a"><b xmlns="http://b"></b></a>'
if __name__ == '__main__': unittest()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import fixtures
from oslo_log import log as logging
from oslo_utils import uuidutils
from nova import exception
from nova.objects import fields as obj_fields
from nova.tests.fixtures import nova as nova_fixtures
LOG = logging.getLogger(__name__)
class GlanceFixture(fixtures.Fixture):
"""A fixture for simulating Glance."""
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {
'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'size': '25165824',
'min_ram': 0,
'min_disk': 0,
'protected': False,
'visibility': 'public',
'tags': ['tag1', 'tag2'],
'properties': {
'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': obj_fields.Architecture.X86_64,
},
}
image2 = {
'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '58145823',
'min_ram': 0,
'min_disk': 0,
'protected': False,
'visibility': 'public',
'tags': [],
'properties': {
'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
},
}
image3 = {
'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'bare',
'disk_format': 'raw',
'size': '83594576',
'min_ram': 0,
'min_disk': 0,
'protected': False,
'visibility': 'public',
'tags': ['tag3', 'tag4'],
'properties': {
'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': obj_fields.Architecture.X86_64,
},
}
image4 = {
'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '84035174',
'min_ram': 0,
'min_disk': 0,
'protected': False,
'visibility': 'public',
'tags': [],
'properties': {
'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
},
}
image5 = {
'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '26360814',
'min_ram': 0,
'min_disk': 0,
'protected': False,
'visibility': 'public',
'tags': [],
'properties': {
'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None,
},
}
auto_disk_config_disabled_image = {
'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '49163826',
'min_ram': 0,
'min_disk': 0,
'protected': False,
'visibility': 'public',
'tags': [],
'properties': {
'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': obj_fields.Architecture.X86_64,
'auto_disk_config': 'False',
},
}
auto_disk_config_enabled_image = {
'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '74185822',
'min_ram': 0,
'min_disk': 0,
'protected': False,
'visibility': 'public',
'tags': [],
'properties': {
'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': obj_fields.Architecture.X86_64,
'auto_disk_config': 'True',
},
}
def __init__(self, test):
super().__init__()
self.test = test
self.images = {}
def setUp(self):
super().setUp()
self.test.useFixture(nova_fixtures.ConfPatcher(
group='glance', api_servers=['http://localhost:9292']))
self.test.stub_out(
'nova.image.glance.API.get_remote_image_service',
lambda context, image_href: (self, image_href))
self.test.stub_out(
'nova.image.glance.get_default_image_service',
lambda: self)
self.create(None, self.image1)
self.create(None, self.image2)
self.create(None, self.image3)
self.create(None, self.image4)
self.create(None, self.image5)
self.create(None, self.auto_disk_config_disabled_image)
self.create(None, self.auto_disk_config_enabled_image)
self._imagedata = {}
# TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(list(self.images.values()))
def download(
self, context, image_id, data=None, dst_path=None, trusted_certs=None,
):
self.show(context, image_id)
if data:
data.write(self._imagedata.get(image_id, b''))
elif dst_path:
with open(dst_path, 'wb') as data:
data.write(self._imagedata.get(image_id, b''))
def show(
self, context, image_id, include_locations=False, show_deleted=True,
):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warning(
'Unable to find image id %s. Have images: %s',
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
:raises: Duplicate if the image already exists.
"""
image_id = str(metadata.get('id', uuidutils.generate_uuid()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.CouldNotUploadImage(image_id=image_id)
image_meta = copy.deepcopy(metadata)
# Glance sets the size value when an image is created, so we
# need to do that here to fake things out if it's not provided
# by the caller. This is needed to avoid a KeyError in the
# image-size API.
if 'size' not in image_meta:
image_meta['size'] = None
# Similarly, Glance provides the status on the image once it's created
# and this is checked in the compute API when booting a server from
# this image, so we just fake it out to be 'active' even though this
# is mostly a lie on a newly created image.
if 'status' not in metadata:
image_meta['status'] = 'active'
# The owner of the image is by default the request context project_id.
if context and 'owner' not in image_meta.get('properties', {}):
# Note that normally "owner" is a top-level field in an image
# resource in glance but we have to fake this out for the images
# proxy API by throwing it into the generic "properties" dict.
image_meta.get('properties', {})['owner'] = context.project_id
self.images[image_id] = image_meta
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except KeyError:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
|
|
# Copyright (c) 2010, 2012, 2014 roger
# Copyright (c) 2011 Kirk Strauser
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2011 Roger Duran
# Copyright (c) 2012-2015 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014-2015 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 dmpayton
# Copyright (c) 2014 Jody Frankowski
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import subprocess
import cairocffi
from . import base
from .. import bar
from libqtile.log_utils import logger
from six import u
__all__ = [
'Volume',
]
re_vol = re.compile(r'\[(\d?\d?\d?)%\]')
class Volume(base._TextBox):
"""Widget that displays and changes the volume
If theme_path is set, the widget is drawn as icons.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("cardid", None, "Card Id"),
("device", "default", "Device Name"),
("channel", "Master", "Channel"),
("padding", 3, "Padding left and right. Calculated if None."),
("theme_path", None, "Path of the icons"),
("update_interval", 0.2, "Update time in seconds."),
("emoji", False, "Use emoji to display volume states, only if ``theme_path`` is not set. "
"The specified font needs to contain the correct unicode characters."),
("mute_command", None, "Mute command"),
("volume_up_command", None, "Volume up command"),
("volume_down_command", None, "Volume down command"),
("get_volume_command", None, "Command to get the current volume"),
]
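# A minimal usage sketch (not from the original module; the surrounding bar and
# config follow the usual qtile layout and are assumptions):
#
#   from libqtile import bar, widget
#   my_bar = bar.Bar([widget.Volume(update_interval=0.5)], 24)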
def __init__(self, **config):
base._TextBox.__init__(self, '0', width=bar.CALCULATED, **config)
self.add_defaults(Volume.defaults)
if self.theme_path:
self.length_type = bar.STATIC
self.length = 0
self.surfaces = {}
self.volume = None
def timer_setup(self):
self.timeout_add(self.update_interval, self.update)
if self.theme_path:
self.setup_images()
def create_amixer_command(self, *args):
cmd = ['amixer']
if (self.cardid is not None):
cmd.extend(['-c', str(self.cardid)])
if (self.device is not None):
cmd.extend(['-D', str(self.device)])
cmd.extend([x for x in args])
return cmd
def button_press(self, x, y, button):
if button == 5:
if self.volume_down_command is not None:
subprocess.call(self.volume_down_command)
else:
subprocess.call(self.create_amixer_command('-q',
'sset',
self.channel,
'2%-'))
elif button == 4:
if self.volume_up_command is not None:
subprocess.call(self.volume_up_command)
else:
subprocess.call(self.create_amixer_command('-q',
'sset',
self.channel,
'2%+'))
elif button == 1:
if self.mute_command is not None:
subprocess.call(self.mute_command)
else:
subprocess.call(self.create_amixer_command('-q',
'sset',
self.channel,
'toggle'))
self.draw()
def update(self):
vol = self.get_volume()
if vol != self.volume:
self.volume = vol
# Update the underlying canvas size before actually attempting
# to figure out how big it is and draw it.
self._update_drawer()
self.bar.draw()
self.timeout_add(self.update_interval, self.update)
def _update_drawer(self):
if self.theme_path:
self.drawer.clear(self.background or self.bar.background)
if self.volume <= 0:
img_name = 'audio-volume-muted'
elif self.volume <= 30:
img_name = 'audio-volume-low'
elif self.volume < 80:
img_name = 'audio-volume-medium'
else: # self.volume >= 80:
img_name = 'audio-volume-high'
self.drawer.ctx.set_source(self.surfaces[img_name])
self.drawer.ctx.paint()
elif self.emoji:
if self.volume <= 0:
self.text = u('\U0001f507')
elif self.volume <= 30:
self.text = u('\U0001f508')
elif self.volume < 80:
self.text = u('\U0001f509')
elif self.volume >= 80:
self.text = u('\U0001f50a')
else:
if self.volume == -1:
self.text = 'M'
else:
self.text = '%s%%' % self.volume
def setup_images(self):
for img_name in (
'audio-volume-high',
'audio-volume-low',
'audio-volume-medium',
'audio-volume-muted'
):
try:
img = cairocffi.ImageSurface.create_from_png(
os.path.join(self.theme_path, '%s.png' % img_name)
)
except cairocffi.Error:
self.theme_path = None
self.length_type = bar.CALCULATED
logger.exception('Volume switching to text mode')
return
input_width = img.get_width()
input_height = img.get_height()
sp = input_height / float(self.bar.height - 1)
width = input_width / sp
if width > self.length:
self.length = int(width) + self.actual_padding * 2
imgpat = cairocffi.SurfacePattern(img)
scaler = cairocffi.Matrix()
scaler.scale(sp, sp)
scaler.translate(self.actual_padding * -1, 0)
imgpat.set_matrix(scaler)
imgpat.set_filter(cairocffi.FILTER_BEST)
self.surfaces[img_name] = imgpat
def get_volume(self):
try:
get_volume_cmd = self.create_amixer_command('sget',
self.channel)
if self.get_volume_command:
get_volume_cmd = self.get_volume_command
mixer_out = self.call_process(get_volume_cmd)
except subprocess.CalledProcessError:
return -1
if '[off]' in mixer_out:
return -1
volgroups = re_vol.search(mixer_out)
if volgroups:
return int(volgroups.groups()[0])
else:
# this shouldn't happen
return -1
def draw(self):
if self.theme_path:
self.drawer.draw(offsetx=self.offset, width=self.length)
else:
base._TextBox.draw(self)
|
|
import grpc
import uuid
import logging
import os
import time
from google import auth
from google.protobuf import (duration_pb2)
from resultstoreapi.cloud.devtools.resultstore_v2.proto import (
resultstore_upload_pb2_grpc, resultstore_upload_pb2,
resultstore_download_pb2_grpc, resultstore_download_pb2, invocation_pb2,
invocation_pb2_grpc, action_pb2, common_pb2, configured_target_pb2,
target_pb2)
from resultstoreui_utils import (get_default_invocation,
get_default_configuration, gen_new_uuid,
get_parent, get_default_target,
get_default_configured_target,
get_default_action, get_name)
from bigstore_client import BigStoreClient
from collections import defaultdict
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.INFO)
_FIELD_MASK_HEADER = 'x-goog-fieldmask'
RESOURCE_TYPES = defaultdict()
class Error(Exception):
"""Generic Exception class for this module."""
class ResultStoreClient(object):
"""Client for ResultStore v2"""
def __init__(self, credentials, flags):
"""
Initialize the ResultStore Client
Args:
credentials (Credentials): Credentials used to make gRPCS calls
flags (absl.flags): Various flags defined in resultstoreui.py
"""
self.credentials = credentials
self.flags = flags
self.authorization_token = self.get_authorization_token()
def create_upload_request(self,
resources,
target_id=None,
config_id=None,
action_id=None):
"""
Create upload requests based on resource type
Args:
resources (dict): Maps a resource type key ('target', 'configuration',
'configured_target' or 'action') to the resource to be uploaded
target_id (str): Id of target to be uploaded
config_id (str): Id of config to be uploaded
action_id (str): Id of action to be uploaded
Returns:
Upload Request
"""
request = resultstore_upload_pb2.UploadRequest(
id={
'target_id': target_id,
'configuration_id': config_id,
'action_id': action_id
},
target=resources.get('target'),
action=resources.get('action'),
configuration=resources.get('configuration'),
configured_target=resources.get('configured_target'),
upload_operation=resultstore_upload_pb2.UploadRequest.CREATE)
return request
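# For example (a sketch with made-up ids): passing
#   {'target': some_target} together with target_id='my_target_id'
# yields an UploadRequest whose id.target_id is set, whose target field is
# populated, and whose upload_operation is UploadRequest.CREATE.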
def get_authorization_token(self):
"""
Generates an authorization token if the user does not specify one via
the --authorization_token flag
Returns:
An authorization token
"""
if not self.flags.authorization_token:
authorization_token = gen_new_uuid()
_LOGGER.error('Your auth token is: %s', authorization_token)
return authorization_token
return self.flags.authorization_token
def get_invocation(self, invocation_name, metadata=None):
"""
Get a resultstore invocation by name
Args:
invocation_name (str): The name of the invocation to get
metadata (Sequence[Tuple[str, str]]): Metadata param for the grpc call
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
stub = resultstore_download_pb2_grpc.ResultStoreDownloadStub(
self.credentials.get_active_channel())
if not metadata:
metadata = [(_FIELD_MASK_HEADER, 'name')]
request = resultstore_download_pb2.GetInvocationRequest(
name=invocation_name)
try:
response = stub.GetInvocation(request, metadata=metadata)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def create_invocation(self):
"""
Create a resultstore invocation
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
request_id = gen_new_uuid()
invocation = get_default_invocation()
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.CreateInvocationRequest(
request_id=request_id,
authorization_token=self.authorization_token,
invocation=invocation,
)
request.invocation.CopyFrom(invocation)
request.invocation_id = invocation.id.invocation_id
try:
response = stub.CreateInvocation(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def create_configuration(self, config):
"""
Create a resultstore invocation configuration
Args:
config (google.devtools.resultstore.v2.Configuration): The configuration to create
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.CreateConfigurationRequest(
request_id=gen_new_uuid(),
authorization_token=self.authorization_token,
parent=get_parent(config),
config_id=config.id.configuration_id)
request.configuration.CopyFrom(config)
try:
response = stub.CreateConfiguration(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def create_target(self, target):
"""
Create a resultstore invocation target
Args:
target (google.devtools.resultstore.v2.Target): The target to create
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.CreateTargetRequest(
request_id=gen_new_uuid(),
authorization_token=self.authorization_token,
parent=get_parent(target),
target_id=target.id.target_id)
request.target.CopyFrom(target)
try:
response = stub.CreateTarget(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def create_configured_target(self, config_target, parent=None):
"""
Create a resultstore invocation configured target
Args:
config_target (google.devtools.resultstore.v2.Target): The ConfiguredTarget to create
parent (str): The name of the parent resource of the ConfiguredTarget to create
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.CreateConfiguredTargetRequest(
request_id=gen_new_uuid(),
authorization_token=self.authorization_token,
config_id=config_target.id.configuration_id)
request.configured_target.CopyFrom(config_target)
if parent is not None:
request.parent = parent
request.configured_target.ClearField('id')
else:
request.parent = get_parent(config_target)
try:
response = stub.CreateConfiguredTarget(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def create_action(self, action, parent=None):
"""
Create a resultstore invocation configured target action
Args:
action (google.devtools.resultstore.v2.Action): The Action to create
parent (str): The name of the parent resource of the Action to create
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.CreateActionRequest(
request_id=gen_new_uuid(),
authorization_token=self.authorization_token,
action_id=action.id.action_id,
)
request.action.CopyFrom(action)
if parent is not None:
request.parent = parent
request.action.ClearField('id')
else:
request.parent = get_parent(action)
try:
response = stub.CreateAction(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def update_action(self, action, update_fields):
"""
Update a resultstore invocation configured target action
Args:
action (google.devtools.resultstore.v2.Action): The Action to update
update_fields (Sequence[str]): The list of paths specifying which fields to update
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
if not update_fields:
raise Error('At least one update field must be provided.')
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.UpdateActionRequest(
authorization_token=self.authorization_token)
request.action.CopyFrom(action)
request.update_mask.paths.extend(update_fields)
try:
response = stub.UpdateAction(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def update_configured_target(self, config_target, update_fields):
"""
Update a resultstore invocation configured target
Args:
config_target (google.devtools.resultstore.v2.ConfiguredTarget): The ConfiguredTarget
to update
update_fields (Sequence[str]): The list of paths specifying which fields to update
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
if not update_fields:
raise Error('At least one update field must be provided.')
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.UpdateConfiguredTargetRequest(
authorization_token=self.authorization_token)
request.configured_target.CopyFrom(config_target)
request.update_mask.paths.extend(update_fields)
try:
response = stub.UpdateConfiguredTarget(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def update_target(self, target, update_fields):
"""
Update a resultstore invocation target
Args:
target (google.devtools.resultstore.v2.Target): The Target
to update
update_fields (Sequence[str]): The list of paths specifying which fields to update
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
Raises:
Error: If no update fields are provided
"""
if not update_fields:
raise Error('At least one update field must be provided.')
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.UpdateTargetRequest(
authorization_token=self.authorization_token)
request.target.CopyFrom(target)
request.update_mask.paths.extend(update_fields)
try:
response = stub.UpdateTarget(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def finalize_configured_target(self, invocation_id, target_id, config_id):
"""
Finalize a resultstore invocation configured target
Args:
invocation_id (str): The invocation id for the configured target to finalize
target_id (str): The target id for the configured target to finalize
            config_id (str): The configuration id for the configured target to finalize
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.FinalizeConfiguredTargetRequest(
authorization_token=self.authorization_token,
name=get_name(invocation_id, target_id, config_id))
try:
response = stub.FinalizeConfiguredTarget(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def finalize_target(self, invocation_id, target_id):
"""
Finalize a resultstore invocation target
Args:
invocation_id (str): The invocation id for the target to finalize
target_id (str): The id of the target to finalize
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.FinalizeTargetRequest(
authorization_token=self.authorization_token,
name=get_name(invocation_id, target_id))
try:
response = stub.FinalizeTarget(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def create_resource_requests(self, invocation_id, config_id):
"""
Create upload requests to be batch uploaded
Args:
invocation_id (str): Invocation to upload to
config_id (str): Invocation configuration id to use or create
Returns:
A list of upload requests
"""
requests = []
if self.flags.create_config:
config = get_default_configuration(invocation_id)
config_request = self.create_upload_request(
defaultdict(None, {'configuration': config}),
config_id=config.id.configuration_id)
requests.append(config_request)
target = get_default_target(invocation_id)
target_id = target.id.target_id
target_request = self.create_upload_request(
defaultdict(None, {'target': target}), target_id)
files = None
if self.flags.files:
files = self._upload_files(target_id)
requests.append(target_request)
config_target = get_default_configured_target(invocation_id, target_id,
config_id)
config_target_request = self.create_upload_request(
defaultdict(None, {'configured_target': config_target}), target_id,
config_id)
requests.append(config_target_request)
action = get_default_action(invocation_id,
target_id,
config_id,
files=files)
action_id = action.id.action_id
action_request = self.create_upload_request(
defaultdict(None, {'action': action}), target_id, config_id,
action_id)
requests.append(action_request)
return requests
def finalize_batch_upload(self, resume_token, next_resume_token,
invocation_id):
"""
Finalize an invocation that was in batch mode
Args:
resume_token (bytes): Current resume token
next_resume_token (bytes): Next resume token
invocation_id (str): Invocation ID to be finalized
"""
invocation = get_default_invocation(invocation_id)
finalize_invocation_request = resultstore_upload_pb2.UploadRequest(
invocation=invocation,
upload_operation=(resultstore_upload_pb2.UploadRequest.FINALIZE))
self.batch_upload(resume_token, next_resume_token, invocation_id,
[finalize_invocation_request])
def batch_upload(self, resume_token, next_resume_token, invocation_id,
upload_requests):
"""
Batch upload the provided upload_requests to the invocation
Args:
resume_token (bytes): Current resume token
next_resume_token (bytes): Next resume token
invocation_id (str): Invocation ID to upload to
upload_requests (Sequence[UploadRequest]): List of UploadRequests to be uploaded
Returns:
The response or error from the ResultStore v2 gRPC Stub Call
"""
invocation_name = 'invocations/' + invocation_id
stub = resultstore_upload_pb2_grpc.ResultStoreUploadStub(
self.credentials.get_active_channel())
request = resultstore_upload_pb2.UploadBatchRequest(
parent=invocation_name,
resume_token=resume_token,
next_resume_token=next_resume_token,
authorization_token=self.authorization_token)
request.upload_requests.extend(upload_requests)
try:
response = stub.UploadBatch(request)
except grpc.RpcError as rpc_error:
_LOGGER.error('Received error: %s', rpc_error)
return rpc_error
else:
_LOGGER.info('Received message: %s', response)
return response
def batch_upload_wrapper(self, resume_token, next_resume_token):
"""
Batch upload to a given invocation
Args:
resume_token (bytes): Current resume token
next_resume_token (bytes): Next resume token
"""
batched_requests = self.create_resource_requests(
self.flags.invocation_id, self.flags.config_id)
self.batch_upload(resume_token, next_resume_token,
self.flags.invocation_id, batched_requests)
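    # Illustrative sketch only (never called): the resume-token values below are
    # placeholder assumptions; it shows how the batch helpers above are typically
    # chained to upload the default resources and then finalize the invocation.
    def _example_batch_lifecycle(self):
        """Upload the default resources in one batch, then finalize the invocation."""
        self.batch_upload_wrapper(b'token-0', b'token-1')
        self.finalize_batch_upload(b'token-1', b'token-2',
                                   self.flags.invocation_id)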
def single_upload(self):
"""
Uploads a single invocation to resultstore
"""
config_id = self.flags.config_id
config = get_default_configuration(self.flags.invocation_id, config_id)
self.create_configuration(config)
config_id = config.id.configuration_id
target = get_default_target(self.flags.invocation_id,
self.flags.target_name)
self.create_target(target)
target_id = target.id.target_id
config_target = get_default_configured_target(self.flags.invocation_id,
target_id, config_id)
self.create_configured_target(config_target)
action = get_default_action(self.flags.invocation_id, target_id,
config_id)
if self.flags.action_type == 'Test':
test_action = action_pb2.TestAction()
action.test_action.CopyFrom(test_action)
else:
build_action = action_pb2.BuildAction()
action.build_action.CopyFrom(build_action)
self.create_action(action)
self._file_upload_helper(target_id, action.id.action_id, config_id)
self.finalize_configured_target(self.flags.invocation_id, target_id,
config_id)
self.finalize_target(self.flags.invocation_id, target_id)
def _file_upload_helper(self, target_id, action_id, config_id):
"""
Uploads files specified by the --files flag to the given target
Args:
target_id (str): Id of the target to update
action_id (str): Id of the action to update
config_id (str): Id of the config to be updated
"""
result_status = common_pb2.Status.Value(self.flags.status)
start_time = time.time()
additional_files = self._upload_files(target_id)
end_time = time.time()
duration_seconds = int(end_time - start_time)
duration = duration_pb2.Duration(seconds=duration_seconds)
new_action = action_pb2.Action(
name=get_name(self.flags.invocation_id, target_id, config_id,
action_id),
timing=common_pb2.Timing(duration=duration),
status_attributes=common_pb2.StatusAttributes(
status=result_status),
files=additional_files)
self.update_action(new_action,
['timing.duration', 'status_attributes', 'files'])
new_config_target = configured_target_pb2.ConfiguredTarget(
name=get_name(self.flags.invocation_id, target_id, config_id),
timing=common_pb2.Timing(duration=duration))
self.update_configured_target(new_config_target,
['timing.duration', 'status_attributes'])
new_target = target_pb2.Target(
name=get_name(self.flags.invocation_id, target_id),
timing=common_pb2.Timing(duration=duration))
self.update_target(new_target,
['timing.duration', 'status_attributes'])
def _upload_files(self, target_id):
"""
Uploads files to bigstore at the given target_id
Args:
target_id (str): Target for files to be associated with
Returns:
uploaded_files: A list of file_pb2.File() objects
"""
storage_dir = '{}/{}/'.format(self.flags.invocation_id, target_id)
bigstore_client = BigStoreClient(self.credentials,
self.flags.bigstore_project_name,
storage_dir, self.flags.bucket_name)
additional_files = bigstore_client.upload_files_to_bigstore(
self.flags.files)
return additional_files
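# A minimal usage sketch (not executed on import). The uploader object is passed
# in as `client` because its class name is defined elsewhere in this module, and
# the helper name and parameters are assumptions; only update_action()'s
# update-mask behaviour is taken from the methods above.
def _example_update_action_timing(client, invocation_id, target_id, config_id,
                                  action_id, seconds):
    """Illustrative only: patch an Action's duration via an update mask."""
    action = action_pb2.Action(
        name=get_name(invocation_id, target_id, config_id, action_id),
        timing=common_pb2.Timing(
            duration=duration_pb2.Duration(seconds=seconds)))
    # Only the paths listed in the update mask are written server-side.
    return client.update_action(action, ['timing.duration'])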
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import multiprocessing
import os
import re
import socket
import subprocess
import sys
import warnings
from django.conf import settings
from django.core.management import base
from django import template
# Suppress DeprecationWarnings which clutter the output to the point of
# rendering it unreadable.
warnings.simplefilter('ignore')
cmd_name = __name__.split('.')[-1]
CURDIR = os.path.realpath(os.path.dirname(__file__))
PROJECT_PATH = os.path.realpath(os.path.join(CURDIR, '../..'))
STATIC_PATH = os.path.realpath(os.path.join(PROJECT_PATH, '../static'))
# Known apache regular expression to retrieve its version
APACHE_VERSION_REG = r'Apache/(?P<version>[\d.]*)'
# Known apache commands to retrieve its version
APACHE2_VERSION_CMDS = (
(('/usr/sbin/apache2ctl', '-V'), APACHE_VERSION_REG),
(('/usr/sbin/apache2', '-v'), APACHE_VERSION_REG),
)
# Known apache log directory locations
APACHE_LOG_DIRS = (
'/var/log/httpd', # RHEL / Red Hat / CentOS / Fedora Linux
'/var/log/apache2', # Debian / Ubuntu Linux
)
# Default log directory
DEFAULT_LOG_DIR = '/var/log'
def _getattr(obj, name, default):
"""Like getattr but return `default` if None or False.
    By default, getattr(obj, name, default) returns `default` only if the
    attribute does not exist; here, we return `default` even if the attribute
    evaluates to None or False.
"""
value = getattr(obj, name, default)
if value:
return value
else:
return default
context = template.Context({
'DJANGO_SETTINGS_MODULE': os.environ['DJANGO_SETTINGS_MODULE'],
'HOSTNAME': socket.getfqdn(),
'PROJECT_PATH': os.path.realpath(
_getattr(settings, 'ROOT_PATH', PROJECT_PATH)),
'STATIC_PATH': os.path.realpath(
_getattr(settings, 'STATIC_ROOT', STATIC_PATH)),
'SSLCERT': '/etc/pki/tls/certs/ca.crt',
'SSLKEY': '/etc/pki/tls/private/ca.key',
'CACERT': None,
'PROCESSES': multiprocessing.cpu_count() + 1,
})
context['PROJECT_ROOT'] = os.path.dirname(context['PROJECT_PATH'])
context['PROJECT_DIR_NAME'] = os.path.basename(
context['PROJECT_PATH'].split(context['PROJECT_ROOT'])[1])
context['PROJECT_NAME'] = context['PROJECT_DIR_NAME']
context['WSGI_FILE'] = os.path.join(
context['PROJECT_PATH'], 'wsgi/horizon.wsgi')
VHOSTNAME = context['HOSTNAME'].split('.')
VHOSTNAME[0] = context['PROJECT_NAME']
context['VHOSTNAME'] = '.'.join(VHOSTNAME)
if len(VHOSTNAME) > 1:
context['DOMAINNAME'] = '.'.join(VHOSTNAME[1:])
else:
context['DOMAINNAME'] = 'openstack.org'
context['ADMIN'] = 'webmaster@%s' % context['DOMAINNAME']
context['ACTIVATE_THIS'] = None
virtualenv = os.environ.get('VIRTUAL_ENV')
if virtualenv:
activate_this = os.path.join(
virtualenv, 'bin/activate_this.py')
if os.path.exists(activate_this):
context['ACTIVATE_THIS'] = activate_this
# Try to detect apache's version
# We fall back to 2.4.
context['APACHE2_VERSION'] = 2.4
APACHE2_VERSION = None
for cmd in APACHE2_VERSION_CMDS:
if os.path.exists(cmd[0][0]):
try:
reg = re.compile(cmd[1])
res = reg.search(
subprocess.check_output(cmd[0], stderr=subprocess.STDOUT))
if res:
APACHE2_VERSION = res.group('version')
break
except subprocess.CalledProcessError:
pass
if APACHE2_VERSION:
ver_nums = APACHE2_VERSION.split('.')
if len(ver_nums) >= 2:
try:
context['APACHE2_VERSION'] = float('.'.join(ver_nums[:2]))
except ValueError:
pass
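# Quick illustration of how APACHE_VERSION_REG feeds the float conversion above;
# the sample apache output string and the helper name are assumptions, and the
# function is never called by this command.
def _example_parse_apache_version(output='Server version: Apache/2.4.41 (Ubuntu)'):
    """Illustrative only: '2.4.41' is reduced to the major.minor float 2.4."""
    res = re.search(APACHE_VERSION_REG, output)
    if not res:
        return None
    return float('.'.join(res.group('version').split('.')[:2]))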
def find_apache_log_dir():
for log_dir in APACHE_LOG_DIRS:
if os.path.exists(log_dir) and os.path.isdir(log_dir):
return log_dir
return DEFAULT_LOG_DIR
context['LOGDIR'] = find_apache_log_dir()
class Command(base.BaseCommand):
args = ''
help = """Create %(wsgi_file)s
or the contents of an apache %(p_name)s.conf file (on stdout).
The apache configuration is generated on stdout because the location of this
file is distribution dependent.
examples::
manage.py %(cmd_name)s --wsgi # creates %(wsgi_file)s
manage.py %(cmd_name)s --apache # creates an apache vhost conf file (on \
stdout).
manage.py %(cmd_name)s --apache --ssl --mail=%(admin)s \
--project=%(p_name)s --hostname=%(hostname)s
To create an apache configuration file, redirect the output to the
location you desire, e.g.::
manage.py %(cmd_name)s --apache > \
/etc/httpd/conf.d/openstack_dashboard.conf
""" % {
'cmd_name': cmd_name,
'p_name': context['PROJECT_NAME'],
'wsgi_file': context['WSGI_FILE'],
'admin': context['ADMIN'],
'hostname': context['VHOSTNAME'], }
def add_arguments(self, parser):
# TODO(ygbo): Add an --nginx option.
parser.add_argument(
"-a", "--apache",
default=False, action="store_true", dest="apache",
help="generate an apache vhost configuration"
)
parser.add_argument(
"--cacert",
dest="cacert",
help=("Use with the --apache and --ssl option to define the path"
" to the SSLCACertificateFile"),
metavar="CACERT"
)
parser.add_argument(
"-f", "--force",
default=False, action="store_true", dest="force",
help="force overwriting of an existing %s file" %
context['WSGI_FILE']
)
parser.add_argument(
"-H", "--hostname",
dest="hostname",
help=("Use with the --apache option to define the server's"
" hostname (default : %s)") % context['VHOSTNAME'],
metavar="HOSTNAME"
)
parser.add_argument(
"--logdir",
dest="logdir",
help=("Use with the --apache option to define the path to "
"the apache log directory(default : %s)"
% context['LOGDIR']),
metavar="CACERT"
)
parser.add_argument(
"-m", "--mail",
dest="mail",
help=("Use with the --apache option to define the web site"
" administrator's email (default : %s)") %
context['ADMIN'],
metavar="MAIL"
)
parser.add_argument(
"-n", "--namedhost",
default=False, action="store_true", dest="namedhost",
help=("Use with the --apache option. The apache vhost "
"configuration will work only when accessed with "
"the proper hostname (see --hostname).")
)
parser.add_argument(
"--processes",
dest="processes",
help=("Use with the --apache option to define the number of "
"apache processes (by default the number of cpus +1 which "
"is %s on this machine).") % context['PROCESSES'],
metavar="PROCESSES"
)
parser.add_argument(
"-p", "--project",
dest="project",
help=("Use with the --apache option to define the project "
"name (default : %s)") % context['PROJECT_NAME'],
metavar="PROJECT"
)
parser.add_argument(
"-s", "--ssl",
default=False, action="store_true", dest="ssl",
help=("Use with the --apache option. The apache vhost "
"configuration will use an SSL configuration")
)
parser.add_argument(
"--sslcert",
dest="sslcert",
help=("Use with the --apache and --ssl option to define "
"the path to the SSLCertificateFile (default : %s)"
) % context['SSLCERT'],
metavar="SSLCERT"
)
parser.add_argument(
"--sslkey",
dest="sslkey",
help=("Use with the --apache and --ssl option to define "
"the path to the SSLCertificateKeyFile "
"(default : %s)") % context['SSLKEY'],
metavar="SSLKEY"
)
parser.add_argument(
"--apache-version",
dest="apache_version",
type=float,
help=("Use with the --apache option to define the apache "
"major (as a floating point number) version "
"(default : %s)."
% context['APACHE2_VERSION']),
metavar="APACHE_VERSION"
)
parser.add_argument(
"-w", "--wsgi",
default=False, action="store_true", dest="wsgi",
help="generate the horizon.wsgi file"
)
def handle(self, *args, **options):
force = options.get('force')
context['SSL'] = options.get('ssl')
if options.get('mail'):
context['ADMIN'] = options['mail']
if options.get('cacert'):
context['CACERT'] = options['cacert']
if options.get('logdir'):
context['LOGDIR'] = options['logdir'].rstrip('/')
if options.get('processes'):
context['PROCESSES'] = options['processes']
if options.get('project'):
context['PROJECT_NAME'] = options['project']
if options.get('hostname'):
context['VHOSTNAME'] = options['hostname']
if options.get('sslcert'):
context['SSLCERT'] = options['sslcert']
if options.get('sslkey'):
context['SSLKEY'] = options['sslkey']
if options.get('apache_version'):
context['APACHE2_VERSION'] = options['apache_version']
if options.get('namedhost'):
context['NAMEDHOST'] = context['VHOSTNAME']
else:
context['NAMEDHOST'] = '*'
# Generate the WSGI.
if options.get('wsgi'):
with open(
os.path.join(CURDIR, 'horizon.wsgi.template'), 'r'
) as fp:
wsgi_template = template.Template(fp.read())
if not os.path.exists(context['WSGI_FILE']) or force:
with open(context['WSGI_FILE'], 'w') as fp:
fp.write(wsgi_template.render(context))
print('Generated "%s"' % context['WSGI_FILE'])
else:
sys.exit('"%s" already exists, use --force to overwrite' %
context['WSGI_FILE'])
# Generate the apache configuration.
elif options.get('apache'):
with open(
os.path.join(CURDIR, 'apache_vhost.conf.template'), 'r'
) as fp:
wsgi_template = template.Template(fp.read())
sys.stdout.write(wsgi_template.render(context))
else:
self.print_help('manage.py', cmd_name)
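# A minimal sketch (never called) of rendering the same template outside the
# management command, e.g. from a test; the vhost template file name is the one
# used in handle() above, the helper name is an assumption.
def _example_render_vhost():
    """Illustrative only: render the apache vhost template with the module context."""
    with open(os.path.join(CURDIR, 'apache_vhost.conf.template'), 'r') as fp:
        return template.Template(fp.read()).render(context)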
|
|
import socialforcemodel as sfm
import numpy as np
import matplotlib.pyplot as plt
import os
import csv
import psutil
from pympler import asizeof, tracker
np.seterr(all='raise')
try:
import progressbar
except ImportError:
print "Progressbar package not found. Please run 'pip install progressbar'"
exit()
def sensor(world, position, sensor_range):
peds = world.quadtree.get_pedestrians_in_range(position, sensor_range)
actual_peds = set()
range_squared = sensor_range**2
for p in peds:
if ((p.position[0] - position[0])**2 +
(p.position[1] - position[1])**2) <= range_squared:
actual_peds.add(p)
results = {}
results['count'] = len(actual_peds)
if len(actual_peds):
average_speed = 0.0
for p in actual_peds:
average_speed += p.speed
results['average_speed'] = average_speed / len(actual_peds)
else:
results['average_speed'] = 0.0
return results
def sensor_far(world):
return sensor(world, [14.0, 5.0], 2.0)
def sensor_near(world):
return sensor(world, [8.0, 5.0], 2.0)
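# Illustrative sketch (never called): the sensors above return a dict of the
# form {'count': <int>, 'average_speed': <float>}; the position and range here
# are assumptions chosen for a probe halfway down the corridor.
def sensor_example(world):
    result = sensor(world, [10.0, 5.0], 3.0)
    return result['count'], result['average_speed']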
def plot(item, measurements, fig, subplot=111):
ax = fig.add_subplot(subplot)
ax.scatter(range(len(measurements)), measurements)
ax.set_title('average ' + item[2])
def main(args):
barrier_start = 50.0
barrier_points = [[50.0, 1.0], [50.0, 4.0]]
barrier_time = args.barriertime
mean, theta, sigma = 0.0, 0.05, 0.005
measurements = []
for r in range(args.repetitions):
barrier_state = 0
if os.path.exists("hddm/{}_pedestrians_{}.csv".format(args.outfile, r)):
print "Already done, continue..."
continue
with open("hddm/{}_pedestrians_{}.csv".format(args.outfile, r), "w") as ped_outfile:
ped_writer = csv.writer(ped_outfile)
ped_writer.writerow(['p', 'mass', 'radius', 'desired_velocity', 'maximum_velocity'])
with open("hddm/{}_measurements_{}.csv".format(args.outfile, r), "w") as csv_outfile:
csv_writer = csv.writer(csv_outfile)
csv_writer.writerow(['t', 'p', 'pos_x', 'pos_y', 'vel_x', 'vel_y', 'speed', 'local_density', 'local_velocity_variance'])
all_pedestrians = set()
if not os.path.exists("img"):
os.makedirs("img")
if not os.path.exists("img/" + args.outfile):
os.makedirs("img/" + args.outfile)
if not os.path.exists("measurements"):
os.makedirs("measurements")
measurements.append({
't': [],
'count_near': [],
'count_far': [],
'speed_near': [],
'speed_far': []
})
loader = sfm.ParameterLoader(args.file)
world = loader.world
if args.pedestrian_file != '':
with open(args.pedestrian_file) as infile:
import pickle
data = pickle.load(infile)
# exit()
for p in data:
ped = sfm.Pedestrian(group=world.groups[0],
radius=p['radius'],
mass=p['mass'],
desired_velocity=p['desired_velocity'],
maximum_velocity=p['maximum_velocity'],
relaxation_time=p['relaxation_time'],
target_path=p['target_path'],
start=p['position'])
ped.velocity = p['velocity']
ped.next_velocity = p['velocity']
ped.speed = p['speed']
ped.next_speed = p['speed']
world.groups[0].add_pedestrian(ped)
print "Imported {} pedestrians".format(len(world.groups[0].get_pedestrians()))
world.update()
world.groups[0].spawn_max = args.max_pedestrians
# world.groups[0].set_ornstein_uhlenbeck_process(self, 0, 0.05, 1.0):
for group in world.groups:
group.set_ornstein_uhlenbeck_process(mean, theta, sigma)
bar = progressbar.ProgressBar()
for step in bar(range(args.steps)):
if not world.step():
break
world.update()
for group in world.groups:
for p in group.get_pedestrians():
all_pedestrians.add(p)
# if step % 5 == 0:
# figure = world.plot()
# figure.savefig("img/" + args.outfile + "/" + str((step + 1) // 5).zfill(4) + ".png",
# bbox_inches = 'tight',
# pad_inches = 0.1)
# figure.clear()
# plt.close(figure)
# if step % 5 == 0:
# near = sensor_near(world)
# far = sensor_far(world)
# measurements[r]['t'].append(world.time)
# measurements[r]['count_near'].append(near['count'])
# measurements[r]['count_far'].append(far['count'])
# measurements[r]['speed_near'].append(near['average_speed'])
# measurements[r]['speed_far'].append(far['average_speed'])
# print len(all_pedestrians)
# Cleanup to avoid high memory usage.
if step % 200 == 0:
# tr.print_diff()
# process = psutil.Process(os.getpid())
# print "Before:", process.memory_info().rss
# print len(all_pedestrians)
# Get all pedestrians no longer in simulation.
current_pedestrians = set()
for group in world.groups:
current_pedestrians = current_pedestrians.union(group.get_pedestrians())
retired_pedestrians = all_pedestrians - current_pedestrians
# Write all pedestrian data to file.
with open("hddm/{}_pedestrians_{}.csv".format(args.outfile, r), "a") as ped_outfile:
with open("hddm/{}_measurements_{}.csv".format(args.outfile, r), "a") as csv_outfile:
ped_writer = csv.writer(ped_outfile)
csv_writer = csv.writer(csv_outfile)
for p in retired_pedestrians:
m = p.measurements
row = [p.id, "%.4f" % p.mass, "%.4f" % p.radius,
"%.4f" % p.desired_velocity, "%.4f" % p.maximum_velocity]
ped_writer.writerow(row)
for p in all_pedestrians:
m = p.measurements
for arr in m:
s = arr['self']
row = ["%.2f" % s['time'], p.id, "%.4f" % s['position'][0], "%.4f" % s['position'][1],
"%.4f" % s['velocity'][0], "%.4f" % s['velocity'][1], "%.4f" % s['speed'],
"%.4f" % arr['forces']['local_density'], "%.4f" % arr['forces']['local_velocity_variance']]
csv_writer.writerow(row)
# Empty all data.
p.measurements = []
# Remove pedestrians from local variables.
all_pedestrians = current_pedestrians
# process = psutil.Process(os.getpid())
# print "After:", process.memory_info().rss
if barrier_state == 0 and barrier_time != 0 and world.time > barrier_start:
barrier_state = 1
world.add_obstacle(sfm.Obstacle(barrier_points))
elif barrier_state == 1 and world.time > barrier_start + barrier_time:
barrier_state = 2
del world.obstacles[-1]
histogram = None
# Write all pedestrian data to file.
with open("hddm/{}_pedestrians_{}.csv".format(args.outfile, r), "a") as ped_outfile:
with open("hddm/{}_measurements_{}.csv".format(args.outfile, r), "a") as csv_outfile:
ped_writer = csv.writer(ped_outfile)
csv_writer = csv.writer(csv_outfile)
for p in all_pedestrians:
if p.id == 0:
histogram = [m['self']['random'] for m in p.measurements]
m = p.measurements
row = [p.id, "%.4f" % p.mass, "%.4f" % p.radius,
"%.4f" % p.desired_velocity, "%.4f" % p.maximum_velocity]
ped_writer.writerow(row)
m = p.measurements
for arr in m:
s = arr['self']
row = ["%.2f" % s['time'], p.id, "%.4f" % s['position'][0], "%.4f" % s['position'][1],
"%.4f" % s['velocity'][0], "%.4f" % s['velocity'][1], "%.4f" % s['speed'],
"%.4f" % arr['forces']['local_density'], "%.4f" % arr['forces']['local_velocity_variance']]
csv_writer.writerow(row)
# plt.clf()
# plt.hist(histogram)
# plt.show()
if __name__ == '__main__':
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('file', help='YAML-file')
parser.add_argument('-s', '--steps', help='Number of steps', type=int, default=500)
parser.add_argument('-o', '--outfile', help='File for measurements', default='measurements')
parser.add_argument('-p', '--pedestrian_file', help='Pedestrian file', default='')
parser.add_argument('-m', '--max_pedestrians', help='max pedestrians', type=int, default=100)
parser.add_argument('-r', '--repetitions', default=1, type=int)
parser.add_argument('-b', '--barriertime', default=0, type=int)
args = parser.parse_args(sys.argv[1:])
main(args)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._bindings_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BindingsOperations:
"""BindingsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
**kwargs: Any
) -> "_models.BindingResource":
"""Get a Binding and its properties.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param binding_name: The name of the Binding resource.
:type binding_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BindingResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2020_07_01.models.BindingResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BindingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: "_models.BindingResource",
**kwargs: Any
) -> "_models.BindingResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(binding_resource, 'BindingResource')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BindingResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BindingResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('BindingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: "_models.BindingResource",
**kwargs: Any
) -> AsyncLROPoller["_models.BindingResource"]:
"""Create a new Binding or update an exiting Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param binding_name: The name of the Binding resource.
:type binding_name: str
:param binding_resource: Parameters for the create or update operation.
:type binding_resource: ~azure.mgmt.appplatform.v2020_07_01.models.BindingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2020_07_01.models.BindingResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
binding_resource=binding_resource,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BindingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Operation to delete a Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param binding_name: The name of the Binding resource.
:type binding_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: "_models.BindingResource",
**kwargs: Any
) -> "_models.BindingResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(binding_resource, 'BindingResource')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BindingResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('BindingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: "_models.BindingResource",
**kwargs: Any
) -> AsyncLROPoller["_models.BindingResource"]:
"""Operation to update an exiting Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param binding_name: The name of the Binding resource.
:type binding_name: str
:param binding_resource: Parameters for the update operation.
:type binding_resource: ~azure.mgmt.appplatform.v2020_07_01.models.BindingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2020_07_01.models.BindingResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
binding_resource=binding_resource,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BindingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
service_name: str,
app_name: str,
**kwargs: Any
) -> AsyncIterable["_models.BindingResourceCollection"]:
"""Handles requests to list all resources in an App.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BindingResourceCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2020_07_01.models.BindingResourceCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResourceCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("BindingResourceCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings'} # type: ignore
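# A minimal usage sketch (not part of the generated client). How the
# BindingsOperations instance is obtained is an assumption; only the operation
# name and argument list come from the list() method defined above.
async def _example_list_binding_names(bindings, resource_group_name, service_name, app_name):
    """Illustrative only: collect the names of all Bindings under an App."""
    names = []
    async for binding in bindings.list(resource_group_name, service_name, app_name):
        names.append(binding.name)
    return names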
|
|
'''
Created on 09.01.2011
@author: michi
'''
from PyQt4.QtCore import QAbstractTableModel,QVariant, Qt, QModelIndex
from sqlalchemy import Table
from sqlalchemy.sql.expression import _UnaryExpression,Alias
from sqlalchemy.sql.operators import asc_op,desc_op
import sqlalchemy.schema
from sqlalchemy.sql import func
from ems import qt4
class AlchemyCoreModelR(QAbstractTableModel):
asc = 1
desc = 2
def __init__(self,con,queryBuilder,dataListener=None):
'''
        The original query contains:
            from
            where
        and has NO order_by,
        NO offset and
        NO limit.
'''
queryBuilder.setDirtyListener(self.forceReset)
self._queryBuilder = queryBuilder
self._connection = con
self.__labeled2DottedColumn = None
self._resultCache = {}
super(AlchemyCoreModelR, self).__init__()
self._dirty = True
self.__lastSortByIndex = None
self.__lastSortByOrder = None
self._inResetProgress = False
self.__dataListener = dataListener
self.__limit = None
self.__offset = None
self.columnHeaderTranslated = {}
@property
def queryBuilder(self):
return self._queryBuilder
def getLimit(self):
return self.__limit
def setLimit(self,limit,skipReset=False):
if isinstance(limit, tuple):
self.__offset = limit[0]
self.__limit = limit[1]
else:
self.__limit = limit
if not skipReset:
self.forceReset()
def delLimit(self):
self.__limit = None
self.forceReset()
limit = property(getLimit, setLimit, delLimit,
"Set limit (and offset) part of tplQuery")
def getOffset(self):
return self.__offset
def setOffset(self,offset,skipReset=False):
if isinstance(offset,tuple):
self.__offset = offset[0]
self.__limit = offset[1]
else:
self.__offset = offset
if not skipReset:
self.forceReset()
def delOffset(self):
self.__offset = None
self.forceReset()
offset = property(getOffset, setOffset, delOffset,
"Set offset (and limit) part of tplQuery")
def getDottedColumnNameOfLabeled(self,labeledName):
if self.__labeled2DottedColumn is None:
self.__labeled2DottedColumn = {}
for column in self._queryBuilder.possibleColumns:
columnName = str(column)
self.__labeled2DottedColumn[columnName.replace('.', '_')] = \
str(columnName)
return self.__labeled2DottedColumn[labeledName]
def buildQuery(self,skipLimit=False,skipOffset=False):
query = self._queryBuilder.getQuery()
if self.__offset is not None and not skipOffset:
query = query.offset(self.__offset)
if self.__limit is not None and not skipLimit:
query = query.limit(self.__limit)
return query
def getDataListener(self):
return self.__dataListener
def setDataListener(self, dataListener):
self.__dataListener = dataListener
def delDataListener(self):
self.__dataListener = None
dataListener = property(getDataListener,setDataListener,delDataListener)
def data(self, index, role=Qt.DisplayRole):
if self.__dataListener is not None:
self.__dataListener.data(index, role)
self.perform()
if not index.isValid() or \
not (0 <= index.row() < self.rowCount()):
return QVariant()
if role == Qt.DisplayRole:
value = self._resultCache[index.row()][index.column()]
if isinstance(value, basestring):
return QVariant(unicode(value))
return QVariant(value)
if role == qt4.ColumnNameRole:
return QVariant(unicode(self._queryBuilder.currentColumnList[index.column()]))
return QVariant()
def rowCount(self, index=QModelIndex()):
self.perform()
return len(self._resultCache)
def columnCount(self, index=QModelIndex()):
self.perform()
return len(self._queryBuilder.currentColumnList)
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))
return QVariant(int(Qt.AlignRight|Qt.AlignVCenter))
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
# labeled = str(self.currentColumnList[section])
# return QVariant(self.getDottedColumnNameOfLabeled(labeled))
columnName = unicode(self._queryBuilder.currentColumnList[section])
            if columnName in self.columnHeaderTranslated:
return QVariant(self.columnHeaderTranslated[columnName])
return QVariant(columnName)
return QVariant(int(section + 1))
def perform(self):
if not self._dirty and not self._queryBuilder.dirty:
return
self._inResetProgress = True
lastPerformedQuery = self.buildQuery()
labeledQuery = lastPerformedQuery.apply_labels()
#print labeledQuery
sqlResult = self._connection.execute(labeledQuery)
self._resultCache.clear()
i=self.getCacheIndexOffset()
for row in sqlResult:
self._resultCache[i] = row
i += 1
# self.currentColumnList = []
# for column in labeledQuery.inner_columns:
# self.currentColumnList.append(column)
self._dirty = False
self._inResetProgress = False
self.reset()
def getCacheIndexOffset(self):
return 0
def sort(self,columnIndex,order):
        # Fast path: skip the work below if the sort column and order are unchanged.
if self.__lastSortByIndex == columnIndex and \
self.__lastSortByOrder == order:
return
self.__lastSortByIndex = columnIndex
self.__lastSortByOrder = order
column = self._queryBuilder.currentColumnList[columnIndex]
orderByTuple = self._queryBuilder.orderBy
        # Check whether the query was previously ordered and, if so, in which direction.
prevColumn = None
prevDirection = None
#No ORDER BY was set
if orderByTuple is None:
pass
#Simple ORDER BY by Column Name
elif isinstance(orderByTuple[0],sqlalchemy.schema.Column):
prevColumn = orderByTuple[0]
prevDirection = 'ASC'
#ORDER BY desc() or asc()
elif isinstance(orderByTuple[0],_UnaryExpression):
prevColumn = orderByTuple[0].element
if orderByTuple[0].modifier is asc_op:
prevDirection = 'ASC'
if orderByTuple[0].modifier is desc_op:
prevDirection = 'DESC'
        # If the new column is the same as the old one, switch the direction.
if unicode(column) == unicode(prevColumn):
if prevDirection == 'ASC':
self._queryBuilder.setOrderBy(column.desc())
else:
self._queryBuilder.setOrderBy(column.asc())
else:
self._queryBuilder.setOrderBy(column.asc())
def _cacheReset(self):
self._dirty = True
self.perform()
def forceReset(self):
return self._cacheReset()
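# Minimal usage sketch (illustrative, never called); how the SQLAlchemy
# connection and the query builder are obtained is outside this module and
# therefore an assumption.
def _example_model_usage(connection, query_builder):
    """Build a read-only model, window it and read its dimensions."""
    model = AlchemyCoreModelR(connection, query_builder)
    # Tuple form of setLimit(): offset 0, limit 100; this triggers a reset,
    # which re-executes the labeled query and refills the result cache.
    model.limit = (0, 100)
    return model.rowCount(), model.columnCount()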
class AlchemyCoreModelRBuffered(AlchemyCoreModelR):
def __init__(self,con,queryBuilder,dataListener=None,distance=50):
super(AlchemyCoreModelRBuffered, self).__init__(con,queryBuilder,
dataListener)
self._dirty = False
self.distance = distance
self.__countQuery = None
        self.__totalCount = None
def getDistance(self):
return self.__distance
def setDistance(self, value):
self.__distance = value
self.__distanceHalf = value/2
self.__distanceDouble = value*2
def delDistance(self):
del self.__distance
def getCountQuery(self):
if self.__countQuery is None:
for row in self._queryBuilder.possibleColumns:
col = row
break
return self.buildQuery(skipLimit=True,skipOffset=True)\
.with_only_columns(
[func.count(col)]
)
return self.__countQuery
def rowCount(self, index=QModelIndex()):
if self.__totalCount is None:
self.__totalCount = self._connection.execute(self.countQuery).\
first()[0]
return self.__totalCount
def setCountQuery(self, value):
self.__countQuery = value
def delCountQuery(self):
del self.__countQuery
    countQuery = property(getCountQuery, setCountQuery, delCountQuery,
                          "Count query used to determine the total row count")
def data(self, index, role=Qt.DisplayRole):
try:
return super(AlchemyCoreModelRBuffered, self).data(index,role)
except KeyError:
if self._queryBuilder.dirty:
self.__totalCount = None
self.__countQuery = None
self.fillCacheEntries(index.row())
return super(AlchemyCoreModelRBuffered, self).data(index,role)
def fillCacheEntries(self, notFoundRowNumber):
if self._inResetProgress:
return
# print "###Row %s not found" % notFoundRowNumber
limit = self.calculateLimit(notFoundRowNumber)
self.setOffset(limit[0],True)
self.setLimit(limit[1],True)
sqlResult = self._connection.execute(self.buildQuery().apply_labels())
i = limit[0]
for row in sqlResult:
self._resultCache[i] = row
i += 1
self._dirty = False
self.reset()
def getCacheIndexOffset(self):
if self.offset is not None:
return self.offset
return 0
def calculateLimit(self, notFoundIndex):
lowerBoundary = notFoundIndex - (self.__distanceHalf)
distanceFactor = lowerBoundary/self.__distance
if distanceFactor < 0:
distanceFactor = 0
limitOffset = distanceFactor*self.__distance
limit = self.__distanceDouble
if limitOffset in self._resultCache:
limitOffset += self.__distance
limit = self.__distance
return (limitOffset,limit)
def forceReset(self):
self.__countQuery = None
self.__totalCount = None
return super(AlchemyCoreModelRBuffered, self).forceReset()
    distance = property(getDistance, setDistance, delDistance,
                        "Number of rows fetched around a cache miss")
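# Worked example (illustrative, never called) of the window computed by
# AlchemyCoreModelRBuffered.calculateLimit() for the default distance of 50,
# ignoring the cache-hit adjustment; '//' mirrors the Python 2 integer division
# used in the method above.
def _example_calculate_limit(notFoundIndex=120, distance=50):
    lowerBoundary = notFoundIndex - distance // 2       # 120 - 25 = 95
    distanceFactor = max(lowerBoundary // distance, 0)  # 95 // 50 = 1
    limitOffset = distanceFactor * distance             # offset 50
    limit = distance * 2                                # fetch 100 rows around the miss
    return (limitOffset, limit)                         # -> (50, 100)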
|
|
"""Provides device triggers for lutron caseta."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import event as event_trigger
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_EVENT,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.typing import ConfigType
from .const import (
ACTION_PRESS,
ACTION_RELEASE,
ATTR_ACTION,
ATTR_BUTTON_NUMBER,
ATTR_SERIAL,
BUTTON_DEVICES,
CONF_SUBTYPE,
DOMAIN,
LUTRON_CASETA_BUTTON_EVENT,
)
SUPPORTED_INPUTS_EVENTS_TYPES = [ACTION_PRESS, ACTION_RELEASE]
LUTRON_BUTTON_TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(SUPPORTED_INPUTS_EVENTS_TYPES),
}
)
PICO_2_BUTTON_BUTTON_TYPES = {
"on": 2,
"off": 4,
}
PICO_2_BUTTON_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_2_BUTTON_BUTTON_TYPES),
}
)
PICO_2_BUTTON_RAISE_LOWER_BUTTON_TYPES = {
"on": 2,
"off": 4,
"raise": 5,
"lower": 6,
}
PICO_2_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_2_BUTTON_RAISE_LOWER_BUTTON_TYPES),
}
)
PICO_3_BUTTON_BUTTON_TYPES = {
"on": 2,
"stop": 3,
"off": 4,
}
PICO_3_BUTTON_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_3_BUTTON_BUTTON_TYPES),
}
)
PICO_3_BUTTON_RAISE_LOWER_BUTTON_TYPES = {
"on": 2,
"stop": 3,
"off": 4,
"raise": 5,
"lower": 6,
}
PICO_3_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_3_BUTTON_RAISE_LOWER_BUTTON_TYPES),
}
)
PICO_4_BUTTON_BUTTON_TYPES = {
"button_1": 8,
"button_2": 9,
"button_3": 10,
"button_4": 11,
}
PICO_4_BUTTON_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_4_BUTTON_BUTTON_TYPES),
}
)
PICO_4_BUTTON_ZONE_BUTTON_TYPES = {
"on": 8,
"raise": 9,
"lower": 10,
"off": 11,
}
PICO_4_BUTTON_ZONE_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_4_BUTTON_ZONE_BUTTON_TYPES),
}
)
PICO_4_BUTTON_SCENE_BUTTON_TYPES = {
"button_1": 8,
"button_2": 9,
"button_3": 10,
"off": 11,
}
PICO_4_BUTTON_SCENE_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_4_BUTTON_SCENE_BUTTON_TYPES),
}
)
PICO_4_BUTTON_2_GROUP_BUTTON_TYPES = {
"group_1_button_1": 8,
"group_1_button_2": 9,
"group_2_button_1": 10,
"group_2_button_2": 11,
}
PICO_4_BUTTON_2_GROUP_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_4_BUTTON_2_GROUP_BUTTON_TYPES),
}
)
FOUR_GROUP_REMOTE_BUTTON_TYPES = {
"open_all": 2,
"stop_all": 3,
"close_all": 4,
"raise_all": 5,
"lower_all": 6,
"open_1": 10,
"stop_1": 11,
"close_1": 12,
"raise_1": 13,
"lower_1": 14,
"open_2": 18,
"stop_2": 19,
"close_2": 20,
"raise_2": 21,
"lower_2": 22,
"open_3": 26,
"stop_3": 27,
"close_3": 28,
"raise_3": 29,
"lower_3": 30,
"open_4": 34,
"stop_4": 35,
"close_4": 36,
"raise_4": 37,
"lower_4": 38,
}
FOUR_GROUP_REMOTE_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(FOUR_GROUP_REMOTE_BUTTON_TYPES),
}
)
DEVICE_TYPE_SCHEMA_MAP = {
"Pico2Button": PICO_2_BUTTON_TRIGGER_SCHEMA,
"Pico2ButtonRaiseLower": PICO_2_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA,
"Pico3Button": PICO_3_BUTTON_TRIGGER_SCHEMA,
"Pico3ButtonRaiseLower": PICO_3_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA,
"Pico4Button": PICO_4_BUTTON_TRIGGER_SCHEMA,
"Pico4ButtonScene": PICO_4_BUTTON_SCENE_TRIGGER_SCHEMA,
"Pico4ButtonZone": PICO_4_BUTTON_ZONE_TRIGGER_SCHEMA,
"Pico4Button2Group": PICO_4_BUTTON_2_GROUP_TRIGGER_SCHEMA,
"FourGroupRemote": FOUR_GROUP_REMOTE_TRIGGER_SCHEMA,
}
DEVICE_TYPE_SUBTYPE_MAP = {
"Pico2Button": PICO_2_BUTTON_BUTTON_TYPES,
"Pico2ButtonRaiseLower": PICO_2_BUTTON_RAISE_LOWER_BUTTON_TYPES,
"Pico3Button": PICO_3_BUTTON_BUTTON_TYPES,
"Pico3ButtonRaiseLower": PICO_3_BUTTON_RAISE_LOWER_BUTTON_TYPES,
"Pico4Button": PICO_4_BUTTON_BUTTON_TYPES,
"Pico4ButtonScene": PICO_4_BUTTON_SCENE_BUTTON_TYPES,
"Pico4ButtonZone": PICO_4_BUTTON_ZONE_BUTTON_TYPES,
"Pico4Button2Group": PICO_4_BUTTON_2_GROUP_BUTTON_TYPES,
"FourGroupRemote": FOUR_GROUP_REMOTE_BUTTON_TYPES,
}
TRIGGER_SCHEMA = vol.Any(
PICO_2_BUTTON_TRIGGER_SCHEMA,
PICO_2_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA,
PICO_3_BUTTON_TRIGGER_SCHEMA,
PICO_3_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA,
PICO_4_BUTTON_TRIGGER_SCHEMA,
PICO_4_BUTTON_SCENE_TRIGGER_SCHEMA,
PICO_4_BUTTON_ZONE_TRIGGER_SCHEMA,
PICO_4_BUTTON_2_GROUP_TRIGGER_SCHEMA,
FOUR_GROUP_REMOTE_TRIGGER_SCHEMA,
)
async def async_validate_trigger_config(hass: HomeAssistant, config: ConfigType):
"""Validate config."""
# if device is available verify parameters against device capabilities
device = get_button_device_by_dr_id(hass, config[CONF_DEVICE_ID])
if not device:
return config
schema = DEVICE_TYPE_SCHEMA_MAP.get(device["type"])
if not schema:
raise InvalidDeviceAutomationConfig(
f"Device type {device['type']} not supported: {config[CONF_DEVICE_ID]}"
)
return schema(config)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> list[dict]:
"""List device triggers for lutron caseta devices."""
triggers = []
device = get_button_device_by_dr_id(hass, device_id)
if not device:
raise InvalidDeviceAutomationConfig(f"Device not found: {device_id}")
valid_buttons = DEVICE_TYPE_SUBTYPE_MAP.get(device["type"], [])
for trigger in SUPPORTED_INPUTS_EVENTS_TYPES:
for subtype in valid_buttons:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_TYPE: trigger,
CONF_SUBTYPE: subtype,
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
device = get_button_device_by_dr_id(hass, config[CONF_DEVICE_ID])
schema = DEVICE_TYPE_SCHEMA_MAP.get(device["type"])
valid_buttons = DEVICE_TYPE_SUBTYPE_MAP.get(device["type"])
config = schema(config)
event_config = {
event_trigger.CONF_PLATFORM: CONF_EVENT,
event_trigger.CONF_EVENT_TYPE: LUTRON_CASETA_BUTTON_EVENT,
event_trigger.CONF_EVENT_DATA: {
ATTR_SERIAL: device["serial"],
ATTR_BUTTON_NUMBER: valid_buttons[config[CONF_SUBTYPE]],
ATTR_ACTION: config[CONF_TYPE],
},
}
event_config = event_trigger.TRIGGER_SCHEMA(event_config)
return await event_trigger.async_attach_trigger(
hass, event_config, action, automation_info, platform_type="device"
)
def get_button_device_by_dr_id(hass: HomeAssistant, device_id: str):
"""Get a lutron device for the given device id."""
if DOMAIN not in hass.data:
return None
for config_entry in hass.data[DOMAIN]:
button_devices = hass.data[DOMAIN][config_entry][BUTTON_DEVICES]
device = button_devices.get(device_id)
if device:
return device
return None
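# A minimal sketch (not part of the integration) of the trigger config that
# this module validates and attaches; the device id below is a hypothetical
# placeholder:
#
#     {
#         CONF_PLATFORM: "device",
#         CONF_DOMAIN: DOMAIN,
#         CONF_DEVICE_ID: "abc123",   # hypothetical device registry id
#         CONF_TYPE: ACTION_PRESS,    # one of SUPPORTED_INPUTS_EVENTS_TYPES
#         CONF_SUBTYPE: "on",         # must be a button of the Pico model
#     }
#
# async_attach_trigger() translates such a config into an event trigger on
# LUTRON_CASETA_BUTTON_EVENT with the matching serial, button number and action.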
|
|
from __future__ import print_function, division
from functools import wraps
from sympy.core import S, Symbol, Tuple, Integer, Basic, Expr
from sympy.core.decorators import call_highest_priority
from sympy.core.compatibility import range
from sympy.core.sympify import SympifyError, sympify
from sympy.functions import conjugate, adjoint
from sympy.matrices import ShapeError
from sympy.simplify import simplify
def _sympifyit(arg, retval=None):
# This version of _sympifyit sympifies MutableMatrix objects
def deco(func):
@wraps(func)
def __sympifyit_wrapper(a, b):
try:
b = sympify(b, strict=True)
return func(a, b)
except SympifyError:
return retval
return __sympifyit_wrapper
return deco
class MatrixExpr(Basic):
""" Superclass for Matrix Expressions
MatrixExprs represent abstract matrices, linear transformations represented
within a particular basis.
Examples
========
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol('A', 3, 3)
>>> y = MatrixSymbol('y', 3, 1)
>>> x = (A.T*A).I * A * y
See Also
========
MatrixSymbol
MatAdd
MatMul
Transpose
Inverse
"""
_op_priority = 11.0
is_Matrix = True
is_MatrixExpr = True
is_Identity = None
is_Inverse = False
is_Transpose = False
is_ZeroMatrix = False
is_MatAdd = False
is_MatMul = False
is_commutative = False
def __new__(cls, *args, **kwargs):
args = map(sympify, args)
return Basic.__new__(cls, *args, **kwargs)
# The following is adapted from the core Expr object
def __neg__(self):
return MatMul(S.NegativeOne, self).doit()
def __abs__(self):
raise NotImplementedError
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return MatAdd(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return MatAdd(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return MatAdd(self, -other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return MatAdd(other, -self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return MatMul(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return MatMul(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
if not self.is_square:
raise ShapeError("Power of non-square matrix %s" % self)
if other is S.NegativeOne:
return Inverse(self)
elif other is S.Zero:
return Identity(self.rows)
elif other is S.One:
return self
return MatPow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
raise NotImplementedError("Matrix Power not defined")
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rdiv__')
def __div__(self, other):
return self * other**S.NegativeOne
@_sympifyit('other', NotImplemented)
@call_highest_priority('__div__')
def __rdiv__(self, other):
raise NotImplementedError()
#return MatMul(other, Pow(self, S.NegativeOne))
__truediv__ = __div__
__rtruediv__ = __rdiv__
@property
def rows(self):
return self.shape[0]
@property
def cols(self):
return self.shape[1]
@property
def is_square(self):
return self.rows == self.cols
def _eval_conjugate(self):
from sympy.matrices.expressions.adjoint import Adjoint
from sympy.matrices.expressions.transpose import Transpose
return Adjoint(Transpose(self))
def _eval_inverse(self):
from sympy.matrices.expressions.inverse import Inverse
return Inverse(self)
def _eval_transpose(self):
return Transpose(self)
def _eval_power(self, exp):
return MatPow(self, exp)
def _eval_simplify(self, **kwargs):
if self.is_Atom:
return self
else:
return self.__class__(*[simplify(x, **kwargs) for x in self.args])
def _eval_adjoint(self):
from sympy.matrices.expressions.adjoint import Adjoint
return Adjoint(self)
def _entry(self, i, j):
raise NotImplementedError(
"Indexing not implemented for %s" % self.__class__.__name__)
def adjoint(self):
return adjoint(self)
def conjugate(self):
return conjugate(self)
def transpose(self):
from sympy.matrices.expressions.transpose import transpose
return transpose(self)
T = property(transpose, None, None, 'Matrix transposition.')
def inverse(self):
return self._eval_inverse()
@property
def I(self):
return self.inverse()
def valid_index(self, i, j):
def is_valid(idx):
return isinstance(idx, (int, Integer, Symbol, Expr))
return (is_valid(i) and is_valid(j) and
(0 <= i) != False and (i < self.rows) != False and
(0 <= j) != False and (j < self.cols) != False)
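# NOTE: the `!= False` comparisons above implement three-valued logic: with
# symbolic indices (e.g. Symbol('i')) the bound checks return an unevaluated
# relational rather than True/False, and such indices are treated as
# potentially valid instead of being rejected.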
def __getitem__(self, key):
if not isinstance(key, tuple) and isinstance(key, slice):
from sympy.matrices.expressions.slice import MatrixSlice
return MatrixSlice(self, key, (0, None, 1))
if isinstance(key, tuple) and len(key) == 2:
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
from sympy.matrices.expressions.slice import MatrixSlice
return MatrixSlice(self, i, j)
i, j = sympify(i), sympify(j)
if self.valid_index(i, j) != False:
return self._entry(i, j)
else:
raise IndexError("Invalid indices (%s, %s)" % (i, j))
elif isinstance(key, (int, Integer)):
# row-wise decomposition of matrix
rows, cols = self.shape
if not (isinstance(rows, Integer) and isinstance(cols, Integer)):
raise IndexError("Single index only supported for "
"non-symbolic matrix shapes.")
key = sympify(key)
i = key // cols
j = key % cols
if self.valid_index(i, j) != False:
return self._entry(i, j)
else:
raise IndexError("Invalid index %s" % key)
elif isinstance(key, (Symbol, Expr)):
raise IndexError("Single index only supported for "
"non-symbolic indices.")
raise IndexError("Invalid index, wanted %s[i,j]" % self)
def as_explicit(self):
"""
Returns a dense Matrix with elements represented explicitly
Returns an object of type ImmutableMatrix.
Examples
========
>>> from sympy import Identity
>>> I = Identity(3)
>>> I
I
>>> I.as_explicit()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_mutable: returns mutable Matrix type
"""
from sympy.matrices.immutable import ImmutableMatrix
return ImmutableMatrix([[ self[i, j]
for j in range(self.cols)]
for i in range(self.rows)])
def as_mutable(self):
"""
Returns a dense, mutable matrix with elements represented explicitly
Examples
========
>>> from sympy import Identity
>>> I = Identity(3)
>>> I
I
>>> I.shape
(3, 3)
>>> I.as_mutable()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_explicit: returns ImmutableMatrix
"""
return self.as_explicit().as_mutable()
def __array__(self):
from numpy import empty
a = empty(self.shape, dtype=object)
for i in range(self.rows):
for j in range(self.cols):
a[i, j] = self[i, j]
return a
def equals(self, other):
"""
Test elementwise equality between matrices, potentially of different
types
>>> from sympy import Identity, eye
>>> Identity(3).equals(eye(3))
True
"""
return self.as_explicit().equals(other)
def canonicalize(self):
return self
def as_coeff_mmul(self):
return 1, MatMul(self)
class MatrixElement(Expr):
parent = property(lambda self: self.args[0])
i = property(lambda self: self.args[1])
j = property(lambda self: self.args[2])
_diff_wrt = True
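# MatrixElement is what symbolic indexing of a MatrixExpr produces: for
# example, MatrixSymbol('A', 3, 3)[i, j] with symbolic i and j returns
# MatrixElement(A, i, j) rather than a concrete scalar; the parent matrix
# and the indices are available via the .parent, .i and .j properties.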
class MatrixSymbol(MatrixExpr):
"""Symbolic representation of a Matrix object
Creates a SymPy Symbol to represent a Matrix. This matrix has a shape and
can be included in Matrix Expressions
>>> from sympy import MatrixSymbol, Identity
>>> A = MatrixSymbol('A', 3, 4) # A 3 by 4 Matrix
>>> B = MatrixSymbol('B', 4, 3) # A 4 by 3 Matrix
>>> A.shape
(3, 4)
>>> 2*A*B + Identity(3)
I + 2*A*B
"""
is_commutative = False
def __new__(cls, name, n, m):
n, m = sympify(n), sympify(m)
obj = Basic.__new__(cls, name, n, m)
return obj
def _hashable_content(self):
return (self.name, self.shape)
@property
def shape(self):
return self.args[1:3]
@property
def name(self):
return self.args[0]
def _eval_subs(self, old, new):
# only do substitutions in shape
shape = Tuple(*self.shape)._subs(old, new)
return MatrixSymbol(self.name, *shape)
def __call__(self, *args):
raise TypeError( "%s object is not callable" % self.__class__ )
def _entry(self, i, j):
return MatrixElement(self, i, j)
@property
def free_symbols(self):
return set((self,))
def doit(self, **hints):
if hints.get('deep', True):
return type(self)(self.name, self.args[1].doit(**hints),
self.args[2].doit(**hints))
else:
return self
def _eval_simplify(self, **kwargs):
return self
class Identity(MatrixExpr):
"""The Matrix Identity I - multiplicative identity
>>> from sympy.matrices import Identity, MatrixSymbol
>>> A = MatrixSymbol('A', 3, 5)
>>> I = Identity(3)
>>> I*A
A
"""
is_Identity = True
def __new__(cls, n):
return super(Identity, cls).__new__(cls, sympify(n))
@property
def rows(self):
return self.args[0]
@property
def cols(self):
return self.args[0]
@property
def shape(self):
return (self.args[0], self.args[0])
def _eval_transpose(self):
return self
def _eval_trace(self):
return self.rows
def _eval_inverse(self):
return self
def conjugate(self):
return self
def _entry(self, i, j):
if i == j:
return S.One
else:
return S.Zero
def _eval_determinant(self):
return S.One
class ZeroMatrix(MatrixExpr):
"""The Matrix Zero 0 - additive identity
>>> from sympy import MatrixSymbol, ZeroMatrix
>>> A = MatrixSymbol('A', 3, 5)
>>> Z = ZeroMatrix(3, 5)
>>> A+Z
A
>>> Z*A.T
0
"""
is_ZeroMatrix = True
def __new__(cls, m, n):
return super(ZeroMatrix, cls).__new__(cls, m, n)
@property
def shape(self):
return (self.args[0], self.args[1])
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
if other != 1 and not self.is_square:
raise ShapeError("Power of non-square matrix %s" % self)
if other == 0:
return Identity(self.rows)
return self
def _eval_transpose(self):
return ZeroMatrix(self.cols, self.rows)
def _eval_trace(self):
return S.Zero
def _eval_determinant(self):
return S.Zero
def conjugate(self):
return self
def _entry(self, i, j):
return S.Zero
def __nonzero__(self):
return False
__bool__ = __nonzero__
def matrix_symbols(expr):
return [sym for sym in expr.free_symbols if sym.is_Matrix]
from .matmul import MatMul
from .matadd import MatAdd
from .matpow import MatPow
from .transpose import Transpose
from .inverse import Inverse
|
|
# For lazy imports
genshi = None
genshi_loader = dict()
simplejson = None
kid = None
mako = None
mako_lookup = None
kajiki_loader = None
try:
from pkg_resources import resource_filename
except ImportError:
def resource_filename(module, filename):
names = module.split(".")+ [filename]
pathname = os.path.join(*names)
return pathname
import os.path
import logging
import cherrypy
from cherrypy import request, response
from gearshift import config
from gearshift.util import (
get_template_encoding_default,
get_mime_type_for_format, mime_type_has_charset, Bunch)
from gearshift.view import stdvars
from gearshift.i18n import gettext, lazy_gettext
log = logging.getLogger("gearshift.expose")
engines = dict()
def render_kajiki(template=None, info=None, format=None, fragment=False, mapping=None):
global kajiki_loader
if kajiki_loader is None:
# Lazy imports of Kajiki
from kajiki import PackageLoader
kajiki_loader = PackageLoader()
Template = kajiki_loader.import_(template)
context = Bunch()
context.update(stdvars())
context.update(info)
templ = Template(context)
return templ.render()
engines['kajiki'] = render_kajiki
def render_genshi(template=None, info=None, format=None, fragment=False, mapping=None):
global genshi, genshi_loader
if genshi is None:
# Lazy imports of Genshi
import genshi
import genshi.template
import genshi.output
import genshi.input
import genshi.filters
def genshi_loader_callback(template):
"""This function will be called by genshi TemplateLoader after
loading the template"""
translator = genshi.filters.Translator(gettext)
# Genshi 0.6 supports translation directives. Lets use them if available.
if hasattr(translator, "setup"):
translator.setup(template)
else:
template.filters.insert(0, translator)
if config.get("i18n.run_template_filter", False):
callback = genshi_loader_callback
else:
callback = None
auto_reload = config.get("genshi.auto_reload", "1")
if isinstance(auto_reload, basestring):
auto_reload = auto_reload.lower() in ('1', 'on', 'yes', 'true')
max_cache_size = config.get("genshi.max_cache_size", 25)
genshi_loader = genshi.template.TemplateLoader([""],
auto_reload=auto_reload,
callback=callback,
max_cache_size=max_cache_size,
)
# Choose Genshi template engine
if format == "text":
cls = genshi.template.NewTextTemplate
default_extension = "txt"
else:
cls = None # Default is Markup
default_extension = "html"
if "/" in template:
# Path notation. Use first path part as module name
module, pathname = template.split("/", 1)
template = resource_filename(module, pathname)
elif "." in template:
# Dotted notation
module, filename = template.rsplit(".", 1)
filename = '%s.%s' % (filename, default_extension)
template = resource_filename(module, filename)
else:
template = '%s.%s' % (template, default_extension)
encoding = config.get("genshi.encoding", "utf-8")
templ = genshi_loader.load(template, encoding=encoding, cls=cls)
if format == 'html' and not fragment:
mapping.setdefault('doctype', config.get('genshi.default_doctype',
'html-transitional'))
serializer = genshi.output.get_serializer(format, **mapping)
extras = {
'XML' : genshi.input.XML,
'HTML' : genshi.input.HTML,
'ET' : genshi.input.ET,
'_' : lazy_gettext
}
context = genshi.template.Context(**extras)
context.push(stdvars())
context.push(info)
stream = templ.generate(context)
if config.get('genshi.html_form_filler', False):
stream = stream | genshi.filters.HTMLFormFiller(data=info)
encode = genshi.output.encode
return encode(serializer(stream), method=serializer, encoding=encoding)
engines['genshi'] = render_genshi
def default_json(obj):
if hasattr(obj, '__json__'):
return obj.__json__()
return ""
def render_json(template=None, info=None, format=None, fragment=False, mapping=None):
"""Engine for JSON. Misses most of the features of TurboJSON, but this
one works on the Google App Engine
"""
global simplejson
if not simplejson:
try:
from django.utils import simplejson # App Engine friendly
except ImportError:
import simplejson
# filter info parameters
info = dict([(key, info[key]) for key in info.keys() if not (key.startswith("tg_") and key != "tg_flash")])
return simplejson.dumps(info, default=default_json)
engines['json'] = render_json
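# A minimal sketch of the __json__ hook honoured by default_json above
# (hypothetical class, not part of this module):
#
#     class Point(object):
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#         def __json__(self):
#             return {"x": self.x, "y": self.y}
#
#     render_json(info={"point": Point(1, 2)})
#     # -> '{"point": {"x": 1, "y": 2}}'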
def render_kid(template=None, info=None, format=None, fragment=False, mapping=None):
"""We need kid support in order to get some of the tests working
"""
global kid
if kid is None:
import kid
extension = "kid"
if "." in template:
module, filename = template.rsplit(".", 1)
filename = '%s.%s' % (filename, extension)
template = resource_filename(module, filename)
else:
template = '%s.%s' % (template, extension)
template = kid.Template(file=template, fragment=fragment, **info)
return template.serialize()
engines['kid'] = render_kid
def render_mako(template=None, info=None, format=None, fragment=False, mapping=None):
global mako, mako_lookup
if mako is None:
import mako
import mako.lookup
import mako.exceptions  # used by the error template fallback below
mako_lookup = mako.lookup.TemplateLookup(directories=[''])
extension = format
if "." in template:
module, filename = template.rsplit(".", 1)
filename = '%s.%s' % (filename, extension)
template = resource_filename(module, filename)
else:
template = '%s.%s' % (template, extension)
templ = mako_lookup.get_template(template)
try:
ret = templ.render(**info)
except Exception:
ret = mako.exceptions.html_error_template().render()
return ret
engines['mako'] = render_mako
def _choose_engine(template):
if isinstance(template, basestring):
colon = template.find(":")
if colon > -1:
enginename = template[:colon]
template = template[colon+1:]
else:
engine = engines.get(template, None)
if engine:
return engine, None, template
enginename = config.get("tg.defaultview", "genshi")
else:
enginename = config.get("tg.defaultview", "genshi")
engine = engines.get(enginename, None)
if not engine:
raise KeyError, \
"Template engine %s is not installed" % enginename
return engine, template, enginename
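# A short sketch of the prefix convention handled above (assumed inputs):
#
#     _choose_engine("genshi:myapp.templates.page")
#         -> (render_genshi, "myapp.templates.page", "genshi")
#     _choose_engine("json")
#         -> (render_json, None, "json")
#     _choose_engine(None)
#         -> (<engine named by tg.defaultview>, None, <its name>)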
def render(info, template=None, format=None, headers=None, mapping=None,
fragment=False):
"""Renders data in the desired format.
@param info: the data itself
@type info: dict
@param format: "html", "xml", "text" or "json"
@type format: string
@param headers: for response headers, primarily the content type
@type headers: dict
@param fragment: passed through to tell the template if only a
fragment of a page is desired
@type fragment: bool
@param template: name of the template to use
@type template: string
"""
# Expose the raw template data to paste.testing so the test harness can inspect it.
environ = getattr(cherrypy.request, 'wsgi_environ', {})
if environ.get('paste.testing', False):
cherrypy.request.wsgi_environ['paste.testing_variables']['raw'] = info
template = format == 'json' and 'json' or info.pop("tg_template", template)
engine, template, enginename = _choose_engine(template)
if format:
if format == 'plain':
if enginename == 'genshi':
format = 'text'
elif format == 'text':
if enginename == 'kid':
format = 'plain'
else:
format = enginename == 'json' and 'json' or config.get(
"%s.outputformat" % enginename,
config.get("%s.default_format" % enginename, 'html'))
if isinstance(headers, dict):
# Determine the proper content type and charset for the response.
# We simply derive the content type from the format here
# and use the charset specified in the configuration setting.
# This could be improved by also examining the engine and the output.
content_type = headers.get('Content-Type')
if not content_type:
if format:
content_format = format
if isinstance(content_format, (tuple, list)):
content_format = content_format[0]
if isinstance(content_format, str):
content_format = content_format.split()[0].split('-', 1)[0].lower()
else:
content_format = 'html'
else:
content_format = 'html'
content_type = get_mime_type_for_format(content_format)
if mime_type_has_charset(
content_type) and '; charset=' not in content_type:
charset = get_template_encoding_default(enginename)
if charset:
content_type += '; charset=' + charset
headers['Content-Type'] = content_type
mapping = mapping or dict()
return engine(info=info, format=format, fragment=fragment,
template=template, mapping=mapping)
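# A hedged usage sketch (assuming a configured gearshift application and a
# genshi template package "myapp.templates" containing page.html):
#
#     headers = {}
#     body = render({"title": "Hello"},
#                   template="genshi:myapp.templates.page",
#                   format="html", headers=headers)
#     # headers["Content-Type"] is then e.g. "text/html; charset=utf-8"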
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'DataDomainIndex.groups'
db.delete_column(u'profiles_datadomainindex', 'groups_id')
# Adding field 'DataDomainIndex.group'
db.add_column(u'profiles_datadomainindex', 'group', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['profiles.Group']), keep_default=False)
def backwards(self, orm):
# Adding field 'DataDomainIndex.groups'
db.add_column(u'profiles_datadomainindex', 'groups', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['profiles.Group']), keep_default=False)
# Deleting field 'DataDomainIndex.group'
db.delete_column(u'profiles_datadomainindex', 'group_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 11, 11, 4, 51, 765794)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 11, 11, 4, 51, 764476)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'order': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datadomainindex': {
'Meta': {'ordering': "['order']", 'object_name': 'DataDomainIndex'},
'dataDomain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'geography_name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'geography_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'geometry_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'indicator_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
'summary_level': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
u'profiles.groupindex': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupIndex'},
'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': u"orm['profiles.Indicator']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.legendoption': {
'Meta': {'object_name': 'LegendOption'},
'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bin_type': ('django.db.models.fields.CharField', [], {'default': "'jenks'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
|
|
import atexit
from collections import defaultdict
from multiprocessing.pool import ThreadPool
import threading
import ray
from dask.core import istask, ishashable, _execute_task
from dask.local import get_async, apply_sync
from dask.system import CPU_COUNT
from dask.threaded import pack_exception, _thread_get_id
from .callbacks import local_ray_callbacks, unpack_ray_callbacks
from .common import unpack_object_refs
main_thread = threading.current_thread()
default_pool = None
pools = defaultdict(dict)
pools_lock = threading.Lock()
def ray_dask_get(dsk, keys, **kwargs):
"""
A Dask-Ray scheduler. This scheduler will send top-level (non-inlined) Dask
tasks to a Ray cluster for execution. The scheduler will wait for the
tasks to finish executing, fetch the results, and repackage them into the
appropriate Dask collections. This particular scheduler uses a threadpool
to submit Ray tasks.
This can be passed directly to `dask.compute()`, as the scheduler:
>>> dask.compute(obj, scheduler=ray_dask_get)
You can override the currently active global Dask-Ray callbacks (e.g.
supplied via a context manager), the number of threads to use when
submitting the Ray tasks, or the threadpool used to submit Ray tasks:
>>> dask.compute(
obj,
scheduler=ray_dask_get,
ray_callbacks=some_ray_dask_callbacks,
num_workers=8,
pool=some_cool_pool,
)
Args:
dsk (Dict): Dask graph, represented as a task DAG dictionary.
keys (List[str]): List of Dask graph keys whose values we wish to
compute and return.
ray_callbacks (Optional[list[callable]]): Dask-Ray callbacks.
num_workers (Optional[int]): The number of worker threads to use in
the Ray task submission traversal of the Dask graph.
pool (Optional[ThreadPool]): A multiprocessing threadpool to use to
submit Ray tasks.
Returns:
Computed values corresponding to the provided keys.
"""
num_workers = kwargs.pop("num_workers", None)
pool = kwargs.pop("pool", None)
# We attempt to reuse any other thread pools that have been created within
# this thread and with the given number of workers. We reuse a global
# thread pool if num_workers is not given and we're in the main thread.
global default_pool
thread = threading.current_thread()
if pool is None:
with pools_lock:
if num_workers is None and thread is main_thread:
if default_pool is None:
default_pool = ThreadPool(CPU_COUNT)
atexit.register(default_pool.close)
pool = default_pool
elif thread in pools and num_workers in pools[thread]:
pool = pools[thread][num_workers]
else:
pool = ThreadPool(num_workers)
atexit.register(pool.close)
pools[thread][num_workers] = pool
ray_callbacks = kwargs.pop("ray_callbacks", None)
with local_ray_callbacks(ray_callbacks) as ray_callbacks:
# Unpack the Ray-specific callbacks.
(
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
ray_postsubmit_all_cbs,
ray_finish_cbs,
) = unpack_ray_callbacks(ray_callbacks)
# NOTE: We hijack Dask's `get_async` function, injecting a different
# task executor.
object_refs = get_async(
_apply_async_wrapper(
pool.apply_async,
_rayify_task_wrapper,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
),
len(pool._pool),
dsk,
keys,
get_id=_thread_get_id,
pack_exception=pack_exception,
**kwargs,
)
if ray_postsubmit_all_cbs is not None:
for cb in ray_postsubmit_all_cbs:
cb(object_refs, dsk)
# NOTE: We explicitly delete the Dask graph here so object references
# are garbage-collected before this function returns, i.e. before all
# Ray tasks are done. Otherwise, no intermediate objects will be
# cleaned up until all Ray tasks are done.
del dsk
result = ray_get_unpack(object_refs)
if ray_finish_cbs is not None:
for cb in ray_finish_cbs:
cb(result)
# cleanup pools associated with dead threads.
with pools_lock:
active_threads = set(threading.enumerate())
if thread is not main_thread:
for t in list(pools):
if t not in active_threads:
for p in pools.pop(t).values():
p.close()
return result
def _apply_async_wrapper(apply_async, real_func, *extra_args, **extra_kwargs):
"""
Wraps the given pool `apply_async` function, hotswapping `real_func` in as
the function to be applied and adding `extra_args` and `extra_kwargs` to
`real_func`'s call.
Args:
apply_async (callable): The pool function to be wrapped.
real_func (callable): The real function that we wish the pool apply
function to execute.
*extra_args: Extra positional arguments to pass to the `real_func`.
**extra_kwargs: Extra keyword arguments to pass to the `real_func`.
Returns:
A wrapper function that will ignore its first `func` argument and
pass `real_func` in its place. To be passed to `dask.local.get_async`.
"""
def wrapper(func, args=(), kwds={}, callback=None): # noqa: M511
return apply_async(
real_func,
args=args + extra_args,
kwds=dict(kwds, **extra_kwargs),
callback=callback,
)
return wrapper
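# Sketch of the swap performed above: Dask's get_async() calls
# apply_async(execute_task, args=(key, task_info, ...)); the wrapper drops
# that execute_task argument and instead schedules
# real_func(key, task_info, ..., *extra_args), which is how
# _rayify_task_wrapper and the Ray callbacks are injected below.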
def _rayify_task_wrapper(
key,
task_info,
dumps,
loads,
get_id,
pack_exception,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
):
"""
The core Ray-Dask task execution wrapper, to be given to the thread pool's
`apply_async` function. Exactly the same as `execute_task`, except that it
calls `_rayify_task` on the task instead of `_execute_task`.
Args:
key (str): The Dask graph key whose corresponding task we wish to
execute.
task_info: The task to execute and its dependencies.
dumps (callable): A result serializing function.
loads (callable): A task_info deserializing function.
get_id (callable): An ID generating function.
pack_exception (callable): An exception serializing function.
ray_presubmit_cbs (callable): Pre-task submission callbacks.
ray_postsubmit_cbs (callable): Post-task submission callbacks.
ray_pretask_cbs (callable): Pre-task execution callbacks.
ray_posttask_cbs (callable): Post-task execution callbacks.
Returns:
A 3-tuple of the task's key, a literal or a Ray object reference for a
Ray task's result, and whether the Ray task submission failed.
"""
try:
task, deps = loads(task_info)
result = _rayify_task(
task,
key,
deps,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
)
id = get_id()
result = dumps((result, id))
failed = False
except BaseException as e:
result = pack_exception(e, dumps)
failed = True
return key, result, failed
def _rayify_task(
task,
key,
deps,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
):
"""
Rayifies the given task, submitting it as a Ray task to the Ray cluster.
Args:
task (tuple): A Dask graph value, being either a literal, dependency
key, Dask task, or a list thereof.
key (str): The Dask graph key for the given task.
deps (dict): The dependencies of this task.
ray_presubmit_cbs (callable): Pre-task submission callbacks.
ray_postsubmit_cbs (callable): Post-task submission callbacks.
ray_pretask_cbs (callable): Pre-task execution callbacks.
ray_posttask_cbs (callable): Post-task execution callbacks.
Returns:
A literal, a Ray object reference representing a submitted task, or a
list thereof.
"""
if isinstance(task, list):
# Recursively rayify this list. This will still bottom out at the first
# actual task encountered, inlining any tasks in that task's arguments.
return [
_rayify_task(
t,
key,
deps,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
) for t in task
]
elif istask(task):
# Unpacks and repacks Ray object references and submits the task to the
# Ray cluster for execution.
if ray_presubmit_cbs is not None:
alternate_returns = [
cb(task, key, deps) for cb in ray_presubmit_cbs
]
for alternate_return in alternate_returns:
# We don't submit a Ray task if a presubmit callback returns
# a non-`None` value, instead we return said value.
# NOTE: This returns the first non-None presubmit callback
# return value.
if alternate_return is not None:
return alternate_return
func, args = task[0], task[1:]
# If the function's arguments contain nested object references, we must
# unpack said object references into a flat set of arguments so that
# Ray properly tracks the object dependencies between Ray tasks.
object_refs, repack = unpack_object_refs(args, deps)
# Submit the task using a wrapper function.
object_ref = dask_task_wrapper.options(name=f"dask:{key!s}").remote(
func, repack, key, ray_pretask_cbs, ray_posttask_cbs, *object_refs)
if ray_postsubmit_cbs is not None:
for cb in ray_postsubmit_cbs:
cb(task, key, deps, object_ref)
return object_ref
elif not ishashable(task):
return task
elif task in deps:
return deps[task]
else:
return task
@ray.remote
def dask_task_wrapper(func, repack, key, ray_pretask_cbs, ray_posttask_cbs,
*args):
"""
A Ray remote function acting as a Dask task wrapper. This function will
repackage the given flat `args` into its original data structures using
`repack`, execute any Dask subtasks within the repackaged arguments
(inlined by Dask's optimization pass), and then pass the concrete task
arguments to the provided Dask task function, `func`.
Args:
func (callable): The Dask task function to execute.
repack (callable): A function that repackages the provided args into
the original (possibly nested) Python objects.
key (str): The Dask key for this task.
ray_pretask_cbs (callable): Pre-task execution callbacks.
ray_posttask_cbs (callable): Post-task execution callbacks.
*args (ObjectRef): Ray object references representing the Dask task's
arguments.
Returns:
The output of the Dask task. In the context of Ray, a
dask_task_wrapper.remote() invocation will return a Ray object
reference representing the Ray task's result.
"""
if ray_pretask_cbs is not None:
pre_states = [
cb(key, args) if cb is not None else None for cb in ray_pretask_cbs
]
repacked_args, repacked_deps = repack(args)
# Recursively execute Dask-inlined tasks.
actual_args = [_execute_task(a, repacked_deps) for a in repacked_args]
# Execute the actual underlying Dask task.
result = func(*actual_args)
if ray_posttask_cbs is not None:
for cb, pre_state in zip(ray_posttask_cbs, pre_states):
if cb is not None:
cb(key, result, pre_state)
return result
def ray_get_unpack(object_refs):
"""
Unpacks object references, gets the object references, and repacks.
Traverses arbitrary data structures.
Args:
object_refs: A (potentially nested) Python object containing Ray object
references.
Returns:
The input Python object with all contained Ray object references
resolved with their concrete values.
"""
if isinstance(object_refs, tuple):
object_refs = list(object_refs)
if isinstance(object_refs, list) and any(not isinstance(x, ray.ObjectRef)
for x in object_refs):
# We flatten the object references before calling ray.get(), since Dask
# loves to nest collections in nested tuples and Ray expects a flat
# list of object references. We repack the results after ray.get()
# completes.
object_refs, repack = unpack_object_refs(*object_refs)
computed_result = ray.get(object_refs)
return repack(computed_result)
else:
return ray.get(object_refs)
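# Example of the shape handled above: for object_refs like
# [ref1, [ref2, (ref3,)]], the nested structure is flattened before
# ray.get() and the computed values are repacked into the same nesting;
# a flat list of plain ObjectRefs falls through to ray.get() directly.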
def ray_dask_get_sync(dsk, keys, **kwargs):
"""
A synchronous Dask-Ray scheduler. This scheduler will send top-level
(non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will
wait for the tasks to finish executing, fetch the results, and repackage
them into the appropriate Dask collections. This particular scheduler
submits Ray tasks synchronously, which can be useful for debugging.
This can be passed directly to `dask.compute()`, as the scheduler:
>>> dask.compute(obj, scheduler=ray_dask_get_sync)
You can override the currently active global Dask-Ray callbacks (e.g.
supplied via a context manager):
>>> dask.compute(
obj,
scheduler=ray_dask_get_sync,
ray_callbacks=some_ray_dask_callbacks,
)
Args:
dsk (Dict): Dask graph, represented as a task DAG dictionary.
keys (List[str]): List of Dask graph keys whose values we wish to
compute and return.
Returns:
Computed values corresponding to the provided keys.
"""
ray_callbacks = kwargs.pop("ray_callbacks", None)
with local_ray_callbacks(ray_callbacks) as ray_callbacks:
# Unpack the Ray-specific callbacks.
(
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
ray_postsubmit_all_cbs,
ray_finish_cbs,
) = unpack_ray_callbacks(ray_callbacks)
# NOTE: We hijack Dask's `get_async` function, injecting a different
# task executor.
object_refs = get_async(
_apply_async_wrapper(
apply_sync,
_rayify_task_wrapper,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
),
1,
dsk,
keys,
**kwargs,
)
if ray_postsubmit_all_cbs is not None:
for cb in ray_postsubmit_all_cbs:
cb(object_refs, dsk)
# NOTE: We explicitly delete the Dask graph here so object references
# are garbage-collected before this function returns, i.e. before all
# Ray tasks are done. Otherwise, no intermediate objects will be
# cleaned up until all Ray tasks are done.
del dsk
result = ray_get_unpack(object_refs)
if ray_finish_cbs is not None:
for cb in ray_finish_cbs:
cb(result)
return result
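# A hedged end-to-end sketch (assumes a running Ray instance and that this
# module is importable, e.g. as part of the ray.util.dask package):
#
#     import ray
#     import dask.array as da
#     ray.init()
#     x = da.ones((1000, 1000), chunks=(100, 100))
#     total = x.sum().compute(scheduler=ray_dask_get)            # threaded submission
#     total_sync = x.sum().compute(scheduler=ray_dask_get_sync)  # synchronous submission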
|
|
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_db import exception as db_exc
from nova.db import api as db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova.db.sqlalchemy import models as main_models
from nova import exception
from nova.objects import base
from nova.objects import fields
from nova import quota
def ids_from_instance(context, instance):
if (context.is_admin and
context.project_id != instance['project_id']):
project_id = instance['project_id']
else:
project_id = context.project_id
if context.user_id != instance['user_id']:
user_id = instance['user_id']
else:
user_id = context.user_id
return project_id, user_id
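# Illustrative example (derived from the logic above): for an admin context
# acting on another project's instance, the instance's identifiers win, e.g.
#
#   # context.project_id == 'admin-proj', instance['project_id'] == 'user-proj'
#   # context.user_id == 'admin',         instance['user_id'] == 'alice'
#   # ids_from_instance(context, instance) -> ('user-proj', 'alice')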
# TODO(lyj): This method needs to be cleaned up once the
# ids_from_instance helper method is renamed or some common
# method is added for objects.quotas.
def ids_from_security_group(context, security_group):
return ids_from_instance(context, security_group)
# TODO(PhilD): This method needs to be cleaned up once the
# ids_from_instance helper method is renamed or some common
# method is added for objects.quotas.
def ids_from_server_group(context, server_group):
return ids_from_instance(context, server_group)
@base.NovaObjectRegistry.register
class Quotas(base.NovaObject):
# Version 1.0: initial version
# Version 1.1: Added create_limit() and update_limit()
# Version 1.2: Added limit_check() and count()
# Version 1.3: Added check_deltas(), limit_check_project_and_user(),
# and count_as_dict()
VERSION = '1.3'
fields = {
# TODO(melwitt): Remove this field in version 2.0 of the object.
'reservations': fields.ListOfStringsField(nullable=True),
'project_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
}
def __init__(self, *args, **kwargs):
super(Quotas, self).__init__(*args, **kwargs)
# Set up defaults.
self.reservations = []
self.project_id = None
self.user_id = None
self.obj_reset_changes()
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db(context, project_id, resource, user_id=None):
model = api_models.ProjectUserQuota if user_id else api_models.Quota
query = context.session.query(model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
query = query.filter_by(user_id=user_id)
result = query.first()
if not result:
if user_id:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@staticmethod
@db_api.api_context_manager.reader
def _get_all_from_db(context, project_id):
return context.session.query(api_models.ProjectUserQuota).\
filter_by(project_id=project_id).\
all()
@staticmethod
@db_api.api_context_manager.reader
def _get_all_from_db_by_project(context, project_id):
# by_project refers to the returned dict that has a 'project_id' key
rows = context.session.query(api_models.Quota).\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@staticmethod
@db_api.api_context_manager.reader
def _get_all_from_db_by_project_and_user(context, project_id, user_id):
# by_project_and_user refers to the returned dict that has
# 'project_id' and 'user_id' keys
columns = (api_models.ProjectUserQuota.resource,
api_models.ProjectUserQuota.hard_limit)
user_quotas = context.session.query(*columns).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
all()
result = {'project_id': project_id, 'user_id': user_id}
for user_quota in user_quotas:
result[user_quota.resource] = user_quota.hard_limit
return result
@staticmethod
@db_api.api_context_manager.writer
def _destroy_all_in_db_by_project(context, project_id):
per_project = context.session.query(api_models.Quota).\
filter_by(project_id=project_id).\
delete(synchronize_session=False)
per_user = context.session.query(api_models.ProjectUserQuota).\
filter_by(project_id=project_id).\
delete(synchronize_session=False)
if not per_project and not per_user:
raise exception.ProjectQuotaNotFound(project_id=project_id)
@staticmethod
@db_api.api_context_manager.writer
def _destroy_all_in_db_by_project_and_user(context, project_id, user_id):
result = context.session.query(api_models.ProjectUserQuota).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
delete(synchronize_session=False)
if not result:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
@staticmethod
@db_api.api_context_manager.reader
def _get_class_from_db(context, class_name, resource):
result = context.session.query(api_models.QuotaClass).\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
@staticmethod
@db_api.api_context_manager.reader
def _get_all_class_from_db_by_name(context, class_name):
# by_name refers to the returned dict that has a 'class_name' key
rows = context.session.query(api_models.QuotaClass).\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@staticmethod
@db_api.api_context_manager.writer
def _create_limit_in_db(context, project_id, resource, limit,
user_id=None):
# TODO(melwitt): We won't have per project resources after nova-network
# is removed.
per_user = (user_id and
resource not in db_api.quota_get_per_project_resources())
quota_ref = (api_models.ProjectUserQuota() if per_user
else api_models.Quota())
if per_user:
quota_ref.user_id = user_id
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
quota_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id,
resource=resource)
return quota_ref
@staticmethod
@db_api.api_context_manager.writer
def _update_limit_in_db(context, project_id, resource, limit,
user_id=None):
# TODO(melwitt): We won't have per project resources after nova-network
# is removed.
per_user = (user_id and
resource not in db_api.quota_get_per_project_resources())
model = api_models.ProjectUserQuota if per_user else api_models.Quota
query = context.session.query(model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if per_user:
query = query.filter_by(user_id=user_id)
result = query.update({'hard_limit': limit})
if not result:
if per_user:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
@staticmethod
@db_api.api_context_manager.writer
def _create_class_in_db(context, class_name, resource, limit):
# NOTE(melwitt): There's no unique constraint on the QuotaClass model,
        # so check for duplicates manually.
try:
Quotas._get_class_from_db(context, class_name, resource)
except exception.QuotaClassNotFound:
pass
else:
raise exception.QuotaClassExists(class_name=class_name,
resource=resource)
quota_class_ref = api_models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
quota_class_ref.save(context.session)
return quota_class_ref
@staticmethod
@db_api.api_context_manager.writer
def _update_class_in_db(context, class_name, resource, limit):
result = context.session.query(api_models.QuotaClass).\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
update({'hard_limit': limit})
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
# TODO(melwitt): Remove this method in version 2.0 of the object.
@base.remotable
def reserve(self, expire=None, project_id=None, user_id=None,
**deltas):
# Honor the expected attributes even though we're not reserving
# anything anymore. This will protect against things exploding if
# someone has an Ocata compute host running by accident, for example.
self.reservations = None
self.project_id = project_id
self.user_id = user_id
self.obj_reset_changes()
# TODO(melwitt): Remove this method in version 2.0 of the object.
@base.remotable
def commit(self):
pass
# TODO(melwitt): Remove this method in version 2.0 of the object.
@base.remotable
def rollback(self):
pass
@base.remotable_classmethod
def limit_check(cls, context, project_id=None, user_id=None, **values):
"""Check quota limits."""
return quota.QUOTAS.limit_check(
context, project_id=project_id, user_id=user_id, **values)
@base.remotable_classmethod
def limit_check_project_and_user(cls, context, project_values=None,
user_values=None, project_id=None,
user_id=None):
"""Check values against quota limits."""
return quota.QUOTAS.limit_check_project_and_user(context,
project_values=project_values, user_values=user_values,
project_id=project_id, user_id=user_id)
# NOTE(melwitt): This can be removed once no old code can call count().
@base.remotable_classmethod
def count(cls, context, resource, *args, **kwargs):
"""Count a resource."""
count = quota.QUOTAS.count_as_dict(context, resource, *args, **kwargs)
key = 'user' if 'user' in count else 'project'
return count[key][resource]
@base.remotable_classmethod
def count_as_dict(cls, context, resource, *args, **kwargs):
"""Count a resource and return a dict."""
return quota.QUOTAS.count_as_dict(
context, resource, *args, **kwargs)
@base.remotable_classmethod
def check_deltas(cls, context, deltas, *count_args, **count_kwargs):
"""Check usage delta against quota limits.
This does a Quotas.count_as_dict() followed by a
Quotas.limit_check_project_and_user() using the provided deltas.
:param context: The request context, for access checks
:param deltas: A dict of {resource_name: delta, ...} to check against
the quota limits
:param count_args: Optional positional arguments to pass to
count_as_dict()
:param count_kwargs: Optional keyword arguments to pass to
count_as_dict()
:param check_project_id: Optional project_id for scoping the limit
check to a different project than in the
context
:param check_user_id: Optional user_id for scoping the limit check to a
different user than in the context
:raises: exception.OverQuota if the limit check exceeds the quota
limits
"""
# We can't do f(*args, kw=None, **kwargs) in python 2.x
check_project_id = count_kwargs.pop('check_project_id', None)
check_user_id = count_kwargs.pop('check_user_id', None)
check_kwargs = collections.defaultdict(dict)
for resource in deltas:
# If we already counted a resource in a batch count, avoid
# unnecessary re-counting and avoid creating empty dicts in
# the defaultdict.
if (resource in check_kwargs.get('project_values', {}) or
resource in check_kwargs.get('user_values', {})):
continue
count = cls.count_as_dict(context, resource, *count_args,
**count_kwargs)
for res in count.get('project', {}):
if res in deltas:
total = count['project'][res] + deltas[res]
check_kwargs['project_values'][res] = total
for res in count.get('user', {}):
if res in deltas:
total = count['user'][res] + deltas[res]
check_kwargs['user_values'][res] = total
if check_project_id is not None:
check_kwargs['project_id'] = check_project_id
if check_user_id is not None:
check_kwargs['user_id'] = check_user_id
try:
cls.limit_check_project_and_user(context, **check_kwargs)
except exception.OverQuota as exc:
# Report usage in the exception when going over quota
key = 'user' if 'user' in count else 'project'
exc.kwargs['usages'] = count[key]
raise exc
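    # Illustrative usage sketch (hypothetical arguments): checking whether one
    # more instance would exceed the project/user quota might look roughly like
    #
    #   Quotas.check_deltas(context, {'instances': 1, 'cores': 2, 'ram': 512},
    #                       project_id, user_id=user_id)
    #
    # where the arguments after the deltas dict are forwarded to count_as_dict()
    # for each counted resource.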
@base.remotable_classmethod
def create_limit(cls, context, project_id, resource, limit, user_id=None):
try:
db.quota_get(context, project_id, resource, user_id=user_id)
except exception.QuotaNotFound:
cls._create_limit_in_db(context, project_id, resource, limit,
user_id=user_id)
else:
raise exception.QuotaExists(project_id=project_id,
resource=resource)
@base.remotable_classmethod
def update_limit(cls, context, project_id, resource, limit, user_id=None):
try:
cls._update_limit_in_db(context, project_id, resource, limit,
user_id=user_id)
except exception.QuotaNotFound:
db.quota_update(context, project_id, resource, limit,
user_id=user_id)
@classmethod
def create_class(cls, context, class_name, resource, limit):
try:
db.quota_class_get(context, class_name, resource)
except exception.QuotaClassNotFound:
cls._create_class_in_db(context, class_name, resource, limit)
else:
raise exception.QuotaClassExists(class_name=class_name,
resource=resource)
@classmethod
def update_class(cls, context, class_name, resource, limit):
try:
cls._update_class_in_db(context, class_name, resource, limit)
except exception.QuotaClassNotFound:
db.quota_class_update(context, class_name, resource, limit)
# NOTE(melwitt): The following methods are not remotable and return
# dict-like database model objects. We are using classmethods to provide
# a common interface for accessing the api/main databases.
@classmethod
def get(cls, context, project_id, resource, user_id=None):
try:
quota = cls._get_from_db(context, project_id, resource,
user_id=user_id)
except exception.QuotaNotFound:
quota = db.quota_get(context, project_id, resource,
user_id=user_id)
return quota
@classmethod
def get_all(cls, context, project_id):
api_db_quotas = cls._get_all_from_db(context, project_id)
main_db_quotas = db.quota_get_all(context, project_id)
return api_db_quotas + main_db_quotas
@classmethod
def get_all_by_project(cls, context, project_id):
api_db_quotas_dict = cls._get_all_from_db_by_project(context,
project_id)
main_db_quotas_dict = db.quota_get_all_by_project(context, project_id)
for k, v in api_db_quotas_dict.items():
main_db_quotas_dict[k] = v
return main_db_quotas_dict
@classmethod
def get_all_by_project_and_user(cls, context, project_id, user_id):
api_db_quotas_dict = cls._get_all_from_db_by_project_and_user(
context, project_id, user_id)
main_db_quotas_dict = db.quota_get_all_by_project_and_user(
context, project_id, user_id)
for k, v in api_db_quotas_dict.items():
main_db_quotas_dict[k] = v
return main_db_quotas_dict
@classmethod
def destroy_all_by_project(cls, context, project_id):
try:
cls._destroy_all_in_db_by_project(context, project_id)
except exception.ProjectQuotaNotFound:
db.quota_destroy_all_by_project(context, project_id)
@classmethod
def destroy_all_by_project_and_user(cls, context, project_id, user_id):
try:
cls._destroy_all_in_db_by_project_and_user(context, project_id,
user_id)
except exception.ProjectUserQuotaNotFound:
db.quota_destroy_all_by_project_and_user(context, project_id,
user_id)
@classmethod
def get_class(cls, context, class_name, resource):
try:
qclass = cls._get_class_from_db(context, class_name, resource)
except exception.QuotaClassNotFound:
qclass = db.quota_class_get(context, class_name, resource)
return qclass
@classmethod
def get_default_class(cls, context):
try:
qclass = cls._get_all_class_from_db_by_name(
context, db_api._DEFAULT_QUOTA_NAME)
except exception.QuotaClassNotFound:
qclass = db.quota_class_get_default(context)
return qclass
@classmethod
def get_all_class_by_name(cls, context, class_name):
api_db_quotas_dict = cls._get_all_class_from_db_by_name(context,
class_name)
main_db_quotas_dict = db.quota_class_get_all_by_name(context,
class_name)
for k, v in api_db_quotas_dict.items():
main_db_quotas_dict[k] = v
return main_db_quotas_dict
@base.NovaObjectRegistry.register
class QuotasNoOp(Quotas):
# TODO(melwitt): Remove this method in version 2.0 of the object.
def reserve(context, expire=None, project_id=None, user_id=None,
**deltas):
pass
# TODO(melwitt): Remove this method in version 2.0 of the object.
def commit(self, context=None):
pass
# TODO(melwitt): Remove this method in version 2.0 of the object.
def rollback(self, context=None):
pass
def check_deltas(cls, context, deltas, *count_args, **count_kwargs):
pass
@db_api.require_context
@db_api.pick_context_manager_reader
def _get_main_per_project_limits(context, limit):
return context.session.query(main_models.Quota).\
filter_by(deleted=0).\
limit(limit).\
all()
@db_api.require_context
@db_api.pick_context_manager_reader
def _get_main_per_user_limits(context, limit):
return context.session.query(main_models.ProjectUserQuota).\
filter_by(deleted=0).\
limit(limit).\
all()
@db_api.require_context
@db_api.pick_context_manager_writer
def _destroy_main_per_project_limits(context, project_id, resource):
context.session.query(main_models.Quota).\
filter_by(deleted=0).\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
soft_delete(synchronize_session=False)
@db_api.require_context
@db_api.pick_context_manager_writer
def _destroy_main_per_user_limits(context, project_id, resource, user_id):
context.session.query(main_models.ProjectUserQuota).\
filter_by(deleted=0).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
filter_by(resource=resource).\
soft_delete(synchronize_session=False)
@db_api.api_context_manager.writer
def _create_limits_in_api_db(context, db_limits, per_user=False):
for db_limit in db_limits:
user_id = db_limit.user_id if per_user else None
Quotas._create_limit_in_db(context, db_limit.project_id,
db_limit.resource, db_limit.hard_limit,
user_id=user_id)
def migrate_quota_limits_to_api_db(context, count):
# Migrate per project limits
main_per_project_limits = _get_main_per_project_limits(context, count)
done = 0
try:
# Create all the limits in a single transaction.
_create_limits_in_api_db(context, main_per_project_limits)
except exception.QuotaExists:
# NOTE(melwitt): This can happen if the migration is interrupted after
# limits were created in the api db but before they were deleted from
# the main db, and the migration is re-run.
pass
# Delete the limits separately.
for db_limit in main_per_project_limits:
_destroy_main_per_project_limits(context, db_limit.project_id,
db_limit.resource)
done += 1
if done == count:
return len(main_per_project_limits), done
# Migrate per user limits
count -= done
main_per_user_limits = _get_main_per_user_limits(context, count)
try:
# Create all the limits in a single transaction.
_create_limits_in_api_db(context, main_per_user_limits, per_user=True)
except exception.QuotaExists:
# NOTE(melwitt): This can happen if the migration is interrupted after
# limits were created in the api db but before they were deleted from
# the main db, and the migration is re-run.
pass
# Delete the limits separately.
for db_limit in main_per_user_limits:
_destroy_main_per_user_limits(context, db_limit.project_id,
db_limit.resource, db_limit.user_id)
done += 1
return len(main_per_project_limits) + len(main_per_user_limits), done
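# Illustrative sketch (assumption): this has the shape of an online data
# migration helper, typically invoked in batches until no more rows are found,
# e.g.
#
#   found, done = migrate_quota_limits_to_api_db(context, 50)
#   # repeat until found == 0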
@db_api.require_context
@db_api.pick_context_manager_reader
def _get_main_quota_classes(context, limit):
return context.session.query(main_models.QuotaClass).\
filter_by(deleted=0).\
limit(limit).\
all()
@db_api.pick_context_manager_writer
def _destroy_main_quota_classes(context, db_classes):
for db_class in db_classes:
context.session.query(main_models.QuotaClass).\
filter_by(deleted=0).\
filter_by(id=db_class.id).\
soft_delete(synchronize_session=False)
@db_api.api_context_manager.writer
def _create_classes_in_api_db(context, db_classes):
for db_class in db_classes:
Quotas._create_class_in_db(context, db_class.class_name,
db_class.resource, db_class.hard_limit)
def migrate_quota_classes_to_api_db(context, count):
main_quota_classes = _get_main_quota_classes(context, count)
done = 0
try:
# Create all the classes in a single transaction.
_create_classes_in_api_db(context, main_quota_classes)
except exception.QuotaClassExists:
# NOTE(melwitt): This can happen if the migration is interrupted after
# classes were created in the api db but before they were deleted from
# the main db, and the migration is re-run.
pass
# Delete the classes in a single transaction.
_destroy_main_quota_classes(context, main_quota_classes)
found = done = len(main_quota_classes)
return found, done
|
|
# Copyright 2017-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import jmespath
import six
from c7n.exceptions import PolicyExecutionError, PolicyValidationError
from c7n import utils
from .core import Action
class ModifyVpcSecurityGroupsAction(Action):
"""Common action for modifying security groups on a vpc attached resources.
Security groups for add or remove can be specified via group id or
name. Group removal also supports symbolic names such as
'matched', 'network-location' or 'all'. 'matched' uses the
annotations/output of the 'security-group' filter
filter. 'network-location' uses the annotations of the
'network-location' interface filter for `SecurityGroupMismatch`.
Note a vpc attached resource requires at least one security group,
this action will use the sg specified in `isolation-group` to ensure
resources always have at least one security-group.
type: modify-security-groups
add: []
remove: [] | matched | network-location
isolation-group: sg-xyz
"""
schema_alias = True
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'type': {'enum': ['modify-security-groups']},
'add': {'oneOf': [
{'type': 'string'},
{'type': 'array', 'items': {
'type': 'string'}}]},
'remove': {'oneOf': [
{'type': 'array', 'items': {
'type': 'string'}},
{'enum': [
'matched', 'network-location', 'all',
{'type': 'string'}]}]},
'isolation-group': {'oneOf': [
{'type': 'string'},
{'type': 'array', 'items': {
'type': 'string'}}]}},
'anyOf': [
{'required': ['isolation-group', 'remove', 'type']},
{'required': ['add', 'remove', 'type']},
{'required': ['add', 'type']}]
}
SYMBOLIC_SGS = set(('all', 'matched', 'network-location'))
sg_expr = None
vpc_expr = None
def validate(self):
sg_filter = self.manager.filter_registry.get('security-group')
if not sg_filter or not sg_filter.RelatedIdsExpression:
raise PolicyValidationError(self._format_error((
"policy:{policy} resource:{resource_type} does "
"not support {action_type} action")))
if self.get_action_group_names():
vpc_filter = self.manager.filter_registry.get('vpc')
if not vpc_filter or not vpc_filter.RelatedIdsExpression:
raise PolicyValidationError(self._format_error((
"policy:{policy} resource:{resource_type} does not support "
"security-group names only ids in action:{action_type}")))
self.vpc_expr = jmespath.compile(vpc_filter.RelatedIdsExpression)
if self.sg_expr is None:
self.sg_expr = jmespath.compile(
self.manager.filter_registry.get('security-group').RelatedIdsExpression)
if 'all' in self._get_array('remove') and not self._get_array('isolation-group'):
raise PolicyValidationError(self._format_error((
"policy:{policy} use of action:{action_type} with "
"remove: all requires specifying isolation-group")))
return self
def get_group_names(self, groups):
names = []
for g in groups:
if g.startswith('sg-'):
continue
elif g in self.SYMBOLIC_SGS:
continue
names.append(g)
return names
def get_action_group_names(self):
"""Return all the security group names configured in this action."""
return self.get_group_names(
list(itertools.chain(
*[self._get_array('add'),
self._get_array('remove'),
self._get_array('isolation-group')])))
def _format_error(self, msg, **kw):
return msg.format(
policy=self.manager.ctx.policy.name,
resource_type=self.manager.type,
action_type=self.type,
**kw)
def _get_array(self, k):
v = self.data.get(k, [])
if isinstance(v, six.string_types):
return [v]
return v
def get_groups_by_names(self, names):
"""Resolve security names to security groups resources."""
if not names:
return []
client = utils.local_session(
self.manager.session_factory).client('ec2')
sgs = self.manager.retry(
client.describe_security_groups,
Filters=[{
'Name': 'group-name', 'Values': names}]).get(
'SecurityGroups', [])
unresolved = set(names)
for s in sgs:
if s['GroupName'] in unresolved:
unresolved.remove(s['GroupName'])
if unresolved:
raise PolicyExecutionError(self._format_error(
"policy:{policy} security groups not found "
"requested: {names}, found: {groups}",
names=list(unresolved), groups=[g['GroupId'] for g in sgs]))
return sgs
def resolve_group_names(self, r, target_group_ids, groups):
"""Resolve any security group names to the corresponding group ids
With the context of a given network attached resource.
"""
names = self.get_group_names(target_group_ids)
if not names:
return target_group_ids
target_group_ids = list(target_group_ids)
vpc_id = self.vpc_expr.search(r)
if not vpc_id:
raise PolicyExecutionError(self._format_error(
"policy:{policy} non vpc attached resource used "
"with modify-security-group: {resource_id}",
resource_id=r[self.manager.resource_type.id]))
found = False
for n in names:
for g in groups:
if g['GroupName'] == n and g['VpcId'] == vpc_id:
found = g['GroupId']
if not found:
raise PolicyExecutionError(self._format_error((
"policy:{policy} could not resolve sg:{name} for "
"resource:{resource_id} in vpc:{vpc}"),
name=n,
resource_id=r[self.manager.resource_type.id], vpc=vpc_id))
target_group_ids.remove(n)
target_group_ids.append(found)
return target_group_ids
def resolve_remove_symbols(self, r, target_group_ids, rgroups):
"""Resolve the resources security groups that need be modified.
Specifically handles symbolic names that match annotations from policy filters
for groups being removed.
"""
if 'matched' in target_group_ids:
return r.get('c7n:matched-security-groups', ())
elif 'network-location' in target_group_ids:
for reason in r.get('c7n:NetworkLocation', ()):
if reason['reason'] == 'SecurityGroupMismatch':
return list(reason['security-groups'])
elif 'all' in target_group_ids:
return rgroups
return target_group_ids
def get_groups(self, resources):
"""Return lists of security groups to set on each resource
For each input resource, parse the various add/remove/isolation-
group policies for 'modify-security-groups' to find the resulting
set of VPC security groups to attach to that resource.
Returns a list of lists containing the resulting VPC security groups
that should end up on each resource passed in.
:param resources: List of resources containing VPC Security Groups
:return: List of lists of security groups per resource
"""
resolved_groups = self.get_groups_by_names(self.get_action_group_names())
return_groups = []
for idx, r in enumerate(resources):
rgroups = self.sg_expr.search(r) or []
add_groups = self.resolve_group_names(
r, self._get_array('add'), resolved_groups)
remove_groups = self.resolve_remove_symbols(
r,
self.resolve_group_names(
r, self._get_array('remove'), resolved_groups),
rgroups)
isolation_groups = self.resolve_group_names(
r, self._get_array('isolation-group'), resolved_groups)
for g in remove_groups:
if g in rgroups:
rgroups.remove(g)
for g in add_groups:
if g not in rgroups:
rgroups.append(g)
if not rgroups:
rgroups = list(isolation_groups)
return_groups.append(rgroups)
return return_groups
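    # Illustrative worked example (hypothetical data): for a resource currently
    # attached to ['sg-aaa', 'sg-bbb'] with remove: ['sg-aaa'] and add: ['sg-ccc'],
    # the per-resource result appended to return_groups would be
    # ['sg-bbb', 'sg-ccc']; if every current group were removed, the
    # isolation-group(s) would be used instead.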
|
|
'''Some custom parmatters useful for various things. See the readme.'''
from .base import ParmatterBase
from ..utilities import args_kwargs_from_args
from ..blank import make_blank
from ..minilang import parse_spec, parse_format_str
import parse as _parse # avoid name conflicts with parse methods
# NOTE: The parse module seems to have some trouble with string fields and spaces around them. Don't implicitly trust it.
class StaticParmatter(ParmatterBase):
'''A parsing formatter with a designated format string.'''
def __init__(self, format_str, *args, **kwargs):
self._format_str = format_str
self.set_parser(self._format_str, dict(s=str))
super().__init__(*args, **kwargs)
def format(self, *args, **kwargs):
'''ParmatterBase.format overridden to remove format_str from the signature.'''
return super().format(self._format_str, *args, **kwargs)
def unformat(self, string):
'''ParmatterBase.unformat overridden to use compiled parser.'''
return self._parser.parse(string)
def set_parser(self, format_str, extra_types=dict(s=str)):
'''Sets a static parser for the parmatter.'''
self._parser = _parse.compile(format_str, extra_types)
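# Illustrative usage sketch (assumes ParmatterBase provides string.Formatter-style
# format() and the compiled parse parser handles the spec types used):
#
#   p = StaticParmatter('x={a:d} y={b:d}')
#   p.format(a=1, b=2)       # -> 'x=1 y=2'
#   p.unformat('x=3 y=4')    # -> parse Result with a=3, b=4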
class FloatIntParmatter(StaticParmatter):
'''A parsing formatter which has the option of using a new custom spec, "fd".
The "fd" spec indicates a float value that could also be read as an int.
Example usage:
>>> list(FloatIntParmatter().unformat('{:fd}', '1'))
1.0
>>> list(FloatIntParmatter().unformat('{:fd}', '1.0'))
1.0
>>> FloatIntParmatter().format('{:.1fd}', 1)
'1.0'
'''
def format_field(self, value, spec):
        '''Replace fd with f when formatting is carried out so that the fd
        format behaves exactly like f during formatting.'''
spec_tup = parse_spec(spec, strict=False)
spec = spec_tup._replace(type=spec_tup.type.replace('fd', 'f')).join()
return super().format_field(value, spec)
# float or int regex
@staticmethod
@_parse.with_pattern(r'[+-]?((\.\d+)|(\d+\.\d*)|\d+)')
def _fd(s):
        '''Method used by the parse module to populate parse.Result
        output for the fd formatting spec.
        '''
return float(s)
def set_parser(self, format_str, extra_types=dict(s=str)):
'''Sets a static parser for the parmatter, including new fd spec.'''
if 'fd' not in extra_types:
extra_types.update(fd=FloatIntParmatter._fd)
self._parser = _parse.compile(format_str, extra_types)
class BlankParmatter(StaticParmatter):
'''A parsing formatter which has the option of using a new custom spec, "blank".
The "blank" spec indicates a value that could also be read as whitespace.
Example usage:
>>> list(BlankParmatter().unformat('{:dblank}', '1'))
1
>>> list(BlankParmatter().unformat('{:dblank}', ' '))
0
>>> list(BlankParmatter().unformat('{:fblank}', '1.0'))
1.0
>>> BlankParmatter().format('{:.1fblank}', 0.0)
' '
>>> BlankParmatter().format('{:.1fblank}', 1.1)
'1.1'
'''
# regex for "all blank space"
blank_pattern = r'\s*'
    # For initializing the different format type codes, use a mapping that
    # associates each with a function decorated by with_pattern(blank_pattern);
    # this is only used when the spec includes "blank".
@staticmethod
def _blank_handler(f, pattern):
'''Handler factory for special spec types.'''
@_parse.with_pattern(pattern)
def _handler(s):
if s.split():
return f(s)
else:
return f()
return _handler
# to be replaced below:
def blank_type_to_func(handler_factory, pattern):
return {k:handler_factory(v, pattern) for k,v in
{'':str, 'fd':float, 's':str,
'd':int, 'f':float, 'n':int}.items()}
# replacing function above:
blank_type_to_func = blank_type_to_func(_blank_handler.__func__, blank_pattern)
def format_field(self, value, spec):
'''Replace value with a Blank object when formatting is carried out. The
Blank object type knows how to deal with the "blank" spec type. If the value
is just white space, a blank_initializer based on the spec type is passed
instead.
'''
spec_tup = parse_spec(spec, strict=False)
if 'blank' in spec_tup.type:
try:
# is it a string?
blanketyblank = value.strip()
except AttributeError:
# not a string; no problem
pass
else:
# falsey stripped string?
if not blanketyblank:
# for looking up blank_initializer
spec_type = spec_tup.type.replace('blank', '')
# replace value (eg 0 for types such as int and float)
value = BlankParmatter.blank_type_to_func[spec_type]()
# falsey objects from make_blank will appear blank when formatted
value = make_blank(value)
return super().format_field(value, spec)
def set_parser(self, format_str, extra_types=dict(s=str)):
'''Add new blank spec suffixes to the parser's extra_types argument.'''
# Need to add a different blank spec handler for each of the different kinds of
# format spec types (d, n, f, s, etc etc) in the format_str
# First parse the format_str into usable form (borrowing code from parse module)
# note: '{{' and '}}' come in from parse_format_str as separate parts
fields = (part for part in parse_format_str(format_str)
if part and part[0] == '{' and part[-1] == '}')
# gather the non-blank spec types and add a handler for each
for field in fields:
no_brackets = field[1:-1]
try:
spec_tup = parse_spec(no_brackets.split(':', 1)[1], strict=False)
except IndexError:
raise ValueError('No format specification was provided for the parser.')
# get version of the spec without the word "blank"
spec_tup_new = spec_tup._replace(type=spec_tup.type.replace('blank', ''))
# get the correct with_pattern blank initializer for the spec type
try:
blank_initializer = BlankParmatter.blank_type_to_func[spec_tup_new.type]
except KeyError as err:
raise KeyError('The spec type {!r} does not have an associated initializer.'
''.format(spec_tup.type)) from err
# pass the initializer (decorated by with_pattern) to the extra_types dict for the parser
extra_types.update({spec_tup.type: lambda s: blank_initializer(s)})
# the original format_str is unaffected
super().set_parser(format_str, extra_types)
class DefaultParmatter(ParmatterBase):
    '''A parsing formatter with an arbitrary default namespace.
    For keys, use ints for the indexes of positional arguments and strs
    for named fields.'''
def __init__(self, default_namespace, *args, **kwargs):
self.default_namespace = default_namespace
super().__init__(*args, **kwargs)
def get_value(self, key, args, kwargs):
try:
return super().get_value(key, args, kwargs)
except (KeyError, IndexError) as normal_exc:
try:
return self.default_namespace[key]
except KeyError:
ExcType = type(normal_exc)
lookup_type = {KeyError:'key', IndexError:'index'}[ExcType]
raise ExcType('No default argument was provided for this formatter, {} = {}.'.format(lookup_type, repr(key))) from None
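# Illustrative sketch (assumes ParmatterBase exposes string.Formatter-style
# format()): missing arguments fall back to the default namespace, e.g.
#
#   f = DefaultParmatter({0: 'fallback', 'name': 'anon'})
#   f.format('{0} {name}')           # -> 'fallback anon'
#   f.format('{0} {name}', 'given')  # -> 'given anon'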
class AttrParmatter(ParmatterBase):
'''A Parmatter that looks in args object attributes for values.
The args are inspected in order. First one wins.
Callable attributes are ignored.'''
def get_value(self, key, args, kwargs):
# sentinel marks lookup failure
sentinel = object()
# get the normal value
try:
value_norm = super().get_value(key, args, kwargs)
# no value; error stored to be raised later if no attribute value
except (KeyError, IndexError) as exc:
value_norm = sentinel
normal_exc = exc
# return result if the key can't be an attribute
else:
# docs say key is either str or int
if isinstance(key, int):
return value_norm
# assume no attribute values
value_attr = sentinel
# look for attribute values
for arg in args:
# try to get the attribute value
value_attr = getattr(arg, key, sentinel)
# check if found one (no callables!)
if not callable(value_attr) and value_attr is not sentinel:
break
else:
# discard any methods
value_attr = sentinel
continue
# if no value; raise error as usual
if value_norm is value_attr is sentinel:
raise normal_exc
# if two values, there is an unresolvable name conflict
if value_norm is not sentinel and value_attr is not sentinel:
raise ValueError('The name {} is both an attribute of first argument {} object and a key in the keyword arguments. Cannot resolve.'.format(key, type(args[0]).__name__))
        # exactly one of the two values is the sentinel at this point;
        # indexing with the sentinel returns whichever value was actually found
        return {value_norm: value_attr, value_attr: value_norm}[sentinel]
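# Illustrative sketch (hypothetical namespace object): named fields can be
# pulled from an argument's attributes when they are not supplied as keywords,
# e.g. roughly:
#
#   class Point: x = 1
#   AttrParmatter().format('{x}', Point())   # -> '1'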
class PositionalDefaultParmatter(DefaultParmatter):
'''A formatter with a default positional namespace.'''
def __init__(self, *values, default_namespace={}, **kwargs):
default_namespace.update({i:value for i,value in enumerate(values)})
super().__init__(default_namespace, **kwargs)
@staticmethod
def args_parse(*args):
'''Form an alternate argument order to create a formatter.
args = '{}', 0, {a=2, b=3}
args, kwargs = PositionalDefaultParmatter.args_parse(*args)
f = PositionalDefaultParmatter(*args, **kwargs)
'''
namespace_slice = slice(-1,None,-1)
args, kwargs = args_kwargs_from_args(args, slc=namespace_slice, asdict=True, ignore_conflicts=True, terminate_on_failure=True)
kwargs = dict(default_namespace = kwargs)
return args, kwargs
class KeywordParmatter(StaticParmatter,DefaultParmatter,AttrParmatter):
'''A static formatter with a default keyword namespace that looks in args object
attributes for values. The args are inspected in order. First one wins.
Callable attributes are ignored.'''
def __init__(self, format_str, default_namespace, *args, **kwargs):
super().__init__(format_str, default_namespace, *args, **kwargs)
class VersatileParmatter(StaticParmatter,PositionalDefaultParmatter,AttrParmatter):
'''A static formatter with a default positional namespace that looks in args object
attributes for values. The args are inspected in order. First one wins.
Callable attributes are ignored.'''
def __init__(self, format_str, *values, default_namespace={}, **kwargs):
super().__init__(format_str, *values, default_namespace=default_namespace, **kwargs)
|
|
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Heat exception subclasses"""
import sys
from oslo_log import log as logging
import six
from six import reraise as raise_
from heat.common.i18n import _
from heat.common.i18n import _LE
_FATAL_EXCEPTION_FORMAT_ERRORS = False
LOG = logging.getLogger(__name__)
# TODO(kanagaraj-manickam): Expose this to user via REST API
ERROR_CODE_MAP = {
'99001': _("Service %(service_name)s is not available for resource "
"type %(resource_type)s, reason: %(reason)s")
}
@six.python_2_unicode_compatible
class HeatException(Exception):
"""Base Heat Exception.
To correctly use this class, inherit from it and define a 'msg_fmt'
property. That msg_fmt will get formatted with the keyword arguments
provided to the constructor.
"""
message = _("An unknown exception occurred.")
    # error_code helps to provide a unique number for a given exception
    # and is encoded in XXYYY format.
    # Here, XX - a unique number is assigned to each entity type (stack,
    # resource, etc.). All exceptions for an entity share the same XX code.
    # YYY - specific error code for a given exception.
error_code = None
def __init__(self, **kwargs):
self.kwargs = kwargs
try:
if self.error_code in ERROR_CODE_MAP:
self.msg_fmt = ERROR_CODE_MAP[self.error_code]
self.message = self.msg_fmt % kwargs
if self.error_code:
self.message = 'HEAT-E%s %s' % (self.error_code, self.message)
except KeyError:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error(_LE("%(name)s: %(value)s"),
{'name': name, 'value': value}) # noqa
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise_(exc_info[0], exc_info[1], exc_info[2])
def __str__(self):
return self.message
def __deepcopy__(self, memo):
return self.__class__(**self.kwargs)
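# Illustrative sketch (hypothetical subclass, mirroring the pattern below):
# concrete exceptions only need a msg_fmt; keyword arguments fill the format
# string, e.g.
#
#   class WidgetNotFound(HeatException):
#       msg_fmt = _("Widget %(widget)s could not be found.")
#
#   raise WidgetNotFound(widget='w-123')   # str(exc) == 'Widget w-123 could not be found.'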
class MissingCredentialError(HeatException):
msg_fmt = _("Missing required credential: %(required)s")
class AuthorizationFailure(HeatException):
msg_fmt = _("Authorization failed.")
class NotAuthenticated(HeatException):
msg_fmt = _("You are not authenticated.")
class Forbidden(HeatException):
msg_fmt = _("You are not authorized to use %(action)s.")
def __init__(self, action='this action'):
super(Forbidden, self).__init__(action=action)
# NOTE(bcwaldon): here for backwards-compatibility, need to deprecate.
class NotAuthorized(Forbidden):
msg_fmt = _("You are not authorized to complete this action.")
class Invalid(HeatException):
msg_fmt = _("Data supplied was not valid: %(reason)s")
class UserParameterMissing(HeatException):
msg_fmt = _("The Parameter (%(key)s) was not provided.")
class UnknownUserParameter(HeatException):
msg_fmt = _("The Parameter (%(key)s) was not defined in template.")
class InvalidTemplateVersion(HeatException):
msg_fmt = _("The template version is invalid: %(explanation)s")
class InvalidTemplateSection(HeatException):
msg_fmt = _("The template section is invalid: %(section)s")
class InvalidTemplateParameter(HeatException):
msg_fmt = _("The Parameter (%(key)s) has no attributes.")
class InvalidTemplateAttribute(HeatException):
msg_fmt = _("The Referenced Attribute (%(resource)s %(key)s)"
" is incorrect.")
class InvalidTemplateReference(HeatException):
msg_fmt = _('The specified reference "%(resource)s" (in %(key)s)'
' is incorrect.')
class EntityNotFound(HeatException):
msg_fmt = _("The %(entity)s (%(name)s) could not be found.")
def __init__(self, entity=None, name=None, **kwargs):
self.entity = entity
self.name = name
super(EntityNotFound, self).__init__(entity=entity, name=name,
**kwargs)
class PhysicalResourceNameAmbiguity(HeatException):
msg_fmt = _(
"Multiple physical resources were found with name (%(name)s).")
class InvalidTenant(HeatException):
msg_fmt = _("Searching Tenant %(target)s "
"from Tenant %(actual)s forbidden.")
class StackExists(HeatException):
msg_fmt = _("The Stack (%(stack_name)s) already exists.")
class HeatExceptionWithPath(HeatException):
msg_fmt = _("%(error)s%(path)s%(message)s")
def __init__(self, error=None, path=None, message=None):
self.error = error or ''
self.path = []
if path is not None:
if isinstance(path, list):
self.path = path
elif isinstance(path, six.string_types):
self.path = [path]
result_path = ''
for path_item in self.path:
if isinstance(path_item, int) or path_item.isdigit():
result_path += '[%s]' % path_item
elif len(result_path) > 0:
result_path += '.%s' % path_item
else:
result_path = path_item
self.error_message = message or ''
super(HeatExceptionWithPath, self).__init__(
error=('%s: ' % self.error if self.error != '' else ''),
path=('%s: ' % result_path if len(result_path) > 0 else ''),
message=self.error_message
)
def error(self):
return self.error
def path(self):
return self.path
def error_message(self):
return self.error_message
class StackValidationFailed(HeatExceptionWithPath):
pass
class InvalidSchemaError(HeatException):
msg_fmt = _("%(message)s")
class ResourceNotFound(EntityNotFound):
msg_fmt = _("The Resource (%(resource_name)s) could not be found "
"in Stack %(stack_name)s.")
class SnapshotNotFound(EntityNotFound):
msg_fmt = _("The Snapshot (%(snapshot)s) for Stack (%(stack)s) "
"could not be found.")
class InvalidGlobalResource(HeatException):
msg_fmt = _("There was an error loading the definition of the global "
"resource type %(type_name)s.")
class ResourceTypeUnavailable(HeatException):
error_code = '99001'
class InvalidBreakPointHook(HeatException):
msg_fmt = _("%(message)s")
class InvalidRestrictedAction(HeatException):
msg_fmt = _("%(message)s")
class ResourceNotAvailable(HeatException):
msg_fmt = _("The Resource (%(resource_name)s) is not available.")
class ClientNotAvailable(HeatException):
msg_fmt = _("The client (%(client_name)s) is not available.")
class WatchRuleNotFound(EntityNotFound):
"""Keep this for AWS compatiblility."""
msg_fmt = _("The Watch Rule (%(watch_name)s) could not be found.")
class ResourceFailure(HeatExceptionWithPath):
def __init__(self, exception_or_error, resource, action=None):
self.resource = resource
self.action = action
if action is None and resource is not None:
self.action = resource.action
path = []
res_path = []
if resource is not None:
res_path = [resource.stack.t.get_section_name('resources'),
resource.name]
if isinstance(exception_or_error, Exception):
if isinstance(exception_or_error, ResourceFailure):
self.exc = exception_or_error.exc
error = exception_or_error.error
message = exception_or_error.error_message
path = exception_or_error.path
else:
self.exc = exception_or_error
error = six.text_type(type(self.exc).__name__)
message = six.text_type(self.exc)
path = res_path
else:
self.exc = None
res_failed = 'Resource %s failed: ' % action.upper()
if res_failed in exception_or_error:
(error, message, new_path) = self._from_status_reason(
exception_or_error)
path = res_path + new_path
else:
path = res_path
error = None
message = exception_or_error
super(ResourceFailure, self).__init__(error=error, path=path,
message=message)
def _from_status_reason(self, status_reason):
"""Split the status_reason up into parts.
Given the following status_reason:
"Resource DELETE failed: Exception : resources.AResource: foo"
we are going to return:
("Exception", "resources.AResource", "foo")
"""
parsed = [sp.strip() for sp in status_reason.split(':')]
if len(parsed) >= 4:
error = parsed[1]
message = ': '.join(parsed[3:])
path = parsed[2].split('.')
else:
error = ''
message = status_reason
path = []
return (error, message, path)
class NotSupported(HeatException):
msg_fmt = _("%(feature)s is not supported.")
class ResourceActionNotSupported(HeatException):
msg_fmt = _("%(action)s is not supported for resource.")
class ResourceActionRestricted(HeatException):
msg_fmt = _("%(action)s is restricted for resource.")
class ResourcePropertyConflict(HeatException):
msg_fmt = _('Cannot define the following properties '
'at the same time: %(props)s.')
def __init__(self, *args, **kwargs):
if args:
kwargs.update({'props': ", ".join(args)})
super(ResourcePropertyConflict, self).__init__(**kwargs)
class ResourcePropertyDependency(HeatException):
msg_fmt = _('%(prop1)s cannot be specified without %(prop2)s.')
class ResourcePropertyValueDependency(HeatException):
msg_fmt = _('%(prop1)s property should only be specified '
'for %(prop2)s with value %(value)s.')
class PropertyUnspecifiedError(HeatException):
msg_fmt = _('At least one of the following properties '
'must be specified: %(props)s')
def __init__(self, *args, **kwargs):
if args:
kwargs.update({'props': ", ".join(args)})
super(PropertyUnspecifiedError, self).__init__(**kwargs)
class UpdateReplace(Exception):
"""Raised when resource update requires replacement."""
def __init__(self, resource_name='Unknown'):
msg = _("The Resource %s requires replacement.") % resource_name
super(Exception, self).__init__(six.text_type(msg))
class ResourceUnknownStatus(HeatException):
msg_fmt = _('%(result)s - Unknown status %(resource_status)s due to '
'"%(status_reason)s"')
def __init__(self, result=_('Resource failed'),
status_reason=_('Unknown'), **kwargs):
super(ResourceUnknownStatus, self).__init__(
result=result, status_reason=status_reason, **kwargs)
class ResourceInError(HeatException):
msg_fmt = _('Went to status %(resource_status)s '
'due to "%(status_reason)s"')
def __init__(self, status_reason=_('Unknown'), **kwargs):
super(ResourceInError, self).__init__(status_reason=status_reason,
**kwargs)
class UpdateInProgress(Exception):
def __init__(self, resource_name='Unknown'):
msg = _("The resource %s is already being updated.") % resource_name
super(Exception, self).__init__(six.text_type(msg))
class HTTPExceptionDisguise(Exception):
"""Disguises HTTP exceptions.
They can be handled by the webob fault application in the wsgi pipeline.
"""
def __init__(self, exception):
self.exc = exception
self.tb = sys.exc_info()[2]
class EgressRuleNotAllowed(HeatException):
msg_fmt = _("Egress rules are only allowed when "
"Neutron is used and the 'VpcId' property is set.")
class Error(HeatException):
msg_fmt = "%(message)s"
def __init__(self, msg):
super(Error, self).__init__(message=msg)
class NotFound(HeatException):
def __init__(self, msg_fmt=_('Not found')):
self.msg_fmt = msg_fmt
super(NotFound, self).__init__()
class InvalidContentType(HeatException):
msg_fmt = _("Invalid content type %(content_type)s")
class RequestLimitExceeded(HeatException):
msg_fmt = _('Request limit exceeded: %(message)s')
class StackResourceLimitExceeded(HeatException):
msg_fmt = _('Maximum resources per stack exceeded.')
class ActionInProgress(HeatException):
msg_fmt = _("Stack %(stack_name)s already has an action (%(action)s) "
"in progress.")
class StopActionFailed(HeatException):
msg_fmt = _("Failed to stop stack (%(stack_name)s) on other engine "
"(%(engine_id)s)")
class EventSendFailed(HeatException):
msg_fmt = _("Failed to send message to stack (%(stack_name)s) "
"on other engine (%(engine_id)s)")
class UnsupportedObjectError(HeatException):
msg_fmt = _('Unsupported object type %(objtype)s')
class OrphanedObjectError(HeatException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class IncompatibleObjectVersion(HeatException):
msg_fmt = _('Version %(objver)s of %(objname)s is not supported')
class ObjectActionError(HeatException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class ReadOnlyFieldError(HeatException):
msg_fmt = _('Cannot modify readonly field %(field)s')
class DeploymentConcurrentTransaction(HeatException):
msg_fmt = _('Concurrent transaction for deployments of server %(server)s')
class ObjectFieldInvalid(HeatException):
msg_fmt = _('Field %(field)s of %(objname)s is not an instance of Field')
class KeystoneServiceNameConflict(HeatException):
msg_fmt = _("Keystone has more than one service with same name "
"%(service)s. Please use service id instead of name")
class SIGHUPInterrupt(HeatException):
msg_fmt = _("System SIGHUP signal received.")
class NoActionRequired(Exception):
pass
|
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import glob
import os
import re
import shutil
from pathlib import Path
from pex.interpreter import PythonInterpreter
from wheel import pep425tags
from pants.backend.native.targets.native_library import NativeLibrary
from pants.backend.native.tasks.link_shared_libraries import SharedLibrary
from pants.backend.python.subsystems.python_native_code import PythonNativeCode
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.address import Address
from pants.python.pex_build_util import is_local_python_dist
from pants.python.python_requirement import PythonRequirement
from pants.python.setup_py_runner import SetupPyRunner
from pants.task.task import Task
from pants.util.collections import assert_single_element
from pants.util.dirutil import safe_mkdir_for, split_basename_and_dirname
from pants.util.memo import memoized_property
from pants.util.strutil import safe_shlex_join
# TODO: make this a SimpleCodegenTask!!!
class BuildLocalPythonDistributions(Task):
"""Create python distributions (.whl) from python_dist targets."""
options_scope = "python-create-distributions"
# NB: these are all the immediate subdirectories of the target's results directory.
# This contains any modules from a setup_requires().
_SETUP_REQUIRES_SITE_SUBDIR = "setup_requires_site"
# This will contain the sources used to build the python_dist().
_DIST_SOURCE_SUBDIR = "python_dist_subdir"
setup_requires_pex_filename = "setup-requires.pex"
# This defines the output directory when building the dist, so we know where the output wheel is
# located. It is a subdirectory of `_DIST_SOURCE_SUBDIR`.
_DIST_OUTPUT_DIR = "dist"
@classmethod
def product_types(cls):
# Note that we don't actually place the products in the product map. We stitch
# them into the build graph instead. This is just to force the round engine
# to run this task when dists need to be built.
return [PythonRequirementLibrary, "local_wheels"]
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data(PythonInterpreter)
round_manager.optional_product(SharedLibrary)
@classmethod
def implementation_version(cls):
return super().implementation_version() + [("BuildLocalPythonDistributions", 3)]
@classmethod
def subsystem_dependencies(cls):
return super().subsystem_dependencies() + (
SetupPyRunner.Factory.scoped(cls),
PythonNativeCode.scoped(cls),
)
class BuildLocalPythonDistributionsError(TaskError):
pass
@memoized_property
def _python_native_code_settings(self):
return PythonNativeCode.scoped_instance(self)
def _build_setup_py_runner(self, extra_reqs=None, interpreter=None, pex_file_path=None):
return SetupPyRunner.Factory.create(
scope=self, extra_reqs=extra_reqs, interpreter=interpreter, pex_file_path=pex_file_path
)
# TODO: This should probably be made into an @classproperty (see PR #5901).
@property
def cache_target_dirs(self):
return True
def _get_setup_requires_to_resolve(self, dist_target):
if not dist_target.setup_requires:
return None
reqs_to_resolve = set()
for setup_req_lib_addr in dist_target.setup_requires:
for maybe_req_lib in self.context.build_graph.resolve(setup_req_lib_addr):
if isinstance(maybe_req_lib, PythonRequirementLibrary):
for req in maybe_req_lib.requirements:
reqs_to_resolve.add(req)
if not reqs_to_resolve:
return None
return reqs_to_resolve
@classmethod
def _get_output_dir(cls, results_dir):
return os.path.join(results_dir, cls._DIST_SOURCE_SUBDIR)
@classmethod
def _get_dist_dir(cls, results_dir):
return os.path.join(cls._get_output_dir(results_dir), cls._DIST_OUTPUT_DIR)
def execute(self):
dist_targets = self.context.targets(is_local_python_dist)
if dist_targets:
interpreter = self.context.products.get_data(PythonInterpreter)
shared_libs_product = self.context.products.get(SharedLibrary)
with self.invalidated(dist_targets, invalidate_dependents=True) as invalidation_check:
for vt in invalidation_check.invalid_vts:
self._prepare_and_create_dist(interpreter, shared_libs_product, vt)
local_wheel_products = self.context.products.get("local_wheels")
for vt in invalidation_check.all_vts:
dist = self._get_whl_from_dir(vt.results_dir)
req_lib_addr = Address.parse(f"{vt.target.address.spec}__req_lib")
self._inject_synthetic_dist_requirements(dist, req_lib_addr)
# Make any target that depends on the dist depend on the synthetic req_lib,
# for downstream consumption.
for dependent in self.context.build_graph.dependents_of(vt.target.address):
self.context.build_graph.inject_dependency(dependent, req_lib_addr)
dist_dir, dist_base = split_basename_and_dirname(dist)
local_wheel_products.add(vt.target, dist_dir).append(dist_base)
def _get_native_artifact_deps(self, target):
native_artifact_targets = []
if target.dependencies:
for dep_tgt in target.dependencies:
if not NativeLibrary.produces_ctypes_native_library(dep_tgt):
raise TargetDefinitionException(
target,
"Target '{}' is invalid: the only dependencies allowed in python_dist() targets "
"are C or C++ targets with a ctypes_native_library= kwarg.".format(
dep_tgt.address.spec
),
)
native_artifact_targets.append(dep_tgt)
return native_artifact_targets
def _copy_sources(self, dist_tgt, dist_target_dir):
# Copy sources and setup.py over to vt results directory for packaging.
# NB: The directory structure of the destination directory needs to match 1:1
# with the directory structure that setup.py expects.
all_sources = list(dist_tgt.sources_relative_to_target_base())
for src_relative_to_target_base in all_sources:
src_rel_to_results_dir = os.path.join(dist_target_dir, src_relative_to_target_base)
safe_mkdir_for(src_rel_to_results_dir)
abs_src_path = os.path.join(
get_buildroot(), dist_tgt.address.spec_path, src_relative_to_target_base
)
shutil.copyfile(abs_src_path, src_rel_to_results_dir)
def _add_artifacts(self, dist_target_dir, shared_libs_product, native_artifact_targets):
all_shared_libs = []
for tgt in native_artifact_targets:
product_mapping = shared_libs_product.get(tgt)
base_dir = assert_single_element(product_mapping.keys())
shared_lib = assert_single_element(product_mapping[base_dir])
all_shared_libs.append(shared_lib)
for shared_lib in all_shared_libs:
basename = os.path.basename(shared_lib.path)
# NB: We convert everything to .so here so that the setup.py can just
# declare .so to build for either platform.
resolved_outname = re.sub(r"\..*\Z", ".so", basename)
dest_path = os.path.join(dist_target_dir, resolved_outname)
safe_mkdir_for(dest_path)
shutil.copyfile(shared_lib.path, dest_path)
return all_shared_libs
def _prepare_and_create_dist(self, interpreter, shared_libs_product, versioned_target):
dist_target = versioned_target.target
native_artifact_deps = self._get_native_artifact_deps(dist_target)
results_dir = versioned_target.results_dir
dist_output_dir = self._get_output_dir(results_dir)
all_native_artifacts = self._add_artifacts(
dist_output_dir, shared_libs_product, native_artifact_deps
)
        # TODO: remove the triplication of all of this validation across _get_native_artifact_deps(),
        # check_build_for_current_platform_only(), and len(all_native_artifacts) > 0!
is_platform_specific = (
# We are including a platform-specific shared lib in this dist, so mark it as such.
len(all_native_artifacts) > 0
or self._python_native_code_settings.check_build_for_current_platform_only(
# NB: This doesn't reach into transitive dependencies, but that doesn't matter currently.
[dist_target]
+ dist_target.dependencies
)
)
versioned_target_fingerprint = versioned_target.cache_key.hash
setup_requires_dir = os.path.join(results_dir, self._SETUP_REQUIRES_SITE_SUBDIR)
setup_reqs_to_resolve = self._get_setup_requires_to_resolve(dist_target)
if setup_reqs_to_resolve:
self.context.log.debug(
"python_dist target(s) with setup_requires detected. "
"Installing setup requirements: {}\n\n".format(
[req.key for req in setup_reqs_to_resolve]
)
)
pex_file_path = os.path.join(
setup_requires_dir, f"setup-py-runner-{versioned_target_fingerprint}.pex"
)
setup_py_runner = self._build_setup_py_runner(
interpreter=interpreter, extra_reqs=setup_reqs_to_resolve, pex_file_path=pex_file_path
)
self.context.log.debug(f"Using pex file as setup.py interpreter: {setup_py_runner}")
self._create_dist(
dist_target,
dist_output_dir,
setup_py_runner,
versioned_target_fingerprint,
is_platform_specific,
)
# NB: "snapshot" refers to a "snapshot release", not a Snapshot.
def _generate_snapshot_bdist_wheel_argv(self, snapshot_fingerprint, is_platform_specific):
"""Create a command line to generate a wheel via `setup.py`.
Note that distutils will convert `snapshot_fingerprint` into a string suitable for a version
tag. Currently for versioned target fingerprints, this seems to convert all punctuation into
'.' and downcase all ASCII chars. See https://www.python.org/dev/peps/pep-0440/ for further
information on allowed version names.
NB: adds a '+' before the fingerprint to the build tag!
"""
egg_info_snapshot_tag_args = ["egg_info", f"--tag-build=+{snapshot_fingerprint}"]
bdist_whl_args = ["bdist_wheel"]
if is_platform_specific:
platform_args = ["--plat-name", pep425tags.get_platform()]
else:
platform_args = []
dist_dir_args = ["--dist-dir", self._DIST_OUTPUT_DIR]
return egg_info_snapshot_tag_args + bdist_whl_args + platform_args + dist_dir_args
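# Sketch of a typical return value, assuming a hypothetical fingerprint "deadbeef"
# and a Linux platform tag (the --plat-name args only appear for platform-specific
# dists; the dist dir is whatever self._DIST_OUTPUT_DIR is set to):
#   ["egg_info", "--tag-build=+deadbeef",
#    "bdist_wheel", "--plat-name", "linux_x86_64",
#    "--dist-dir", "<self._DIST_OUTPUT_DIR>"]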
def _create_dist(
self, dist_tgt, dist_target_dir, setup_py_runner, snapshot_fingerprint, is_platform_specific
):
"""Create a .whl file for the specified python_distribution target."""
self._copy_sources(dist_tgt, dist_target_dir)
setup_py_snapshot_version_argv = self._generate_snapshot_bdist_wheel_argv(
snapshot_fingerprint, is_platform_specific
)
cmd = safe_shlex_join(setup_py_runner.cmdline(setup_py_snapshot_version_argv))
with self.context.new_workunit(
"setup.py", cmd=cmd, labels=[WorkUnitLabel.TOOL]
) as workunit:
try:
setup_py_runner.run_setup_command(
source_dir=Path(dist_target_dir),
setup_command=setup_py_snapshot_version_argv,
stdout=workunit.output("stdout"),
stderr=workunit.output("stderr"),
)
except SetupPyRunner.CommandFailure as e:
raise self.BuildLocalPythonDistributionsError(
f"Installation of python distribution from target {dist_tgt} into directory "
f"{dist_target_dir} failed using the host system's compiler and linker: {e}"
)
# TODO: convert this into a SimpleCodegenTask, which does the exact same thing as this method!
def _inject_synthetic_dist_requirements(self, dist, req_lib_addr):
"""Inject a synthetic requirements library that references a local wheel.
:param dist: Path of the locally built wheel to reference.
:param req_lib_addr: :class: `Address` to give to the synthetic target.
:return: a :class: `PythonRequirementLibrary` referencing the locally-built wheel.
"""
whl_dir, base = split_basename_and_dirname(dist)
whl_metadata = base.split("-")
req_name = "==".join([whl_metadata[0], whl_metadata[1]])
req = PythonRequirement(req_name, repository=whl_dir)
self.context.build_graph.inject_synthetic_target(
req_lib_addr, PythonRequirementLibrary, requirements=[req]
)
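# Illustrative mapping (hypothetical wheel name): a dist at
# ".../dist/mydist-1.0.0+deadbeef-py3-none-any.whl" is split on "-" to yield the
# requirement string "mydist==1.0.0+deadbeef", with the wheel's directory used as
# the local repository for resolution.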
@classmethod
def _get_whl_from_dir(cls, install_dir):
"""Return the absolute path of the whl in a setup.py install directory."""
dist_dir = cls._get_dist_dir(install_dir)
dists = glob.glob(os.path.join(dist_dir, "*.whl"))
if len(dists) == 0:
raise cls.BuildLocalPythonDistributionsError(
"No distributions were produced by python_create_distribution task.\n"
"dist_dir: {}, install_dir: {}".format(dist_dir, install_dir)
)
if len(dists) > 1:
# TODO: is this ever going to happen?
raise cls.BuildLocalPythonDistributionsError(
"Ambiguous local python distributions found: {}".format(dists)
)
return dists[0]
|
|
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tokenizer for use for the MLPerf transformer benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import unicodedata
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo import compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import ops
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import tokenizers
PAD = "<pad>"
PAD_ID = 0
EOS = "<EOS>"
EOS_ID = 1
RESERVED_TOKENS = [PAD, EOS]
# Set of characters that will be used in the function _escape_token() (see func
# docstring for more details).
# This set is added to the alphabet list to ensure that all escaped tokens can
# be encoded.
_ESCAPE_CHARS = set(u"\\_u;0123456789")
# Regex for the function _unescape_token(), the inverse of _escape_token().
# This is used to find "\u", "\\", and "\###;" substrings in the token.
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
_UNDEFINED_UNICODE = u"\u3013"
# Set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i)
for i in xrange(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N")))
# min_count is the minimum number of times a subtoken must appear in the data
# before it is added to the vocabulary. The value is found using binary
# search to obtain the target vocabulary size.
_MIN_MIN_COUNT = 1 # min value to use when binary searching for min_count
_MAX_MIN_COUNT = 1000 # max value to use when binary searching for min_count
def _native_to_unicode(s):
"""Convert string to unicode (required in Python 2)."""
if six.PY2:
return s if isinstance(s, unicode) else s.decode("utf-8")
else:
return s
def _load_vocab_file(vocab_file, reserved_tokens=None):
"""Load vocabulary while ensuring reserved tokens are at the top."""
reserved_tokens = reserved_tokens or []  # honor any reserved tokens supplied by the caller
subtoken_list = []
with tf.io.gfile.GFile(vocab_file, mode="r") as f:
for line in f:
subtoken = _native_to_unicode(line.strip())
subtoken = subtoken[1:-1] # Remove surrounding single-quotes
if subtoken in reserved_tokens:
continue
subtoken_list.append(_native_to_unicode(subtoken))
return reserved_tokens + subtoken_list
def _unicode_to_native(s):
"""Convert string from unicode to native format (required in Python 2)."""
if six.PY2:
return s.encode("utf-8") if isinstance(s, unicode) else s
else:
return s
def _unescape_token(token):
r"""Replaces escaped characters in the token with their unescaped versions.
Applies inverse transformations as _escape_token():
1. Replace "\u" with "_", and "\\" with "\".
2. Replace "\###;" with the unicode character the ### refers to.
Args:
token: escaped string
Returns:
unescaped string
"""
def match(m):
r"""Returns replacement string for matched object.
Matched objects contain one of the strings that matches the regex pattern:
r"\\u|\\\\|\\([0-9]+);"
The strings can be '\u', '\\', or '\###;' (### is any digit number).
m.group(0) refers to the entire matched string ('\u', '\\', or '\###;').
m.group(1) refers to the first parenthesized subgroup ('###').
m.group(0) exists for all match objects, while m.group(1) exists only for
the string '\###;'.
This function looks to see if m.group(1) exists. If it doesn't, then the
matched string must be '\u' or '\\' . In this case, the corresponding
replacement ('_' and '\') are returned. Note that in python, a single
backslash is written as '\\', and double backslash as '\\\\'.
If m.group(1) exists, then use the integer in m.group(1) to return a
unicode character.
Args:
m: match object
Returns:
String to replace matched object with.
"""
# Check if the matched strings are '\u' or '\\'.
if m.group(1) is None:
return u"_" if m.group(0) == u"\\u" else u"\\"
# If m.group(1) exists, try and return unicode character.
try:
return six.unichr(int(m.group(1)))
except (ValueError, OverflowError) as _:
return _UNDEFINED_UNICODE
# Use match function to replace escaped substrings in the token.
return _UNESCAPE_REGEX.sub(match, token)
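# Illustrative behaviour of _unescape_token(), assuming tokens escaped as
# described above:
#   backslash + "u"             -> "_"
#   backslash + backslash       -> a single backslash
#   backslash + "65;"           -> six.unichr(65), i.e. u"A"
#   backslash + "99999999999;"  -> _UNDEFINED_UNICODE (ValueError/OverflowError fallback)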
def _join_tokens_to_string(tokens):
"""Join a list of string tokens into a single string."""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
class MlPerfTokenizer(tokenizers.BaseTokenizer):
"""Id->String only for MLPerf decoding."""
@classmethod
def Params(cls):
p = super(MlPerfTokenizer, cls).Params()
p.Define("vocab_filepath", None, "Specifies a filepath to the vocab.")
return p
def IdsToStrings(self, ids, lens):
p = self.params
return ops.ml_perf_subword_id_to_string(
ids, lens, vocab_filepath=p.vocab_filepath)
def __init__(self, params):
super(MlPerfTokenizer, self).__init__(params)
reserved_tokens = RESERVED_TOKENS
self.subtoken_list = _load_vocab_file(params.vocab_filepath,
reserved_tokens)
self.max_subtoken_length = 0
for subtoken in self.subtoken_list:
self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken))
def _subtoken_ids_to_tokens(self, subtokens):
"""Convert list of int subtoken ids to a list of string tokens."""
escaped_tokens = "".join([
self.subtoken_list[s] for s in subtokens if s < len(self.subtoken_list)
])
escaped_tokens = escaped_tokens.split("_")
# All tokens in the vocabulary list have been escaped (see _escape_token())
# so each token must be unescaped when decoding.
ret = []
for token in escaped_tokens:
if token:
ret.append(_unescape_token(token))
return ret
def PythonIdsToStrings(self, ids, lens):
"""Unlike the normal IdsToStrings which is in-graph, this runs entirely in Python.
Uses the reference MLPerf tokenizer code.
Args:
ids: A matrix of shape [batch, seqlen].
ids[i, :] is the i-th sample's ids.
lens: A vector of shape [batch]. lens[i] is the sequence length of the
i-th sample. Only the first lens[i] tokens in ids[i, :] are valid
tokens for the i-th sequence.
Returns:
A list of seqlen decoded strings.
"""
resp = []
for i, row in enumerate(ids):
resp.append(self.decode(row[:lens[i]]))
return resp
def decode(self, subtokens):
"""Converts list of int subtokens ids into a string."""
if isinstance(subtokens, np.ndarray):
# Note that list(subtokens) converts subtokens to a python list, but the
# items remain as np.int32. This converts both the array and its items.
subtokens = subtokens.tolist()
if not subtokens:
return ""
assert isinstance(subtokens, list) and isinstance(subtokens[0], int), (
"Subtokens argument passed into decode() must be a list of integers.")
return _unicode_to_native(
_join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens)))
|
|
#!/usr/bin/env python
"""
Created on Thu Dec 19 14:31:36 2013
Author: Oren Freifeld
Email: [email protected]
"""
import numpy as np
from pylab import plt
from scipy import sparse
from of.utils import *
from cpab.cpaNd import CpaSpace as CpaSpaceNd
from cpab.cpaNd.utils import null
from cpab.cpa2d.utils import *
from cpab.cpa2d.ConfigPlt import ConfigPlt
from cpab.cpa2d.Tessellation import Tessellation
class CpaSpace(CpaSpaceNd):
dim_domain=2
dim_range=2
nHomoCoo = dim_domain+1
lengthAvee = dim_domain * nHomoCoo
Ashape = dim_domain,nHomoCoo
def __init__(self,XMINS,XMAXS,nCs,
zero_v_across_bdry,
vol_preserve,warp_around=[False]*2,
conformal=False,
zero_vals=[],cpa_calcs=None,
tess=['II','I'][0],
valid_outside=None,
only_local=False,
cont_constraints_are_separable=None):
if cont_constraints_are_separable is None:
raise ObsoleteError("""
Expected True/False value for cont_constraints_are_separable;
got None instead""")
if tess == 'II' and valid_outside is not None:
print "tess='II' --> ignoring the value of valid_outside"
if tess == 'I':
if valid_outside is None:
raise ValueError("tess='I' so you must pass valid_outside=True/False" )
self.valid_outside=valid_outside
nCx,nCy=map(int,nCs)
debug_cont_constraints_are_separable=False
if cont_constraints_are_separable:
print 'Check if can actually use separable continuity:'
if any(zero_v_across_bdry):
cont_constraints_are_separable=False
print 'any(zero_v_across_bdry) is True'
if vol_preserve:
cont_constraints_are_separable=False
print 'vol_preserve is True'
if nCx!=nCy:
cont_constraints_are_separable=False
print 'nCx!=nCy'
if XMINS[0]!=XMINS[1]:
cont_constraints_are_separable=False
print 'XMINS[0]!=XMINS[1]'
if XMAXS[0]!=XMAXS[1]:
cont_constraints_are_separable=False
print 'XMAXS[0]!=XMAXS[1]'
if not cont_constraints_are_separable:
debug_cont_constraints_are_separable=False
print 'so I could not use separable continuity.'
else:
print '\nWill use separable continuity.\n'
super(CpaSpace,self).__init__(XMINS,XMAXS,nCs,
zero_v_across_bdry,
vol_preserve=vol_preserve,
warp_around=warp_around,
conformal=conformal,
zero_vals=zero_vals,
cpa_calcs=cpa_calcs,tess=tess,
valid_outside=valid_outside,
only_local=only_local,
cont_constraints_are_separable=cont_constraints_are_separable)
tessellation = Tessellation(nCx,nCy,self.nC,self.XMINS,self.XMAXS,tess=tess)
self.tessellation=tessellation
try:
# raise FileDoesNotExistError("fake file")
subspace=Pkl.load(self.filename_subspace,verbose=1)
B=subspace['B']
nConstraints=subspace['nConstraints']
nEdges=subspace['nEdges']
constraintMat=subspace['constraintMat']
try:
cont_constraints_are_separable=subspace['cont_constraints_are_separable']
except KeyError:
cont_constraints_are_separable=False
except FileDoesNotExistError:
nC = self.nC
verts1,verts2,H,nEdges,nConstraints = self.tessellation.create_verts_and_H(
dim_range=self.dim_range,valid_outside=valid_outside)
if cont_constraints_are_separable == False or debug_cont_constraints_are_separable:
L = create_cont_constraint_mat(H,verts1,verts2,nEdges,nConstraints,nC,
dim_domain=self.dim_domain,
dim_range=self.dim_range)
if cont_constraints_are_separable:
Lx = create_cont_constraint_mat_separable(H,verts1,verts2,nEdges,nConstraints,
nC,dim_domain=self.dim_domain,
dim_range=self.dim_range,tess=tess)
if len(zero_vals):
Lzerovals = create_constraint_mat_zerovals(nC,dim_domain=self.dim_domain,
dim_range=self.dim_range,
zero_vals=zero_vals)
L = np.vstack([L,Lzerovals])
nConstraints += Lzerovals.shape[0]
if any(zero_v_across_bdry):
# Lbdry = self.tessellation.create_constraint_mat_bdry(
# zero_v_across_bdry=self.zero_v_across_bdry)
#
# L = np.vstack([L,Lbdry])
if cont_constraints_are_separable == False or debug_cont_constraints_are_separable:
Lbdry = self.tessellation.create_constraint_mat_bdry(
zero_v_across_bdry=self.zero_v_across_bdry)
L = np.vstack([L,Lbdry])
if cont_constraints_are_separable:
Lb = self.tessellation.create_constraint_mat_bdry_separable(
zero_v_across_bdry=self.zero_v_across_bdry)
raise NotImplementedError(zero_v_across_bdry, cont_constraints_are_separable)
nConstraints += Lbdry.shape[0]
if self.warp_around[0] or self.warp_around[1]:
raise NotImplementedError
Lwa = create_constraint_mat_warp_around(cells_verts,
nC,dim_domain=self.dim_domain)
L = np.vstack([L,Lwa])
nConstraints += Lwa.shape[0]
if vol_preserve:
Lvol = create_constraint_mat_preserve_vol(nC,dim_domain=self.dim_domain)
L = np.vstack([L,Lvol])
nConstraints += Lvol.shape[0]
if conformal:
Lconf = create_constraint_mat_conformal(nC,dim_domain=self.dim_domain,dim_range=self.dim_range)
L = np.vstack([L,Lconf])
nConstraints += Lconf.shape[0]
if self.only_local==False:
if not cont_constraints_are_separable:
B=null(L)
else: # to solve a much smaller SVD and to get a sparser basis
if vol_preserve or any(zero_v_across_bdry):
raise NotImplementedError
B1=null(Lx)
# B1.shape is (nC*nHomoCoo)x dim_null_space
if debug_cont_constraints_are_separable:
B=null(L)
if B1.shape[0]!=B.shape[0]/2:
raise ValueError(B1.shape,B.shape)
if float(B1.shape[1])*self.dim_range != B.shape[1]:
raise ValueError(B1.shape,B.shape)
_B = np.zeros((B1.shape[0]*2,B1.shape[1]*self.dim_range),B1.dtype)
for j in range(B1.shape[1]):
Avees = B1[:,j] # length=self.nC*self.nHomoCoo
arr=Avees.reshape(self.nC,self.nHomoCoo)
for k in range(self.dim_range):
arr2=np.hstack([arr if m==k else np.zeros_like(arr) for m in range(self.dim_range)])
arr3=arr2.reshape(self.nC,self.lengthAvee)
arr4=arr3.flatten()
_B[:,j+k*B1.shape[1]]=arr4
if debug_cont_constraints_are_separable:
if B.shape != _B.shape:
raise ValueError(B.shape,_B.shape)
B=_B
else:
if tess != 'I':
raise NotImplementedError
B = None
if cont_constraints_are_separable:
L=Lx
constraintMat=sparse.csr_matrix(L)
Pkl.dump(self.filename_subspace,{'B':B,'cont_constraints_are_separable':cont_constraints_are_separable,
'nConstraints':nConstraints,
'nEdges':nEdges,
'constraintMat':constraintMat},
override=True)
# Since B encodes the null space of L, it follows that
# np.allclose(L.dot(B),0)==True
super(CpaSpace,self).__finish_init__(tessellation=tessellation,
constraintMat=constraintMat,
nConstraints=nConstraints,
nInterfaces=nEdges,
B=B,zero_vals=zero_vals)
self.cont_constraints_are_separable=cont_constraints_are_separable
self.x_dense = self._calcs.x_dense
self.x_dense_grid = self._calcs.x_dense_grid
self.x_dense_img = self._calcs.x_dense_img
self.x_dense_grid_img = self._calcs.x_dense_grid_img
self.grid_shape = self.x_dense_grid_img[0].shape
verts=self.tessellation.cells_verts_homo_coo
if 0: # testing
for i in range(0,self.nC):
for j in range(0,i):
verts1=verts[i]
verts2=verts[j]
shared=[]
for v1 in verts1:
for v2 in verts2:
if (v1==v2).all():
shared.append(v1)
shared = np.asarray(shared).T
if len(shared)==0:
continue
# theta =self.get_zeros_theta()
for m in range(self.d):
# theta[j]=1
Avees=self.get_zeros_PA()
Avees[:]=self.B[:,m]
# self.theta2Avees(Avees=Avees,theta=theta)
As=self.Avees2As(Avees=Avees)
Ai=As[i]
Aj=As[j]
#Ai.dot(shared) is 3 x 3 = dim x #verts_per_side
# At the moment, the problem is w/ the last entry of the 4 vert (100,100,0,1)
if not np.allclose((Ai-Aj).dot(shared),0):
ipshell('FAILED ALL CLOSE TEST')
raise ValueError
def get_x_dense(self):
return self.x_dense
def get_x_dense_grid(self):
return self.x_dense_grid
def get_x_dense_img(self):
return self.x_dense_img
def get_x_dense_grid_img(self):
return self.x_dense_grid_img
def __repr__(self):
s = "cpa space (tess type {}):".format(self.tess)
s += '\n\tCells: {}x{} (nC={})'.format(self.tessellation.nCx,self.tessellation.nCy,self.tessellation.nC)
s += '\n\td: {} D: {}'.format(self.d,self.D)
if any(self.zero_v_across_bdry):
if not all(self.zero_v_across_bdry):
raise NotImplementedError("Mixed bdry types")
s += '\n\tzero bdry cond: True'
s += '\n\tvolume-preserving: {}'.format(self.vol_preserve)
if self.tess=='I':
s+='\n\tvalid extension: {}'.format(self.valid_outside)
return s
def calc_tess(self,permute=False):
raise ObsoleteError
pts = self.get_x_dense_img()
cell_idx = np.empty(len(pts),dtype=np.int32)
self.calc_cell_idx(pts,cell_idx)
if permute:
p=np.random.permutation(self.nC)
cell_idx2=np.zeros_like(cell_idx)
for c in range(self.nC):
cell_idx2[cell_idx==c]=p[c]
cell_idx=cell_idx2
if self.XMINS.any():
raise NotImplementedError
Nx,Ny=self.XMAXS
img_idx=cell_idx.reshape(Ny,Nx)
return img_idx
def quiver(self,x,v,scale,ds=16,color='k',negate_vy=False,pivot='middle',
head=True,width=None):
"""
If width is None, its value is dictated by the value of head.
"""
if head:
headlength=5
headwidth=3
headaxislength=4.5
if width is None:
width=.005
else:
headlength=0
headwidth=0
headaxislength=0
if width is None:
width=.003
if x is None:
raise ValueError
# if x is None:
# x=self.xx
# y=self.yy
# if x.size != v[:,0].size:
# x=self.x_img
# y=self.y_img
#
# else:
# if x.ndim != 2:
# raise ValueError(x.shape)
# if x.shape[1]!=2:
# raise ValueError(x.shape)
#
# x,y=x[:,0].copy(),x[:,1].copy()
# if x.size != v[:,0].size:
# raise ValueError(x.shape,v.shape)
if x.size != v.size:
raise ValueError(x.shape,v.shape)
if v.ndim != 2:
raise ValueError(v.shape)
if v.shape[1]!=2:
raise ValueError(v.shape)
if x.shape != v.shape:
if x.ndim !=3 or x.shape[0]!=2:
raise ValueError(x.shape)
# x = np.asarray([x[0].flatten(),x[1].flatten()]).T
v = np.asarray([v.cpu[:,0].reshape(x.shape[1],x.shape[2]),
v.cpu[:,1].reshape(x.shape[1],x.shape[2])])
if x.shape != v.shape:
raise ValueError(x.shape,v.shape)
# if x.ndim != 2:
# raise ValueError(x.shape)
# if y.ndim != 2:
# raise ValueError(x.shape)
# try:
# vx = v[:,0].reshape(x.shape)
# vy = v[:,1].reshape(x.shape)
# except:
# raise ValueError(v.shape,x.shape)
# if x.shape[1]!=2:
# raise NotImplementedError(x.shape)
# if v.shape[1]!=2:
# raise NotImplementedError(x.shape)
if x.ndim !=3 and x.shape[1]!=2:
raise ValueError(x.shape)
if v.ndim !=3 and v.shape[1]!=2:
raise ValueError(v.shape)
# _x,_y = x.T
# vx,vy = v.T
if x.ndim == 2:
_x,_y = x.T
_u,_v = v.T
else:
_x,_y = x
_u,_v = v
if negate_vy:
_v = -_v
# print scale,ds
# 1/0
if _x.ndim==2:
plt.quiver(_x[::ds,::ds],_y[::ds,::ds],_u[::ds,::ds],_v[::ds,::ds],
angles='xy', scale_units='xy',scale=scale,
pivot=pivot,
color=color,
headlength=headlength,
headwidth=headwidth,
headaxislength=headaxislength,
width=width
)
else:
plt.quiver(_x[::ds],_y[::ds],_u[::ds],_v[::ds],
angles='xy', scale_units='xy',scale=scale,
pivot=pivot,
color=color,
headlength=headlength,
headwidth=headwidth,
headaxislength=headaxislength,
width=width
)
def plot_cells(self,color='k',lw=0.5,offset=(0,0)):
ox,oy=offset
if self.tess == 'II':
for c in xrange(self.nC):
xmin,ymin=self._xmins[c]
xmax,ymax=self._xmaxs[c]
# if (xmin == self.XMINS[0] or
# ymin == self.XMINS[1] or
# xmax == self.XMAXS[0] or
# ymax == self.XMAXS[1]):
# plt.plot([xmin,xmax,xmax,xmin,xmin],
# [ymin,ymin,ymax,ymax,ymin], color=color,lw=lw*10)
# else:
plt.plot(np.asarray([xmin,xmax,xmax,xmin,xmin])+ox,
np.asarray([ymin,ymin,ymax,ymax,ymin])+oy, color=color,lw=lw)
else:
for c in xrange(self.nC):
verts=self.tessellation.cells_verts_homo_coo[c,:,:-1]
x=np.asarray([verts[0,0],verts[1,0],verts[2,0],verts[0,0]])
y=np.asarray([verts[0,1],verts[1,1],verts[2,1],verts[0,1]])
plt.plot(x+ox,y+oy, color=color,lw=lw)
def inbound(self,x,i_c,out):
"""
Assumed:
x is 2xnPts
i_c is the index of the cell in question.
Checks, for each element of x, whether it is in the i_c cell.
Result is computed in-place in the last input argument.
"""
raise ObsoleteError("Use compute_inbound instead")
if __name__ == '__main__':
import pylab
from pylab import plt
import of.plt
from cpa.prob_and_stats.CpaCovs import CpaCovs
from cpa.prob_and_stats.cpa_simple_mean import cpa_simple_mean
from cpa.cpa2d.calcs import *
from of import my_mayavi
from mayavi.mlab import mesh
if computer.has_good_gpu_card:
pylab.ion()
# plt.close('all')
plt.clf()
XMINS=[0,0]
XMAXS=[512,512]
# XMAXS=[256,256]
# XMAXS=[256/2,256/2]
nCx,nCy=1,1
nCx,nCy=2,2
# nCx,nCy=3,3
#
## nCx,nCy=10,3
nCx,nCy=3,3
# nCx,nCy=4,4
# nCx,nCy=3,3
# nCx,nCy=6,6
#### nCx,nCy=7,7
# nCx,nCy=16,16
# nCx,nCy=8,8
### nCx,nCy=9,9
# nCx,nCy=10,10
##
# nCx,nCy=16,16
# nCx,nCy=16,16
# nCx,nCy=8,8
tess=['II','I'][1]
if 1 and computer.has_good_gpu_card:
if tess == 'II':
nCx,nCy=16,16
if tess == 'I':
nCx,nCy=8,8
nCx,nCy=16,16
# nCx,nCy=10,10
# nCx,nCy=1,1
# nCx,nCy=6,6 # for tri, this doesn't work well
# nCx,nCy=7,7
# nCx,nCy=8,8
zero_v_across_bdry=[True,True]
zero_v_across_bdry=[False,False]
# zero_v_across_bdry=[True,True]
#
vol_preserve = [False,True][0]
warp_around = [False]*2
Nx=XMAXS[0]
Ny=XMAXS[1]
config_plt = ConfigPlt(Nx=Nx,Ny=Ny)
Ngrids= [ Nx , Ny]
cpa_calcs=CpaCalcs(XMINS=XMINS,XMAXS=XMAXS,Ngrids=Ngrids,use_GPU_if_possible=True)
cpa_space=CpaSpace(XMINS,XMAXS,[nCx,nCy],zero_v_across_bdry,vol_preserve,
warp_around,
cpa_calcs=cpa_calcs,
# zero_vals=[(0,1)],
tess=tess,
valid_outside=0)
del cpa_calcs
if cpa_space.d==0:
raise ValueError('dim is 0')
print cpa_space
cpa_covs = CpaCovs(cpa_space,scale_spatial=1.0 * 1*10*0,
scale_value=0.01*10*2*4*10/100,
left_blk_rel_scale=1.0/100,
right_vec_scale=1)
mu = cpa_simple_mean(cpa_space)
Avees=cpa_space.theta2Avees(mu)
np.random.seed(10)
theta = np.random.multivariate_normal(mean=mu,cov=cpa_covs.cpa_cov)
cpa_space.theta2Avees(theta,Avees)
cpa_space.update_pat(Avees=Avees)
pts=CpuGpuArray(cpa_space.x_dense_img)
# yy,xx=np.mgrid[-100:cpa_space.XMAXS[1]+100:1,
# -100:cpa_space.XMAXS[0]+100:1]
# pts = np.vstack([xx.flatten(),yy.flatten()]).T.copy().astype(np.float)
cell_idx = CpuGpuArray.zeros(len(pts),dtype=np.int32)
cpa_space.calc_cell_idx(pts,cell_idx)
cell_idx.gpu2cpu()
v_dense = CpuGpuArray.zeros_like(pts)
print 'calc v:'
tic = time.clock()
cpa_space.calc_v(pts=pts,out=v_dense)
toc = time.clock()
print 'time', toc-tic
params_flow_int = get_params_flow_int()
# params_flow_int.nTimeSteps *=10
params_flow_int.dt *=100
params_flow_int.nStepsODEsolver=10
src = CpuGpuArray(cpa_space.x_dense_img)
transformed = CpuGpuArray.empty_like(src)
print params_flow_int
print '#pts=',len(pts)
tic=time.clock()
cpa_space.calc_T_fwd(pts=src,out=transformed,**params_flow_int)
toc = time.clock()
print "time (done in gpu, not cpu/gpu transfer')",toc-tic
v_dense.gpu2cpu() # for display
pts.gpu2cpu() # for display
# ds=16
ds=8
pts0 = cpa_space.x_dense_grid_img[:,::ds,::ds].reshape(cpa_space.dim_domain,-1).T
pts0 = CpuGpuArray(pts0.copy())
1/0
trajs_full = cpa_space.calc_trajectory(pts=pts0,mysign=1,**params_flow_int)
# v_at_trajs_full = np.zeros_like(trajs_full)
# for _pts,_v in zip(trajs_full,v_at_trajs_full):
# cpa_space.calc_v(pat=pat, pts=_pts, out=_v)
pts_grid=cpa_space.x_dense_grid_img
# pts_grid = np.asarray([xx,yy]).copy()
grid_shape = pts_grid[0].shape
fig = plt.figure()
plt.subplot(234)
# plt.imshow(cell_idx.reshape(Ny,Nx))
plt.imshow(cell_idx.cpu.reshape(grid_shape))
plt.subplot(231)
scale=[2*30,1.5*4][vol_preserve]
cpa_space.quiver(pts_grid,v_dense,scale, ds=16/2)
config_plt()
plt.subplot(232)
plt.imshow(v_dense.cpu[:,0].reshape(grid_shape),interpolation='Nearest',
vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
# cpa_space.plot_cells()
config_plt()
plt.subplot(233)
plt.imshow(v_dense.cpu[:,1].reshape(grid_shape),interpolation='Nearest',
vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
# cpa_space.plot_cells()
config_plt()
plt.subplot(235)
plt.imshow(v_dense.cpu[:,0].reshape(grid_shape),interpolation='Nearest',
vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
cpa_space.plot_cells(color='k')
config_plt()
plt.subplot(236)
plt.imshow(v_dense.cpu[:,1].reshape(grid_shape),interpolation='Nearest',
vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
cpa_space.plot_cells(color='k')
config_plt()
# 1/0
if 0:
my_mayavi.mayavi_mlab_close_all()
xx=cpa_space.x_dense_grid_img[0]
yy=cpa_space.x_dense_grid_img[1]
my_mayavi.mayavi_mlab_figure_bgwhite('vx')
mesh(xx,yy,0 *xx,opacity=0.25)
mesh(xx,yy,v_dense[:,0].reshape(xx.shape))
my_mayavi.mayavi_mlab_figure_bgwhite('vy')
mesh(xx,yy,0 *xx,opacity=0.25)
mesh(xx,yy,v_dense[:,1].reshape(xx.shape))
# plt.figure()
# i = 317
# cpa_space.quiver(trajs_full[:,i],v_at_trajs_full[:,i],scale=10, ds=10)
# cpa_space.quiver(trajs_full.reshape(-1,2),v_at_trajs_full.reshape(-1,2),scale=20, ds=10)
# config_plt()
# for t in range(1,params_flow_int.nTimeSteps+1,5):
for t in [params_flow_int.nTimeSteps+1]:
break
print t
plt.clf()
trajs = trajs_full[:t].copy()
v_at_traj = v_at_trajs_full[t-1]
pts1=trajs[-1]
# v_at_T = cpa_space.calc_v(pat=pat,
# pts = pts1 ,
# out=None )
for num in [221,222,223,224]:
plt.subplot(num)
if num in [224]:
cpa_space.quiver(cpa_space.xx_img,v_dense,
# scale=[2*5,1.5*4][vol_preserve],
scale=[2*10,1.5*4][vol_preserve],
ds=16*2)
if num in [223]:
cpa_space.quiver(pts1,v_at_traj,scale=10, ds=1)
if num in [222]:
plt.plot(pts0[:,0],pts0[:,1],'ro',ms=1)
if num in [222,223]:
nTraj = trajs.shape[1]
for i in range(nTraj):
traj = trajs[:,i]
plt.plot(traj[:,0],traj[:,1],'b',lw=.5)
if num in [221,222]:
plt.plot(pts1[:,0],pts1[:,1],'go',ms=1)
config_plt()
if num==221:
# plt.title('T(x;t)')
plt.title(r"$T(x;t)$")
if num==222:
# plt.title("{T(x;t'): t' in [0,t]}")
plt.title(r"$\{T(x;\tau): \tau\in [0,t]\}$")
if num==223:
plt.title(r"$v(T(x;t))$")
if num == 224:
plt.title(r"$v(\cdot)$")
of.plt.maximize_figure()
fig_filename = (os.path.join(HOME,'tmp','{0:04}.png'.format(t)))
print fig_filename
plt.savefig(fig_filename,dpi=300)
if 0 and computer.has_good_gpu_card:
# ipshell('debug')
raw_input("Press Enter to finish.")
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from _socket import gaierror
from django.conf import settings
from django.contrib.auth import logout
from django.http.response import JsonResponse, Http404, HttpResponse
from django.shortcuts import render
from django.views import View
from django.views.generic import RedirectView
from process_management.models import SystemEvent, SystemEventViewTransformer
from process_management.tools import helpers as helpers
class IndexView(View):
def get(self, request):
try:
servers = [helpers._connect_server(node_settings) for name, node_settings in settings.NODES.items()]
groups = []
process_count = {
'running': 0,
'not_running': 0
}
for server in servers:
processes = server.supervisor.getAllProcessInfo()
counting = [True if process['statename'] == 'RUNNING' else False for process in processes]
process_count['running'] += counting.count(True)
process_count['not_running'] += counting.count(False)
for process in processes:
if process['group'] not in groups and process['name'] not in process['group']:
groups.append(process['group'])
group_count = len(groups)
node_count = len(servers)
except gaierror as e:
return render(request, 'error/node_not_reachable.html', {'message': e})
except Exception as e:
raise Http404(e)
se_qs = SystemEvent.objects.all().order_by('-created')
sevt = SystemEventViewTransformer(se_qs)
return render(request, 'components/index.html', {'nodes': node_count, 'groups': group_count, 'processes': process_count, 'title': 'Dashboard', 'system_event_view_transformer': sevt})
class AllNodesView(View):
def get(self, request):
try:
servers = {name: {'server': helpers._connect_server(node_settings)} for name, node_settings in
settings.NODES.items()}
for name, _dict in servers.items():
processes_info = _dict['server'].supervisor.getAllProcessInfo()
for process_info in processes_info:
start = datetime.datetime.fromtimestamp(process_info['start'])
now = datetime.datetime.fromtimestamp(process_info['now'])
uptime = now - start
process_info['uptime'] = uptime
servers[name]['processes'] = processes_info
state = _dict['server'].supervisor.getState()['statename']
servers[name]['state'] = state
if state == 'RUNNING':
servers[name]['state_class_postfix'] = 'success'
else:
servers[name]['state_class_postfix'] = 'danger'
_dict.pop('server', None)
except gaierror as e:
return render(request, 'error/node_not_reachable.html', {'message': e})
except Exception as e:
raise Http404(e)
return render(request, 'components/all_nodes.html', {'servers': servers, 'title': 'All Nodes'})
class SingleNodeView(View):
def get(self, request, node):
try:
node_settings = settings.NODES[node]
server = helpers._connect_server(node_settings)
processes = server.supervisor.getAllProcessInfo()
counts = {
'RUNNING': 0,
'FATAL': 0,
'STOPPED': 0,
'OTHERS': 0
}
for process in processes:
start = datetime.datetime.fromtimestamp(process['start'])
now = datetime.datetime.fromtimestamp(process['now'])
process['uptime'] = now - start
# If name == group the process does not belong to a group
if process['group'] == process['name']:
process['group'] = ''
# Count nr of processes in different states
if process['statename'] in counts.keys():
counts[process['statename']] += 1
else:
counts['OTHERS'] += 1
# Clean up process dictionary
keys_to_pop = ['start', 'now', 'description', 'state', 'logfile', 'stdout_logfile', 'stderr_logfile']
for key in keys_to_pop:
process.pop(key)
except gaierror as e:
return render(request, 'error/node_not_reachable.html', {'message': e})
except Exception as e:
raise Http404(e)
return render(request, 'components/single_node.html', {'node': node, 'processes': processes, 'state_counts': counts, 'title': node})
class SingleGroupView(View):
# Displays every process running in a single group, regardless of which node it runs in
# Shows information about the process, such as PID, node, name, etc.
def get(self, request, group):
servers = {name: {'server': helpers._connect_server(node_settings)} for name, node_settings in
settings.NODES.items()}
group_processes = []
for node, _dict in servers.items():
processes = _dict['server'].supervisor.getAllProcessInfo()
for process in processes:
if process['group'] == group:
process['node'] = node
start = datetime.datetime.fromtimestamp(process['start'])
now = datetime.datetime.fromtimestamp(process['now'])
process['uptime'] = now - start
group_processes.append(process)
return render(request, 'components/single_group.html', {'group': group, 'processes': group_processes})
class AjaxRestartProcessView(View):
def post(self, request):
process_name = request.POST.get('process_name', None)
node = request.POST.get('node', None)
group = request.POST.get('group', None)
if not process_name or not node or group is None:
return JsonResponse({'success': False, 'message': 'Please supply a process and node name'})
if group == '':
full_name = process_name
else:
full_name = '{}:{}'.format(group, process_name)
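# e.g. (hypothetical names) group "web" and process "gunicorn" give
# "web:gunicorn", the group-qualified name the supervisor XML-RPC API expects.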
try:
node_settings = settings.NODES[node]
server = helpers._connect_server(node_settings)
started = False
stopped = helpers._stop_process(server, full_name)
if stopped:
started = server.supervisor.startProcess(full_name)
else:
return JsonResponse({'success': False, 'message': 'Could not stop process'})
if not started:
return JsonResponse({'success': False, 'message': 'Could not start process'})
new_state = helpers._get_process_state(server, full_name)
except gaierror as e:
return JsonResponse({'success': False, 'message': str(e)})
except Exception as e:
return JsonResponse({'success': False, 'message': str(e)})
SystemEvent.restarted_process(request.user, full_name, node)
return JsonResponse({'success': True, 'message': '{}: Restarted successfully'.format(full_name), 'state': new_state})
class AjaxStopProcessView(View):
def post(self, request):
node = request.POST.get('node', None)
process_name = request.POST.get('process_name', None)
group = request.POST.get('group', None)
if not process_name or not node or group is None:
return JsonResponse({'success': False, 'message': 'Please supply a process and node name'})
if group == '':
full_name = process_name
else:
full_name = '{}:{}'.format(group, process_name)
try:
node_settings = settings.NODES[node]
server = helpers._connect_server(node_settings)
stopped = helpers._stop_process(server, full_name)
if not stopped:
return JsonResponse({'success': False, 'message': 'Could not stop process'})
new_state = helpers._get_process_state(server, full_name)
except gaierror as e:
return JsonResponse({'success': False, 'message': str(e)})
except Exception as e:
return JsonResponse({'success': False, 'message': str(e)})
SystemEvent.stopped_process(request.user, full_name, node)
return JsonResponse({'success': True, 'message': '{}: Stopped successfully'.format(full_name), 'state': new_state})
class AjaxRestartAllProcessesView(View):
def post(self, request):
node = request.POST.get('node', None)
if not node:
return JsonResponse({'success': False, 'message': 'Please supply a node name'})
try:
node_settings = settings.NODES[node]
server = helpers._connect_server(node_settings)
processes = server.supervisor.getAllProcessInfo()
for process in processes:
if process['group'] == '':
full_name = process['name']
else:
full_name = '{}:{}'.format(process['group'], process['name'])
stopped = helpers._stop_process(server, full_name)
if stopped:
started = server.supervisor.startProcess(full_name)
else:
return JsonResponse({'success': False, 'message': 'Could not stop process'})
if not started:
return JsonResponse({'success': False, 'message': 'Could not start process'})
except gaierror as e:
return JsonResponse({'success': False, 'message': str(e)})
except Exception as e:
return JsonResponse({'success': False, 'message': str(e)})
SystemEvent.free_text(request.user, 'Restarted all processes on {}'.format(node))
return JsonResponse({'success': True, 'message': 'Successfully restarted all processes on {}'.format(node)})
class AjaxStopAllProcessesView(View):
def post(self, request):
node = request.POST.get('node', None)
if not node:
return JsonResponse({'success': False, 'message': 'Please supply a node name'})
try:
node_settings = settings.NODES[node]
server = helpers._connect_server(node_settings)
processes = server.supervisor.getAllProcessInfo()
for process in processes:
if process['group'] == '':
full_name = process['name']
else:
full_name = '{}:{}'.format(process['group'], process['name'])
stopped = helpers._stop_process(server, full_name)
if not stopped:
return JsonResponse({'success': False, 'message': 'Could not stop process'})
except gaierror as e:
return JsonResponse({'success': False, 'message': str(e)})
except Exception as e:
return JsonResponse({'success': False, 'message': str(e)})
SystemEvent.free_text(request.user, 'Stopped all processes on {}'.format(node))
return JsonResponse({'success': True, 'message': 'Successfully stopped all processes on {}'.format(node)})
class LogoutView(RedirectView):
"""
A view that logout user and redirect to homepage.
"""
permanent = False
query_string = True
pattern_name = 'home'
def get_redirect_url(self, *args, **kwargs):
"""
Logout user and redirect to target url.
"""
if self.request.user.is_authenticated():
logout(self.request)
return super(LogoutView, self).get_redirect_url(*args, **kwargs)
|
|
'''
ilf core utilities
'''
import sys
import re
import math
import json
import io
import pandas as pd
from itertools import chain
import pytricia as pt
from .numbers import IP4PROTOCOLS, IP4SERVICES
# -- Helpers
def lowest_bit(num):
bit, low = -1, (num & -num)
if not low:
return 0
while(low):
low >>= 1
bit += 1
return bit
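# Illustrative values: lowest_bit(12) == 2 (0b1100), lowest_bit(8) == 3,
# and lowest_bit(0) == 0 since no bit is set.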
def binarr(n):
return [n >> i & 1 for i in range(n.bit_length() - 1, -1, -1)]
def is_power2(n):
'check if n is a power of 2; note that 2**0 == 1 is valid'
return (n>0 and (n & (n-1) == 0))
def pp2portstr(port, proto):
'convert port, protocol numbers to port string'
return str(Ival.port_proto(int(port), int(proto)))
class Ip4Protocol(object):
'translate between ipv4 protocol number and associated name'
def __init__(self):
self._num_toname = {} # e.g. 6 -> 'tcp'
self._num_todesc = {} # e.g. 6 -> 'Transmission Control'
self._name_tonum = {} # e.g. 'tcp' -> 6
for k, (name, desc) in IP4PROTOCOLS.items():
self._num_toname[k] = name
self._num_todesc[k] = desc
self._name_tonum[name] = k # TODO: assumes names are unique
def getprotobyname(self, name):
'turn protocol name into its ip protocol number'
err = 'invalid ipv4 protocol name: {!r}'
rv = self._name_tonum.get(name.lower(), None)
if rv is None:
raise ValueError(err.format(name))
return rv
def getnamebyproto(self, num):
'turn ipv4 protocol number into its name'
err = 'invalid ipv4 protocol number {}'
rv = self._num_toname.get(num, None)
if rv is None:
raise ValueError(err.format(num))
return rv
class Ip4Service(object):
'translate between ipv4 service name and associated portstrings'
def __init__(self):
self._service_toports = {} # e.g https -> ['443/tcp', '443/udp']
self._port_toservice = {} # 'port/proto' -> ip4-service-name
for portstr, service in IP4SERVICES.items():
self._port_toservice[portstr] = service
self._service_toports.setdefault(service, []).append(portstr)
def getportsbyserv(self, name):
'translate service name (eg https) to a list of portstrings'
rv = self._service_toports.get(name.lower(), [])
return rv
def getservbyport(self, portstr):
'translate a portstring to a service name'
rv = self._port_toservice.get(portstr.lower(), '')
return rv
def set_service(self, service, portstrings):
'set known ports for a service, eg http->[80/tcp]'
# TODO: check validity, remove spaces etc ...
service = service.strip().lower()
portstrings = [portstr.strip().lower() for portstr in portstrings]
self._service_toports[service] = portstrings
for portstr in portstrings:
self._port_toservice[portstr] = service
IPP = Ip4Protocol() # for use w/ Ival (ipv4 only)
class Ival(object):
'helper class that abstracts PORTSTR or IP'
INVALID, IP, PORTSTR = (0, 1, 2) # types of Ival's
TYPE = {0: 'INVALID', 1: 'IP', 2: 'PORTSTR'}
TYPES = (INVALID, IP, PORTSTR)
def __init__(self, type_, start, length):
'create Ival from specified type & start, length'
self.type = type_
self.start = start
self.length = length
# -- alternate constructors
@classmethod
def ip_pfx(cls, value):
'Create Ival IP from a.b.c.d/e'
if value == 'any':
return cls(cls.IP, 0, 2**32)
x = value.split('/', 1)
err = 'Invalid ip prefix {!r}'
plen = 32 if len(x) == 1 else int(x[1])
if plen < 0 or plen > 32:
raise ValueError(err.format(value))
x = list(map(int, x[0].split('.')))
if len(x) < 1 or len(x) > 4:
raise ValueError(err.format(value))
elif len(x) < 4:
x = (x + [0, 0, 0, 0])[0:4]
for digit in x:
if digit < 0 or digit > 255:
raise ValueError(err.format(value))
return cls(cls.IP, x[0]*2**24 + x[1]*2**16 + x[2]*2**8 + x[3],
2**(32-plen))
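# Illustrative values (hypothetical prefixes):
#   Ival.ip_pfx('10.0.0.0/8') -> type IP, start 10 * 2**24, length 2**24
#   Ival.ip_pfx('10/8')       -> same interval; short prefixes are zero-padded
#   Ival.ip_pfx('any')        -> the full IPv4 space, i.e. str() == '0.0.0.0/0'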
@classmethod
def port_pfx(cls, value):
'create Ival PORTSTR from port expressed as prefix a.b.c.d/e'
return Ival.ip_pfx(value).switch(cls.PORTSTR)
@classmethod
def port_str(cls, value):
'Create Ival from <port>/<proto>'
value = value.lower().strip()
err = 'Invalid port string {!r}'
if value == 'any/any' or value == 'any':
return cls(cls.PORTSTR, 0, 2**32)
x = value.split('/') # port(range)/proto-name
if len(x) != 2:
raise ValueError(err.format(value))
x[0:1] = x[0].split('-') # only split port(range) on '-'
x = [y.strip() for y in x]
if len(x) == 2:
# port/proto or any/proto
proto_num = IPP.getprotobyname(x[1])
if x[0] == 'any':
length = 2**16
base = 0
else:
length = 1
base = int(x[0])
if base < 0 or base > 2**16 - 1:
raise ValueError(err.format(value))
return cls(cls.PORTSTR, proto_num * 2**16 + base, length)
elif len(x) == 3:
# start-stop/proto-name
proto_num = IPP.getprotobyname(x[2])
start, stop = int(x[0]), int(x[1])
if start > stop:
start, stop = stop, start
length = stop - start + 1
if start < 0 or start > 2**16 - 1:
raise ValueError(err.format(value))
if stop < 0 or stop > 2**16 - 1:
raise ValueError(err.format(value))
return cls(cls.PORTSTR, proto_num * 2**16 + start, length)
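# Illustrative values, assuming IP4PROTOCOLS maps 'tcp' to its IANA number 6:
#   Ival.port_str('80/tcp')    -> start 6 * 2**16 + 80, length 1; str() == '80/tcp'
#   Ival.port_str('80-81/tcp') -> start 6 * 2**16 + 80, length 2; str() == '80-81/tcp'
#   Ival.port_str('any/any')   -> the full range (length 2**32)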
@classmethod
def port_proto(cls, port, proto):
'Create Ival from <port>, <proto>'
port = int(port)
proto = int(proto)
err = 'Invalid port protocol numbers {!r}, {!r}'
if proto < 0 or proto > 255 or port < 0 or port > 2**16 - 1:
raise ValueError(err.format(port, proto))
return cls(cls.PORTSTR, port + proto * 2**16, 1)
# -- comparisons
def __repr__(self):
return '({!r}, {!r})'.format(self.TYPE[self.type], str(self))
def __str__(self):
if self.type == self.IP:
if self.length == 2**32:
return '0.0.0.0/0' # 'any'
elif self.length == 1:
plen = ''
else:
plen = '/{}'.format(32 - int(math.log(
1 + self.length)//math.log(2)))
d1 = (self.start // 2**24) & 0xFF
d2 = (self.start // 2**16) & 0xFF
d3 = (self.start // 2**8) & 0xFF
d4 = (self.start) & 0xFF
return '{}.{}.{}.{}{}'.format(d1, d2, d3, d4, plen)
elif self.type == self.PORTSTR:
if self.length == 2**32:
return 'any/any'
elif self.length == 2**16:
ports = 'any'
elif self.length == 1:
ports = str(self.start & 0xFFFF)
else:
start = self.start & 0xFFFF
ports = '{}-{}'.format(start, start + self.length - 1)
proto = int((self.start // 2**16) & 0xFF)
name = IPP.getnamebyproto(proto)
return '{}/{}'.format(ports, name)
else:
return 'invalid'
def __len__(self):
return self.length
def __contains__(self, other):
return self.type == other.type and\
self.start <= other.start and\
self.start + self.length >= other.start + other.length
def __hash__(self):
'needed because of __eq__; do not modify obj when hashed'
return hash(self.values())
def __ne__(self, other):
return self.values() != other.values()
def __eq__(self, other):
# max intervals (len is 2**32) are equal regardless of start value
if self.type == other.type and self.length == 2**32:
return other.length == self.length
return self.values() == other.values()
def __lt__(self, other):
return self.values() < other.values()
def __le__(self, other):
'self starts to the left of other or is smaller'
return self.values() <= other.values()
def __gt__(self, other):
'self starts to the right of other'
return self.values() > other.values()
def __ge__(self, other):
'self starts to the right of other or is equal'
return self.values() >= other.values()
def __iter__(self):
'iterate through the interval with new ivals of len=1'
self.idx = -1
return self
def __next__(self):
self.idx += 1
if self.idx < self.length:
return Ival(self.type, self.start + self.idx, 1)
raise StopIteration
# -- methods
def values(self, values=None):
'get the values of the ival object'
return (self.type, self.start, self.length)
def is_valid(self):
'return True if valid, False otherwise'
if self.type not in self.TYPES:
return False
if self.start < 0 or self.start > 2**32 - 1:
return False
if self.length < 0 or self.length > 2**32 - 1:
return False
return True
def prefix(self):
'return a new IP-typed Ival for this ival'
ival = self.network()
ival.type = Ival.IP
return ival
def network(self):
'return new ival for the first value'
# keeps the prefix (ival) length, only mask start if its IP
# is a no-op for types != 'IP' (!)
mask = 2**32 - self.length
start = self.start & mask if self.type == Ival.IP else self.start
return Ival(self.type, start, self.length)
def broadcast(self):
'return new ival for the last value'
# TODO: Ival('0/0').broadcast() == Ival('255.255.255.255') ??
# should broadcast yield an address/32 or address/pfxlen ??
imask = self.length - 1
start = self.start | imask if self.type == Ival.IP else self.start
return Ival(self.type, start, self.length)
def address(self):
'return new ival with length 1 for start value'
return Ival(self.type, self.start, 1)
def mask(self):
'return the mask as quad dotted string'
if self.type == self.IP:
mask = 2**32 - self.length
d1 = (mask // 2**24) & 0xFF
d2 = (mask // 2**16) & 0xFF
d3 = (mask // 2**8) & 0xFF
d4 = (mask) & 0xFF
return '{}.{}.{}.{}'.format(d1, d2, d3, d4)
raise ValueError('type {!r} not a prefix'.format(self.TYPE[self.type]))
def imask(self):
'return the inverse mask as quad dotted string'
if self.type == self.IP:
imask = self.length - 1
d1 = (imask // 2**24) & 0xFF
d2 = (imask // 2**16) & 0xFF
d3 = (imask // 2**8) & 0xFF
d4 = (imask) & 0xFF
return '{}.{}.{}.{}'.format(d1, d2, d3, d4)
raise ValueError('type {!r} not a prefix'.format(self.TYPE[self.type]))
def is_any(self):
return self.length == 2**32 # any-interval has max length
def port(self):
'return new Ival with type set as PORTSTR'
return Ival(Ival.PORTSTR, self.start, self.length)
def switch(self, ival_type):
'switch Ival.type to ival_type'
if ival_type not in self.TYPES:
raise ValueError('Unknown Ival type {!r}'.format(ival_type))
self.type = ival_type
return self
# -- summarization
def splice(self, ival_type=None):
'return a list of new prefix-like intervals, override type if given'
if ival_type and ival_type not in self.TYPES:
raise ValueError('Unknown Ival type {!r}'.format(ival_type))
rv = []
start, length = self.start, self.length
ival_type = ival_type if ival_type else self.type
maxx = start + length
while start < maxx:
lbit = lowest_bit(start)
hbit = length.bit_length()
maxlen = 2**lbit
newlen = maxlen if length > maxlen else 2**(hbit-1)
rv.append((start, newlen))
start, length = start + newlen, length - newlen
return [Ival(ival_type, x, y) for x, y in rv]
@classmethod
def combine(cls, x, y):
'if possible, return a combined ival, None otherwise'
# border cases
if x is None and y is None:
return None
elif y is None:
return cls(*x.values())
elif x is None:
return cls(*y.values())
elif x.type != y.type:
return None
# x,y two valid Ivals of same type
# - intervals are the same
if x == y:
return cls(*x.values())
# - interval inside the other interval
if x in y:
return cls(*y.values())
if y in x:
return cls(*x.values())
# ensure x starts to the left of y
x, y = (x, y) if x.start <= y.start else (y, x)
# type dependent situations
if x.type == cls.PORTSTR:
# combine adjacent intervals
if x.start + x.length == y.start:
return cls(x.type, x.start, x.length + y.length)
# combine partially overlapping intervals
if x.start + x.length > y.start:
ivlen = max(x.start + x.length, y.start + y.length) - x.start
return cls(x.type, x.start, ivlen)
if x.type == cls.IP:
# pfxs can only be combined if:
# - intervals are adjacent
# - lengths are equal
# - lowest start address does not change with doubling of mask
if x.length == y.length and x.start + x.length == y.start:
# x.start MUST be the network() address of the ival!
if x.start == x.start & (2**32 - 2*x.length):
return cls(x.type, x.start, 2*x.length)
return None # no joy
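# Illustrative combinations (hypothetical inputs):
#   10.0.0.0/25 + 10.0.0.128/25 -> 10.0.0.0/24 (adjacent, equal size, aligned)
#   10.0.0.128/25 + 10.0.1.0/25 -> None (the pair does not align on a /24 boundary)
#   80-81/tcp + 82/tcp          -> 80-82/tcp  (adjacent port ranges are merged)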
@classmethod
def summary(cls, ivals):
'summarize a (heterogeneous) list of port/prefix-intervals'
# reverse since this sorts on type, start & length in ascending order
# originals go back on the heap, new ivals go onto rv
heap = list(reversed(sorted(i.network() for i in ivals)))
rv = []
while len(heap):
x = heap.pop()
y = heap.pop() if len(heap) else None
if y:
z = cls.combine(x, y) # z is None if not combined
if z:
heap.append(z) # combined range back on heap
continue # start again
else:
heap.append(y) # push back for later combine attempt
y = rv.pop() if len(rv) else None
if y:
z = cls.combine(x, y) # try to combine x with the last interval on rv
if z:
heap.append(z) # combined range back on heap
else:
rv.append(y) # could not combine, both goto rv and
rv.append(x) # make sure to keep rv ordering intact
else:
rv.append(x)
return [Ival(*i.values()) for i in rv] # ensure new objs are returned
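# Illustrative usage (hypothetical prefixes): two adjacent /25 networks
# collapse into their covering /24:
#   Ival.summary([Ival.ip_pfx('10.0.0.0/25'), Ival.ip_pfx('10.0.0.128/25')])
#   -> a single Ival whose str() is '10.0.0.0/24'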
@classmethod
def pfx_summary(cls, ivals):
'summarize the IP-s in ivals, returns only IP-pfxs'
return cls.summary(i for i in ivals if i.type == cls.IP)
@classmethod
def port_summary(cls, ivals):
'summarize the PORTSTR-s in ivals, returns only PORTSTRs'
return cls.summary(i for i in ivals if i.type == cls.PORTSTR)
@classmethod
def portpfx_summary(cls, ivals):
'summarize PORTSTR-s and return them as ip prefixes'
PORTSTR, IP = cls.PORTSTR, cls.IP
portpfxs = [y for x in ivals if x.type==PORTSTR for y in x.splice(IP)]
return cls.summary(portpfxs)
class Ip4FilterError(Exception):
pass
class Ip4Match(object):
__slots__ = 'rule action name object'.split()
def __init__(self, rule, action, name, obj):
self.rule = rule
self.action = action
self.name = name
self.object = obj
class Ip4Filter(object):
'''
A class for ip session lookup's via src, dst & portstring
- action() -> yields associated action or nomatch value
- match() -> yields match dict or nomatch value
- get() -> match dict {
'rule': Matched rule number
'name' : the name of rule (or '')
'action': the rule's action
'object': the rule's python object (or None)
}
'''
def __init__(self, nomatch=None):
self._src = pt.PyTricia() # pfx -> set(rids) - Ival(src ip pfx)
self._dst = pt.PyTricia() # pfx -> set(rids) - Ival(dst ip pfx)
self._srv = pt.PyTricia() # pfx'-> set(rids) - Ival(dport/protocol)
self._act = {} # rid -> action (lower cased)
self._obj = {} # rid -> any python object
self._tag = {} # rid -> name tag of rule if any, else ''
self._nomatch = nomatch # return value when there is no match at all
def __len__(self):
'the number of rules in the filter'
return len(self._act)
def _lines(self, csv=False):
'return filter as lines for printing'
# {rule_id: {src:[..], dst:[..], srv: [..], name: str, action: str, obj: obj}}
rules = sorted(self.as_dict.items()) # rules dict -> ordered [(k,v)]
fields = 'rule name src dst srv action obj'.split()
fmt = '{!s:<5} {!s:<15} {!s:21} {!s:21} {!s:16} {!s:7} {!s}'
fmt = '{},{},{},{},{},{},{}' if csv else fmt
lines = [fmt.format(*fields)] # csv-header of field names
for rid, rule in rules:
maxl = max(len(rule['src']), len(rule['dst']), len(rule['srv']))
for lnr in range(0, maxl):
rid = rid if lnr == 0 else ''
tag = rule['name'] if lnr == 0 else ''
src = rule['src'][lnr] if lnr < len(rule['src']) else ''
dst = rule['dst'][lnr] if lnr < len(rule['dst']) else ''
prt = rule['srv'][lnr] if lnr < len(rule['srv']) else ''
act = rule['action'] if lnr == 0 else ''
obj = json.dumps(rule['obj']) if lnr == 0 else ''
obj = '' if obj in ['null', '""'] else obj
lines.append(fmt.format(rid, tag, src, dst, prt, act, obj))
return lines
def _set_rid(self, rid, tbl, ival):
'set/add to rule-id on single prefix in specific table'
pfx = str(ival)
try:
if tbl.has_key(pfx): # find the exact prefix
tbl[pfx].add(rid) # add to existing prefix
else:
tbl[pfx] = set([rid]) # it's a new prefix
# propagate rid to more specifics
for kid in tbl.children(pfx):
tbl[kid].add(rid)
# adopt rid's matched by less specific parent (if any)
parent = tbl.parent(pfx)
if parent:
tbl[pfx] = tbl[pfx].union(tbl[parent])
except ValueError as e:
fmt = 'invalid prefix? {}: {}'
print(fmt.format(pfx, repr(e)), file=sys.stderr)
sys.exit(1)
return self
def _add(self, rid, srcs, dsts, srvs, name='', action='', obj=None):
'add Ivals to a new rule or just add to an existing rule'
for ival in Ival.pfx_summary(srcs):
self._set_rid(rid, self._src, ival)
for ival in Ival.pfx_summary(dsts):
self._set_rid(rid, self._dst, ival)
for ival in Ival.portpfx_summary(srvs):
self._set_rid(rid, self._srv, ival)
# name,action are strings; action always lowercase
name = '' if name is None else str(name).strip()
action = '' if action is None else str(action).strip().lower()
# set attributes if not already present
self._act.setdefault(rid, action)
self._obj.setdefault(rid, obj)
self._tag.setdefault(rid, name)
return self
# -- build methods
@classmethod
def compile(cls, fname):
from . import comp
return comp.compile(fname)
def add(self, rid, srcs, dsts, srvs, action='', name='', obj=None):
'add src-list, dst-list and or list of srvs to a new/old rule'
# sanity check arguments
if not isinstance(rid, int):
raise TypeError('expected an int, not {!r}'.format(rid))
for x in [srcs, dsts, srvs]:
if not isinstance(x, (list, tuple)):
raise TypeError('expected a list, not {!r}'.format(x))
srcs = [Ival.ip_pfx(x) for x in srcs]
dsts = [Ival.ip_pfx(x) for x in dsts]
srvs = [Ival.port_str(x) for x in srvs]
return self._add(rid, srcs, dsts, srvs, name, action, obj)
def ruleset(self, src=None, dst=None, srv=None):
'return the set of rule ids matched by src and/or dst and/or service'
# - finds matching rule sets by prefix lookups per item
# returns the minimum rule nr of intersection
try:
rv = [] # collect required matches
if src is not None:
rv.append(self._src[src])
if dst is not None:
rv.append(self._dst[dst])
if srv is not None:
# encode as pfx to index a PyTricia table
pfx = str(Ival.port_str(srv).switch(Ival.IP))
rv.append(self._srv[pfx])
if len(rv):
return set.intersection(*rv)
return set()
except (KeyError, ValueError):
return set()
except TypeError: # invalid value supplied
print('ruleset type error on', src, dst, srv)
return set()
# -- usage methods
def match(self, src, dst, srv=None):
'return a match object or the nomatch value'
rids = self.ruleset(src, dst, srv)
if len(rids) == 0:
return self._nomatch
rid = min(rids)
return Ip4Match(rid,
self._act.get(rid, None),
self._tag.get(rid, ''),
self._obj.get(rid, None))
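# Illustrative usage (hypothetical rule and session):
#   f = Ip4Filter()
#   f.add(1, ['10.0.0.0/8'], ['0.0.0.0/0'], ['80/tcp'], action='permit', name='web')
#   f.match('10.1.1.1', '8.8.8.8', '80/tcp')    # -> Ip4Match with action 'permit'
#   f.match('192.168.0.1', '8.8.8.8', '53/udp') # -> the nomatch value (None by default)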
# -- to/from CSV
@property
def as_dict(self):
'reconstruct the rules in a dict of dicts'
# {rule nr: {src:[..], dst:[..], srv: [..], action: str, name: str, obj: {..}}}
rules = {}
for pfx in self._src.keys():
for rulenr in self._src[pfx]:
rules.setdefault(rulenr, {}).setdefault('src', []).append(pfx)
try:
for pfx in self._dst.keys():
for rulenr in self._dst[pfx]:
rules[rulenr].setdefault('dst', []).append(pfx)
for pfx in self._srv.keys(): # portstr encoded as a pfx
for rulenr in self._srv[pfx]:
rules[rulenr].setdefault('srv', []).append(pfx)
for rulenr, action in self._act.items():
rules[rulenr]['action'] = action
for rulenr, obj in self._obj.items():
rules[rulenr]['obj'] = obj
for rulenr, name in self._tag.items():
rules[rulenr]['name'] = name
except KeyError as e:
errfmt = 'Error in rule {}:{}'
raise Exception(errfmt.format(rulenr, repr(e)))
for r, rule in rules.items():
# first summarize auto-added more specifics (for set calculations)
rule['src'] = Ival.summary(map(Ival.ip_pfx, rule['src']))
rule['dst'] = Ival.summary(map(Ival.ip_pfx, rule['dst']))
rule['srv'] = Ival.summary(map(Ival.port_pfx, rule['srv']))
# next stringify the ivals
rule['src'] = list(map(str, rule['src']))
rule['dst'] = list(map(str, rule['dst']))
rule['srv'] = list(map(str, rule['srv']))
return rules
def to_csv(self):
'write ruleset to csv-file'
rv = []
for line in self._lines(csv=True):
rv.append(line)
return '\n'.join(rv)
def from_csv(self, text):
'read ruleset from csv-text'
inp = io.StringIO(text + '\n')
try:
df = pd.read_csv(inp, skipinitialspace=True)
except pd.errors.EmptyDataError:
df = pd.DataFrame() # empty dataframe
df.columns = [re.sub(r'(\s|\.)+', '_', n) for n in df.columns]
if len(df.index) == 0:
            raise IOError('Ip4Filter cannot read ruleset from empty csv text')
required_columns = 'rule name src dst srv action obj'.split()
missing = [x for x in required_columns if x not in df.columns.values]
if len(missing):
raise ValueError('Ip4Filter is missing columns {}'.format(missing))
try:
df['rule'].fillna(method='ffill', inplace=True)
df.fillna(value='', inplace=True)
df['rule'] = df['rule'].astype(int)
for idx, row in df.iterrows():
rid = row['rule']
srcs = [Ival.ip_pfx(x) for x in row['src'].split()]
dsts = [Ival.ip_pfx(x) for x in row['dst'].split()]
ports = [Ival.port_str(x) for x in row['srv'].split()]
act = row['action']
name = row['name']
obj = json.loads(row['obj']) if len(row['obj']) else ''
self._add(rid, srcs, dsts, ports, name=name, action=act, obj=obj)
except Exception as e:
sys.exit(repr(e))
return self
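# --- hedged usage sketch (not part of the original module) ---
# Minimal illustration of how the rule table above is meant to be used:
# build a filter, add one rule, then match a src/dst/service triple.
# Assumptions: the class is named Ip4Filter (as its error messages suggest)
# and Ival.ip_pfx / Ival.port_str accept strings like '10.0.0.0/8' and
# '80/tcp'; treat these literals as placeholders for the real Ival formats.
if __name__ == '__main__':
    ipf = Ip4Filter()
    ipf.add(1, ['10.0.0.0/8'], ['0.0.0.0/0'], ['80/tcp'],
            action='permit', name='web-out')
    # match() returns an Ip4Match for the lowest-numbered matching rule,
    # or the filter's nomatch value when nothing matches
    print(ipf.match('10.1.2.3', '8.8.8.8', '80/tcp'))
    print(ipf.to_csv())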
|
|
"""
Unit tests for the basin hopping global minimization algorithm.
"""
from __future__ import division, print_function, absolute_import
import copy
from numpy.testing import assert_almost_equal, assert_equal, assert_
from pytest import raises as assert_raises
import numpy as np
from numpy import cos, sin
from scipy.optimize import basinhopping, OptimizeResult
from scipy.optimize._basinhopping import (
Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)
def func1d(x):
f = cos(14.5 * x - 0.3) + (x + 0.2) * x
df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
return f, df
def func1d_nograd(x):
    f = cos(14.5 * x - 0.3) + (x + 0.2) * x
    return f
def func2d_nograd(x):
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
return f
def func2d(x):
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
df = np.zeros(2)
df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
df[1] = 2. * x[1] + 0.2
return f, df
def func2d_easyderiv(x):
f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0]
df = np.zeros(2)
df[0] = 4.0*x[0] + 2.0*x[1] - 6.0
df[1] = 2.0*x[0] + 4.0*x[1]
return f, df
class MyTakeStep1(RandomDisplacement):
"""use a copy of displace, but have it set a special parameter to
make sure it's actually being used."""
def __init__(self):
self.been_called = False
super(MyTakeStep1, self).__init__()
def __call__(self, x):
self.been_called = True
return super(MyTakeStep1, self).__call__(x)
def myTakeStep2(x):
"""redo RandomDisplacement in function form without the attribute stepsize
to make sure still everything works ok
"""
s = 0.5
x += np.random.uniform(-s, s, np.shape(x))
return x
class MyAcceptTest(object):
"""pass a custom accept test
This does nothing but make sure it's being used and ensure all the
possible return values are accepted
"""
def __init__(self):
self.been_called = False
self.ncalls = 0
self.testres = [False, 'force accept', True, np.bool_(True),
np.bool_(False), [], {}, 0, 1]
def __call__(self, **kwargs):
self.been_called = True
self.ncalls += 1
if self.ncalls - 1 < len(self.testres):
return self.testres[self.ncalls - 1]
else:
return True
class MyCallBack(object):
"""pass a custom callback function
This makes sure it's being used. It also returns True after 10
steps to ensure that it's stopping early.
"""
def __init__(self):
self.been_called = False
self.ncalls = 0
def __call__(self, x, f, accepted):
self.been_called = True
self.ncalls += 1
if self.ncalls == 10:
return True
class TestBasinHopping(object):
def setup_method(self):
""" Tests setup.
Run tests based on the 1-D and 2-D functions described above.
"""
self.x0 = (1.0, [1.0, 1.0])
self.sol = (-0.195, np.array([-0.195, -0.1]))
self.tol = 3 # number of decimal places
self.niter = 100
self.disp = False
# fix random seed
np.random.seed(1234)
self.kwargs = {"method": "L-BFGS-B", "jac": True}
self.kwargs_nograd = {"method": "L-BFGS-B"}
def test_TypeError(self):
# test the TypeErrors are raised on bad input
i = 1
# if take_step is passed, it must be callable
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
take_step=1)
# if accept_test is passed, it must be callable
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
accept_test=1)
def test_1d_grad(self):
# test 1d minimizations with gradient
i = 0
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_2d(self):
# test 2d minimizations with gradient
i = 1
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
assert_(res.nfev > 0)
def test_njev(self):
# test njev is returned correctly
i = 1
minimizer_kwargs = self.kwargs.copy()
# L-BFGS-B doesn't use njev, but BFGS does
minimizer_kwargs["method"] = "BFGS"
res = basinhopping(func2d, self.x0[i],
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
disp=self.disp)
assert_(res.nfev > 0)
assert_equal(res.nfev, res.njev)
def test_jac(self):
# test jacobian returned
minimizer_kwargs = self.kwargs.copy()
# BFGS returns a Jacobian
minimizer_kwargs["method"] = "BFGS"
res = basinhopping(func2d_easyderiv, [0.0, 0.0],
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
disp=self.disp)
assert_(hasattr(res.lowest_optimization_result, "jac"))
#in this case, the jacobian is just [df/dx, df/dy]
_, jacobian = func2d_easyderiv(res.x)
assert_almost_equal(res.lowest_optimization_result.jac, jacobian, self.tol)
def test_2d_nograd(self):
# test 2d minimizations without gradient
i = 1
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=self.kwargs_nograd,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_all_minimizers(self):
# test 2d minimizations with gradient. Nelder-Mead, Powell and COBYLA
# don't accept jac=True, so aren't included here.
i = 1
methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
minimizer_kwargs = copy.copy(self.kwargs)
for method in methods:
minimizer_kwargs["method"] = method
res = basinhopping(func2d, self.x0[i],
minimizer_kwargs=minimizer_kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_all_nograd_minimizers(self):
# test 2d minimizations without gradient. Newton-CG requires jac=True,
# so not included here.
i = 1
methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
'Nelder-Mead', 'Powell', 'COBYLA']
minimizer_kwargs = copy.copy(self.kwargs_nograd)
for method in methods:
minimizer_kwargs["method"] = method
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=minimizer_kwargs,
niter=self.niter, disp=self.disp)
tol = self.tol
if method == 'COBYLA':
tol = 2
assert_almost_equal(res.x, self.sol[i], decimal=tol)
def test_pass_takestep(self):
# test that passing a custom takestep works
# also test that the stepsize is being adjusted
takestep = MyTakeStep1()
initial_step_size = takestep.stepsize
i = 1
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp,
take_step=takestep)
assert_almost_equal(res.x, self.sol[i], self.tol)
assert_(takestep.been_called)
# make sure that the built in adaptive step size has been used
assert_(initial_step_size != takestep.stepsize)
def test_pass_simple_takestep(self):
# test that passing a custom takestep without attribute stepsize
takestep = myTakeStep2
i = 1
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=self.kwargs_nograd,
niter=self.niter, disp=self.disp,
take_step=takestep)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_pass_accept_test(self):
# test passing a custom accept test
# makes sure it's being used and ensures all the possible return values
# are accepted.
accept_test = MyAcceptTest()
i = 1
# there's no point in running it more than a few steps.
basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=10, disp=self.disp, accept_test=accept_test)
assert_(accept_test.been_called)
def test_pass_callback(self):
# test passing a custom callback function
# This makes sure it's being used. It also returns True after 10 steps
# to ensure that it's stopping early.
callback = MyCallBack()
i = 1
# there's no point in running it more than a few steps.
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=30, disp=self.disp, callback=callback)
assert_(callback.been_called)
assert_("callback" in res.message[0])
assert_equal(res.nit, 10)
def test_minimizer_fail(self):
# test if a minimizer fails
i = 1
self.kwargs["options"] = dict(maxiter=0)
self.niter = 10
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
# the number of failed minimizations should be the number of
# iterations + 1
assert_equal(res.nit + 1, res.minimization_failures)
def test_niter_zero(self):
# gh5915, what happens if you call basinhopping with niter=0
i = 0
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=0, disp=self.disp)
def test_seed_reproducibility(self):
# seed should ensure reproducibility between runs
minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
f_1 = []
def callback(x, f, accepted):
f_1.append(f)
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
niter=10, callback=callback, seed=10)
f_2 = []
def callback2(x, f, accepted):
f_2.append(f)
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
niter=10, callback=callback2, seed=10)
assert_equal(np.array(f_1), np.array(f_2))
class Test_Storage(object):
def setup_method(self):
self.x0 = np.array(1)
self.f0 = 0
minres = OptimizeResult()
minres.x = self.x0
minres.fun = self.f0
self.storage = Storage(minres)
def test_higher_f_rejected(self):
new_minres = OptimizeResult()
new_minres.x = self.x0 + 1
new_minres.fun = self.f0 + 1
ret = self.storage.update(new_minres)
minres = self.storage.get_lowest()
assert_equal(self.x0, minres.x)
assert_equal(self.f0, minres.fun)
assert_(not ret)
def test_lower_f_accepted(self):
new_minres = OptimizeResult()
new_minres.x = self.x0 + 1
new_minres.fun = self.f0 - 1
ret = self.storage.update(new_minres)
minres = self.storage.get_lowest()
assert_(self.x0 != minres.x)
assert_(self.f0 != minres.fun)
assert_(ret)
class Test_RandomDisplacement(object):
def setup_method(self):
self.stepsize = 1.0
self.displace = RandomDisplacement(stepsize=self.stepsize)
self.N = 300000
self.x0 = np.zeros([self.N])
def test_random(self):
# the mean should be 0
# the variance should be (2*stepsize)**2 / 12
# note these tests are random, they will fail from time to time
x = self.displace(self.x0)
v = (2. * self.stepsize) ** 2 / 12
assert_almost_equal(np.mean(x), 0., 1)
assert_almost_equal(np.var(x), v, 1)
class Test_Metropolis(object):
def setup_method(self):
self.T = 2.
self.met = Metropolis(self.T)
def test_boolean_return(self):
# the return must be a bool. else an error will be raised in
# basinhopping
ret = self.met(f_new=0., f_old=1.)
assert isinstance(ret, bool)
def test_lower_f_accepted(self):
assert_(self.met(f_new=0., f_old=1.))
def test_KeyError(self):
# should raise KeyError if kwargs f_old or f_new is not passed
assert_raises(KeyError, self.met, f_old=1.)
assert_raises(KeyError, self.met, f_new=1.)
def test_accept(self):
# test that steps are randomly accepted for f_new > f_old
one_accept = False
one_reject = False
for i in range(1000):
if one_accept and one_reject:
break
ret = self.met(f_new=1., f_old=0.5)
if ret:
one_accept = True
else:
one_reject = True
assert_(one_accept)
assert_(one_reject)
def test_GH7495(self):
# an overflow in exp was producing a RuntimeWarning
# create own object here in case someone changes self.T
met = Metropolis(2)
with np.errstate(over='raise'):
met.accept_reject(0, 2000)
class Test_AdaptiveStepsize(object):
def setup_method(self):
self.stepsize = 1.
self.ts = RandomDisplacement(stepsize=self.stepsize)
self.target_accept_rate = 0.5
self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
accept_rate=self.target_accept_rate)
def test_adaptive_increase(self):
# if few steps are rejected, the stepsize should increase
x = 0.
self.takestep(x)
self.takestep.report(False)
for i in range(self.takestep.interval):
self.takestep(x)
self.takestep.report(True)
assert_(self.ts.stepsize > self.stepsize)
def test_adaptive_decrease(self):
        # if few steps are accepted, the stepsize should decrease
x = 0.
self.takestep(x)
self.takestep.report(True)
for i in range(self.takestep.interval):
self.takestep(x)
self.takestep.report(False)
assert_(self.ts.stepsize < self.stepsize)
def test_all_accepted(self):
# test that everything works OK if all steps were accepted
x = 0.
for i in range(self.takestep.interval + 1):
self.takestep(x)
self.takestep.report(True)
assert_(self.ts.stepsize > self.stepsize)
def test_all_rejected(self):
# test that everything works OK if all steps were rejected
x = 0.
for i in range(self.takestep.interval + 1):
self.takestep(x)
self.takestep.report(False)
assert_(self.ts.stepsize < self.stepsize)
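# --- illustrative sketch (not one of the scipy tests above) ---
# Plain call pattern that the tests exercise: basinhopping() with a
# gradient-based local minimizer and a fixed seed for reproducibility.
# It reuses func2d defined above; x0 and niter are arbitrary choices.
def _example_run():
    minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
    res = basinhopping(func2d, [1.0, 1.0],
                       minimizer_kwargs=minimizer_kwargs,
                       niter=50, seed=1234)
    # res.x ends up close to the global minimum near [-0.195, -0.1]
    return res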
|
|
import base64
import hashlib
import json
import os
import re
import smtplib
import sys
import urllib
from django.core.context_processors import csrf
from django.core.validators import validate_email
from django.db.utils import IntegrityError
from django.http import *
from django.shortcuts import render_to_response
from django.utils.http import urlquote_plus
from django.views.decorators.csrf import csrf_exempt
from multiprocessing import Pool
from browser.utils import *
from core.db.manager import DataHubManager
from inventory.models import *
p = os.path.abspath(os.path.dirname(__file__))
'''
@author: Anant Bhardwaj
@date: Feb 12, 2012
'''
kEmail = "SESSION_EMAIL"
kUsername = "SESSION_USERNAME"
# for async calls
pool = Pool(processes=1)
'''
LOGIN/REGISTER/RESET
'''
def is_valid_username (username):
try:
if len(username) >3 and re.match(r'\w+', username).group() == username:
return True
except:
pass
return False
def login_required (f):
def wrap (request, *args, **kwargs):
if kEmail not in request.session.keys():
redirect_url = urlquote_plus(request.get_full_path())
return HttpResponseRedirect("/account/login?redirect_url=%s" %(redirect_url))
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
def login_form (request, redirect_url='/', errors=[]):
c = {'redirect_url':redirect_url, 'errors':errors, 'values':request.GET}
c.update(csrf(request))
return render_to_response('login.html', c)
def register_form (request, redirect_url='/', errors=[]):
c = {'redirect_url':redirect_url, 'errors':errors, 'values':request.GET}
c.update(csrf(request))
return render_to_response('register.html', c)
def login (request):
redirect_url = '/'
if('redirect_url' in request.GET.keys()):
redirect_url = urllib.unquote_plus(request.GET['redirect_url'])
if not redirect_url or redirect_url == '':
redirect_url = '/'
if request.method == "POST":
errors = []
login_email = ''
if('redirect_url' in request.POST.keys()):
redirect_url = urllib.unquote_plus(request.POST['redirect_url'])
email = None
try:
login_id = request.POST["login_id"].lower()
login_password = hashlib.sha1(request.POST["login_password"]).hexdigest()
# find the user email in the username, if it's there.
try:
validate_email(login_id.lower().strip())
email = login_id.lower().strip()
except:
pass
user = None
if email:
user = User.objects.get(email=login_id, password=login_password)
else:
user = User.objects.get(username=login_id, password=login_password)
clear_session(request)
request.session[kEmail] = user.email
request.session[kUsername] = user.username
redirect_url = redirect_url + urllib.unquote_plus('?auth_user=%s' %(user.username))
return HttpResponseRedirect(redirect_url)
except User.DoesNotExist:
try:
if email:
User.objects.get(email=login_id)
else:
User.objects.get(username=login_id)
errors.append(
'Wrong password. Please try again.<br /><br />'
'<a class="blue bold" href="/account/forgot">Click Here</a> '
'to reset your password.')
except User.DoesNotExist:
errors.append(
'Could not find any account associated with login_id: '
'%s.<br /><br /><a class="blue bold" '
'href="/account/register?redirect_url=%s">Click Here</a> '
'to create an account.' %(login_id,
urllib.quote_plus(redirect_url)))
return login_form(
request, redirect_url = urllib.quote_plus(redirect_url),
errors = errors)
except:
errors.append('Login failed.')
return login_form(
request, redirect_url = urllib.quote_plus(redirect_url),
errors = errors)
else:
try:
if request.session[kUsername]:
redirect_url = redirect_url + urllib.unquote_plus('?auth_user=%s' %(request.session[kUsername]))
return HttpResponseRedirect(redirect_url)
else:
return login_form(request, urllib.quote_plus(redirect_url))
except:
return login_form(request, urllib.quote_plus(redirect_url))
def register (request):
redirect_url = '/'
if('redirect_url' in request.GET.keys()):
redirect_url = urllib.unquote_plus(request.GET['redirect_url'])
if request.method == "POST":
errors = []
email = ''
try:
error = False
if('redirect_url' in request.POST.keys()):
redirect_url = urllib.unquote_plus(request.POST['redirect_url'])
username = request.POST["username"].lower()
email = request.POST["email"].lower()
password = request.POST["password"]
try:
validate_email(email.strip())
except:
errors.append("Invalid Email.")
error = True
if(not is_valid_username(username)):
errors.append("Invalid Username.")
error = True
if(password == ""):
errors.append("Empty Password.")
error = True
try:
user = User.objects.get(username=username)
errors.append("Username already taken.")
error = True
except User.DoesNotExist:
pass
if not error:
hashed_password = hashlib.sha1(password).hexdigest()
try:
DataHubManager.create_user(username=username, password=hashed_password)
except Exception, e:
print e
pass
try:
DataHubManager.change_password(username=username, password=hashed_password)
except Exception, e:
errors.append(str(e))
error = True
if(error):
return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
user = User(username=username, email=email, password=hashed_password)
user.save()
clear_session(request)
request.session[kEmail] = user.email
request.session[kUsername] = user.username
encrypted_email = encrypt_text(user.email)
subject = "Welcome to DataHub"
msg_body = '''
Dear %s,
Thanks for registering to DataHub.
Please click the link below to start using DataHub:
%s://%s/account/verify/%s
''' % (
user.email,
'https' if request.is_secure() else 'http',
request.get_host(),
encrypted_email)
pool.apply_async(send_email, [user.email, subject, msg_body])
redirect_url = redirect_url + urllib.unquote_plus('?auth_user=%s' %(user.username))
return HttpResponseRedirect(redirect_url)
except IntegrityError:
errors.append(
'Account with the email address <a href="mailto:%s">%s</a> already exists.<br /> <br />Please <a class="blue bold" href="/account/login?login_email=%s">Sign In</a>.'
% (email, email, urllib.quote_plus(email)))
return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
except Exception, e:
errors.append("Error %s." %(str(e)))
return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
else:
return register_form(request, redirect_url = urllib.quote_plus(redirect_url))
def clear_session (request):
request.session.flush()
if kEmail in request.session.keys():
del request.session[kEmail]
if kUsername in request.session.keys():
del request.session[kUsername]
def logout (request):
clear_session(request)
c = {
'msg_title': 'Thank you for using DataHub!',
        'msg_body': 'You have been logged out.<br /><br /><a href="/account/login">Click Here</a> to sign in again.'
}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
def forgot (request):
if request.method == "POST":
errors = []
try:
user_email = request.POST["email"].lower()
user = User.objects.get(email=user_email)
encrypted_email = encrypt_text(user_email)
subject = "DataHub Password Reset"
msg_body = '''
Dear %s,
Please click the link below to reset your DataHub password:
%s://%s/account/reset/%s
''' % (
user.email,
'https' if request.is_secure() else 'http',
request.get_host(),
encrypted_email)
pool.apply_async(send_email, [user_email, subject, msg_body])
c = {
'msg_title': 'DataHub Reset Password',
'msg_body': 'A link to reset your password has been sent to your email address.'
}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
except User.DoesNotExist:
errors.append(
"Invalid Email Address.")
except Exception, e:
errors.append(
'Error: %s.'
'Please try again or send an email to '
'<a href="mailto:[email protected]">[email protected]</a>.' %(str(e)))
c = {'errors': errors, 'values': request.POST}
c.update(csrf(request))
return render_to_response('forgot.html', c)
else:
c = {'values': request.GET}
c.update(csrf(request))
return render_to_response('forgot.html', c)
def verify (request, encrypted_email):
errors = []
c = {'msg_title': 'DataHub Account Verification'}
try:
user_email = decrypt_text(encrypted_email)
user = User.objects.get(email=user_email)
c.update({
'msg_body': 'Thanks for verifying your email address!<br /> <br /><a href="/">Click Here</a> to start using DataHub.'
})
clear_session(request)
request.session[kEmail] = user.email
request.session[kUsername] = user.username
except:
errors.append(
'Wrong verify code in the URL. '
'Please try again or send an email to '
'<a href="mailto:[email protected]">[email protected]</a>')
c.update({'errors': errors})
c.update(csrf(request))
return render_to_response('confirmation.html', c)
def reset (request, encrypted_email):
errors = []
error = False
if request.method == "POST":
try:
user_email = request.POST["user_email"].lower()
password = request.POST["new_password"]
password2 = request.POST["new_password2"]
if password == "":
errors.append("Empty Password.")
error = True
if password2 != password:
errors.append("Password and Confirm Password don't match.")
error = True
if not error:
hashed_password = hashlib.sha1(password).hexdigest()
user = User.objects.get(email=user_email)
try:
DataHubManager.create_user(username=user.username, password=hashed_password)
except Exception, e:
pass
try:
DataHubManager.change_password(username=user.username, password=hashed_password)
except Exception, e:
errors.append(str(e))
error = True
if error:
c = {
'user_email': user_email,
'encrypted_email': encrypted_email,
'errors': errors
}
c.update(csrf(request))
return render_to_response('reset.html', c)
else:
hashed_password = hashlib.sha1(password).hexdigest()
user = User.objects.get(email=user_email)
user.password = hashed_password
user.save()
c = {
'msg_title': 'DataHub Reset Password',
'msg_body': 'Your password has been changed successfully.<br /> <br />'
'<a href="/account/login" class="blue bold">Click Here</a>'
' to sign in.'
}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
except:
errors.append(
'Some unknown error happened. '
'Please try again or send an email to '
'<a href="mailto:[email protected]">[email protected]</a>')
c = {'errors': errors}
c.update(csrf(request))
return render_to_response('reset.html', c)
else:
try:
user_email = decrypt_text(encrypted_email)
User.objects.get(email=user_email)
c = {
'user_email': user_email,
'encrypted_email': encrypted_email
}
c.update(csrf(request))
return render_to_response('reset.html', c)
except:
errors.append(
'Wrong reset code in the URL. '
'Please try again or send an email to '
'<a href="mailto:[email protected]">[email protected]</a>')
c = {'msg_title': 'DataHub Reset Password', 'errors': errors}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
def get_login(request):
login = None
try:
login = request.session[kUsername]
except:
pass
return login
@login_required
def jdbc_password(request):
# this is not safe. Will be fixed using OIDC connect - ARC 2015-07-06
login = request.session[kUsername]
user = User.objects.get(username=login)
return HttpResponse(user.password)
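# --- hedged usage sketch (not part of the original views) ---
# Shows how the login_required decorator above is meant to wrap a view:
# unauthenticated requests are redirected to /account/login with a
# redirect_url back to the requested page. The view name below is
# hypothetical; 'confirmation.html' is one of the templates already used here.
@login_required
def example_protected_view (request):
    c = {
        'msg_title': 'Hello %s' % request.session[kUsername],
        'msg_body': 'This page is only visible to signed-in users.'
    }
    c.update(csrf(request))
    return render_to_response('confirmation.html', c)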
|
|
import os
import sys
import time
import logging
import signal
import tempfile
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler, FileSystemEventHandler
from django import conf
from django.conf import settings
from django.utils.module_loading import import_module
from django.db.models import get_models, get_app
import subprocess
try:
FONTFORGE_EXISTS = True
from libs import original_fontforge as fontforge
except:
FONTFORGE_EXISTS = False
DEFAULT_FONT_FORMATS = ('ttf', 'otf', 'eot', 'svg', 'woff')
class EventHandler(FileSystemEventHandler):
def __init__(self, watcher, *args, **kwargs):
self.watcher = watcher
super(EventHandler, self).__init__( *args, **kwargs)
def on_any_event(self, event):
self.watcher.process_changes(event)
class Watcher(object):
handler = None
command = None
blocked = False
stout_prefix = 'fontforge'
configs = []
def __init__(self, command=None, *args, **kwargs):
#self.handler = WatcherHandler(self)
self.command = command
self.observer = Observer()
self.event_handler = EventHandler(self)
# self.notifier.max_user_watches=16384
self.process_settings()
paths = self.get_watched_paths()
for appname, path in paths:
try:
self.observer.schedule(self.event_handler, path, recursive=True)
self.print_head('Watching \033[94m%s\033[0m' % (appname))
except Exception, e:
self.print_error('Watching %s error : %s' % (appname, str(e)))
def process_changes(self, event):
if event.src_path.endswith('.sfd'):
if FONTFORGE_EXISTS:
from subprocess import call
call(["python", "manage.py", "fontforge_generate"])
# self.generate_font()
else:
self.print_error("Python bindings for fontforge are not installed (try sudo apt-get install python-fontforge)")
def process_settings(self):
        reload(conf)
        if FONTFORGE_EXISTS:
            reload(fontforge)
self.configs = []
settings = conf.settings
        if not hasattr(settings, 'FONTFORGE_WATCHER'):
            self.print_error('settings.FONTFORGE_WATCHER is missing')
else:
configs = settings.FONTFORGE_WATCHER
for config in configs:
try:
source = config[0]
folder_output = config[1]
css_output = config[2] if len(config) >= 3 else None
classname = config[3] if len(config) >= 4 else None
content = None
if not os.path.isfile(source):
                        source = os.path.join(os.path.dirname(settings.DJANGO_ROOT), config[0])
if not os.path.isfile(source):
self.print_error('Source is missing "%s"' % source)
source = None
if source:
f = open(source, 'r')
content = f.read()
f.close()
if not os.path.isdir(folder_output):
                        folder_output = os.path.join(os.path.dirname(settings.DJANGO_ROOT), folder_output)
if not os.path.isdir(folder_output):
self.print_error('Folder output is missing "%s"' % folder_output)
folder_output = None
                    if css_output:
                        css_output_dir = os.path.dirname(css_output)
                        if not os.path.isdir(css_output_dir):
                            css_output_dir = os.path.join(os.path.dirname(settings.DJANGO_ROOT), css_output_dir)
                            css_output = os.path.join(os.path.dirname(settings.DJANGO_ROOT), css_output)
                        if not os.path.isdir(css_output_dir):
                            self.print_error('CSS output folder is missing "%s"' % css_output)
                            css_output = None
if source and folder_output:
self.configs.append([source, folder_output, css_output, classname, content])
except Exception, e:
self.print_error('Invalid config for fontforge watcher "%s"' % str(e))
def generate(self, compress=True):
self.generate_font(compress=compress)
def generate_font(self, compress=True):
self.process_settings()
for config in self.configs:
f = open(config[0], 'r')
content = f.read()
f.close()
            if True:  # NOTE: change detection (content != config[4]) is intentionally disabled; always regenerate
self.print_head('Changes detected (%s)' % config[0])
config[4] = content
try:
source = config[0]
name = os.path.basename(source).split('.')[0]
folder_output = os.path.join(config[1], name)
css_output = config[2]
classname = config[3]
font = fontforge.open(source)
if css_output and classname:
css = """
@font-face {
font-family: '%(font_name)s';
src: url('../fonts/%(font_name)s/%(font_name)s.eot');
src: url('../fonts/%(font_name)s/%(font_name)s.eot?#iefix') format('eot'),
url('../fonts/%(font_name)s/%(font_name)s.woff') format('woff'),
url('../fonts/%(font_name)s/%(font_name)s.svg#%(font_name)s') format('svg'),
url('../fonts/%(font_name)s/%(font_name)s.ttf') format('truetype');
font-style: normal;
font-weight: normal;
}
.%(font_classname)s {
position: relative;
display: inline-block;
top: 1px;
font-family: '%(font_name)s';
font-style: normal;
font-weight: normal;
line-height: 1;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
""" % {
'font_name' : name,
'font_classname' : classname
}
for glyph in font.glyphs():
if not glyph.glyphname.startswith('uni'):
print glyph.glyphname
css += """.%(font_classname)s-%(glyph_name)s:before { content: "\\%(glyph_unicode)s"; }\n""" % {
'font_classname' : classname,
'glyph_name' : glyph.glyphname,
'glyph_unicode' : "%04X" % (glyph.unicode)
}
for format in DEFAULT_FONT_FORMATS:
folder = os.path.join(folder_output, name)
filename = "%s.%s" % (folder, format)
self.print_process('Compiling %s' % filename)
                            if not os.path.exists(folder_output):  # ensure the per-font output dir exists
                                os.makedirs(folder_output)
font.generate(filename)
self.print_success('Done')
self.print_process('Pushing font css glyphs into %s' % css_output)
try:
os.remove(css_output)
except:
pass
css_file = open("%s" % css_output, "w+")
css_file.write(css)
css_file.close()
self.print_success("Done (%s chars)." % len(css))
except Exception, e:
self.print_error('Error during font generation : %s' % (str(e)))
else:
self.print_head("No changes")
def get_watched_paths(self):
app_paths = []
for config in self.configs:
source_dir = os.path.abspath(os.path.dirname(config[0]))
app_paths.append(
(config[0], source_dir)
)
return app_paths
def sigterm(self, signum, frame):
self.observer.stop()
self.observer.join()
exit(0)
def watch(self, paths=[]):
signal.signal(signal.SIGTERM, self.sigterm)
signal.signal(signal.SIGINT , self.sigterm)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
self.observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
self.observer.stop()
self.observer.join()
def print_r(self, pattern, str):
output = pattern % (self.stout_prefix, str)
if self.command:
self.command.stdout.write(output)
self.command.stdout.flush()
else:
print output
def print_head(self, str):
self.print_r("\033[95m[%s]\033[0m %s", str)
def print_process(self, str):
self.print_r("\033[95m[%s]\033[0m \033[93m%s\033[0m", str)
def print_success(self, str):
self.print_r("\033[95m[%s]\033[0m \033[92m%s\033[0m", str)
def print_error(self, str):
self.print_r("\033[95m[%s]\033[0m \033[91m%s\033[0m", str)
|
|
'''analyze WORKING/samples2/train.csv (the training data) to select cities used to build local models
Summarize samples2/train.csv, select 12 cities
and use the summary to create a table of cities ordered by number of trades
INVOCATION
 python select-cities2.py [--test] [--trace]
INPUTS
WORKING/samples2/train.csv
OUTPUTS
 WORKING/select-cities2/city_medianprice_ntrades_all.csv
 WORKING/select-cities2/city_medianprice_ntrades_selected.csv
 WORKING/select-cities2/city_medianprice_ntrades_all.txt
 WORKING/select-cities2/city_medianprice_ntrades_selected.txt
 WORKING/select-cities2/0log.txt
'''
from __future__ import division
import argparse
import collections
import json
import numpy as np
import os
import pandas as pd
import pdb
from pprint import pprint
import random
import sys
import arg_type
from Bunch import Bunch
import columns_table
from ColumnsTable import ColumnsTable
import dirutility
import layout_transactions
from Logger import Logger
from Path import Path
from Report import Report
from Timer import Timer
def make_control(argv):
'return a Bunch'
print argv
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_true')
parser.add_argument('--trace', action='store_true')
arg = parser.parse_args(argv)
arg.me = parser.prog.split('.')[0]
if arg.trace:
pdb.set_trace()
random_seed = 123
random.seed(random_seed)
np.random.seed(random_seed)
dir_working = Path().dir_working()
dir_out = os.path.join(dir_working, arg.me + ('-test' if arg.test else ''))
dirutility.assure_exists(dir_out)
base = 'city_medianprice_ntrades'
base_all = base + '_all'
base_selected = base + '_selected'
return Bunch(
arg=arg,
path_in_column_defs=os.path.join('column_defs.json'),
path_in_samples=os.path.join(dir_working, 'samples2', 'train.csv'),
path_out_csv_all=os.path.join(dir_out, base_all + '.csv'),
path_out_csv_selected=os.path.join(dir_out, base_selected + '.csv'),
path_out_report_all=os.path.join(dir_out, base_all + '.txt'),
path_out_report_selected=os.path.join(dir_out, base_selected + '.txt'),
path_out_log=os.path.join(dir_out, '0log.txt'),
random_seed=random_seed,
timer=Timer(),
)
def etl(path_in, nrows, test):
    'return two DataFrames (all cities, selected cities) with columns: city, median_price, n_trades, selected'
city_column = layout_transactions.city
price_column = layout_transactions.price
extracted = pd.read_csv(
path_in,
nrows=nrows,
usecols=[city_column, price_column],
low_memory=False
)
print 'read %d samples from file %s' % (len(extracted), path_in)
# build columns for the DataFrame result
distinct_cities = set(extracted[city_column])
selected_n_trades = (
277, 296, 303, 351, # about half the median
638, 640, 642, 660, # about the median number of trades (median is 641)
4480, 5613, 10610, 22303, # largest number of trades
)
cities = []
median_prices = np.empty(len(distinct_cities))
n_trades = np.empty(len(distinct_cities))
selecteds = []
for i, city in enumerate(distinct_cities):
mask = extracted[city_column] == city
in_city = extracted.loc[mask]
assert len(in_city) > 0, city
cities.append(city)
        median_prices[i] = in_city[price_column].median()
n_trades[i] = len(in_city)
selecteds.append(True if test else len(in_city) in selected_n_trades)
# check that the counting by city is reasonable
print 'sorted(n_trades)'
print sorted(n_trades)
print 'median', np.median(n_trades)
if not test:
assert sum(n_trades) == len(extracted)
for selected_n_trade in selected_n_trades:
assert selected_n_trade in n_trades, selected_n_trade
result_all = pd.DataFrame(
data={
'city': cities,
'median_price': median_prices,
'n_trades': n_trades,
'selected': selecteds,
},
index=cities,
)
result_selected = result_all.loc[result_all.selected]
result_all_sorted = result_all.sort_values('n_trades')
result_selected_sorted = result_selected.sort_values('n_trades')
print result_selected_sorted
return result_all_sorted, result_selected_sorted
def do_work(control):
'create csv file that summarizes all actual and predicted prices'
def make_indices(ordered_dict):
'return OrderedDict[key] <index relative to median value of ordered_dict>'
values = np.empty(len(ordered_dict), dtype=float)
for i, value in enumerate(ordered_dict.values()):
values[i] = value
median_value = np.median(values)
result = collections.OrderedDict()
for k, v in ordered_dict.iteritems():
result[k] = v / median_value
return result, median_value
df_all, df_selected = etl(
control.path_in_samples,
10 if control.arg.test else None,
control.arg.test,
)
df_all.to_csv(control.path_out_csv_all)
df_selected.to_csv(control.path_out_csv_selected)
with open(control.path_in_column_defs, 'r') as f:
column_defs = json.load(f)
pprint(column_defs)
def make_generate_data(df):
'yield each input deail line as a dict-like object'
def generate_data():
for i, row in df.iterrows():
yield row
return generate_data
def create_and_write(df, path, header_lines, selected_columns):
'create report and write it'
lines = columns_table.columns_table(
make_generate_data(df)(),
selected_columns,
column_defs,
header_lines,
)
with open(path, 'w') as f:
for line in lines:
f.write(line)
create_and_write(
df_all,
control.path_out_report_all,
['Count of Trades in All Cities', 'Ordered by Count of Number of Trades'],
['city', 'median_price', 'n_trades', 'selected'],
)
create_and_write(
df_selected,
control.path_out_report_selected,
['Count of Trades in Selected Cities', 'Ordered by Count of Number of Trades'],
['city', 'median_price', 'n_trades'],
)
def main(argv):
control = make_control(argv)
sys.stdout = Logger(control.path_out_log) # now print statements also write to the log file
print control
lap = control.timer.lap
do_work(control)
lap('work completed')
if control.arg.test:
print 'DISCARD OUTPUT: test'
print control
print 'done'
return
if __name__ == '__main__':
if False:
# avoid pyflakes warnings
pdb.set_trace()
pprint()
main(sys.argv[1:])
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
"""
(c) 2014 Ronan Delacroix
Doremi DCP2000 CLI Only Utility - Main File
:author: Ronan Delacroix
"""
import sys
import os
import cmd
import shlex
import tbx.text
from . import server as server
from . import requests
import six
class CLI(cmd.Cmd, object):
### Generic
prompt = 'DoremiAPICLI> '
intro = '\n<< Welcome to Doremi API CLI >>\n'
doc_leader = '\n<<Doremi API CLI Help Section>>\n'
def __init__(self, address, port, debug=False, format='text'):
"""
Constructor
"""
#hack : for windows 7 usage, need to set show-all-if-ambiguous to on for readline
import readline
import rlcompleter
readline.parse_and_bind("set show-all-if-ambiguous on")
if six.PY3:
import importlib
importlib.reload(sys)
else:
reload(sys) ## So as to enable setdefaultencoding
if 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
readline.parse_and_bind("bind '\t' rl_complete")
else:
readline.parse_and_bind("tab: complete")
#
self.address = address
self.port = port
self.debug = debug
self.client = None
self.format = format
super(CLI, self).__init__(completekey='tab')
def preloop(self):
"""
At launch...
"""
print("Connection...")
try:
self.client = server.DoremiServer(self.address, port=self.port, debug=self.debug)
except:
print("Connection to %s:%s failed." % (self.address, self.port))
self.do_exit('')
exit(1)
print("Connected to Doremi DCP2000 server on %s:%s" % (self.address, self.port))
super(CLI, self).preloop()
def postloop(self):
"""
End...
"""
print('\nGoodbye!\n')
super(CLI, self).postloop()
### Commands specifics
def do_help(self, arg):
"""
Dirty code : this function is copied from cmd.py...
But I added some magic to have the doc of the API.
"""
if arg:
# XXX check arg syntax
if requests.get(arg):
self.stdout.write("\n%s Help : \n" % str(arg))
self.stdout.write("\tSummary :\n\t\t%s\n\n" % requests.get(arg).name)
self.stdout.write("\tParameters :\n\t\t%s\n\n" % (str(', '.join(requests.get(arg).element_names))))
return
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n" % str(doc))
return
except AttributeError:
pass
self.stdout.write("%s\n" % str(self.nohelp % (arg,)))
return
func()
else:
names = [n.strip() for n in self.get_names()]
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]] = 1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd = name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif requests.get(name[3:]):
cmds_doc.append(cmd)
elif hasattr(self, name):
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
self.stdout.write("%s\n" % str(self.doc_leader))
self.print_topics(self.doc_header, cmds_doc, 15, 80)
self.print_topics(self.misc_header, help.keys(), 15, 80)
self.print_topics(self.undoc_header, cmds_undoc, 15, 80)
def get_names(self):
"""
Return all the function names of the current class + the API function names.
"""
names = super(CLI, self).get_names()
names.extend([str("do_" + c) for c in requests.list_names()])
return ["%s " % name for name in names]
def call_api(self, command, args=[]):
"""
Call an API command
"""
try:
return self.client.command(command, *args)
        except TypeError as e:
            # a wrong number/kind of arguments typically surfaces as a TypeError
            print("Wrong parameters : %s" % e)
except Exception as e:
print("ERROR : %s" % e)
return
def default(self, line):
"""
When a command/do_function does not exists in that class, try to see if it exists in the API and if yes, calls it.
"""
cmd, args, line = self.parseline(line)
if requests.get(cmd):
args = shlex.split(args)
result = self.call_api(cmd, args)
if result:
print("\nResults : \n")
result = tbx.text.pretty_render(result, format=self.format, indent=1)
print(result)
return
return super(CLI, self).default(line)
def completedefault(self, text, line, begidx, endidx):
"""Method called to complete an input line when no command-specific
complete_*() method is available.
By default, it returns an empty list.
"""
command = shlex.split(line)[0].replace('\x00', '').strip()
list_param = []
if requests.get_by_name(command):
c = requests.get_by_name(command)
list_param = c.element_names
arg_list = shlex.split(line)[1:]
list_param = list_param[len(arg_list):]
if len(list_param) > 0:
print("\n\tMissing : " +' '.join(['<'+k+'>' for k in list_param]),)
return ['']
### Shell access
def do_shell(self, s):
"""
Allows to call shell commands
"""
os.system(s)
def help_shell(self):
"""
Help on shell commands.
"""
print("Execute shell commands")
### Exiting CLI
def do_exit(self, s):
"""
Exit
"""
print("Exiting Doremi API CLI.")
return True
def help_exit(self):
print("Exiting Doremi API CLI.")
print("You can also use the Ctrl-D shortcut.")
do_quit = do_exit
help_quit = help_exit
do_EOF = do_exit
help_EOF = help_exit
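# --- hedged usage sketch (not part of the original module) ---
# The CLI is normally wired up by the package entry point; launched directly
# it would look roughly like this (address and port below are placeholders):
def _example_launch(address='192.168.1.50', port=11730):
    cli = CLI(address, port, debug=False, format='text')
    cli.cmdloop()  # standard cmd.Cmd loop; 'exit', 'quit' or Ctrl-D to leave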
|
|
# -*- coding: utf-8 -*-
import os
import json
import errno
import pickle
import gzip
import numpy as np
import networkx as nx
import cv2
from opensfm import io
from opensfm import config
from opensfm import context
class DataSet:
"""
    Dataset representing a directory with images, extracted metadata (EXIF), feature descriptors (SURF, SIFT), etc.
Methods to retrieve *base directory* for data file(s) have suffix ``_path``, methods to retrieve path of specified
data file have suffix ``_file``.
"""
def __init__(self, data_path):
"""
Create dataset instance. Empty directories (for EXIF, matches, etc) will be created if they don't exist
already.
:param data_path: Path to directory containing dataset
"""
self.data_path = data_path
self._load_config()
# Load list of images.
image_list_file = os.path.join(self.data_path, 'image_list.txt')
if os.path.isfile(image_list_file):
with open(image_list_file) as fin:
lines = fin.read().splitlines()
self.set_image_list(lines)
else:
self.set_image_path(os.path.join(self.data_path, 'images'))
def _load_config(self):
config_file = os.path.join(self.data_path, 'config.yaml')
self.config = config.load_config(config_file)
def images(self):
"""Return list of file names of all images in this dataset"""
return self.image_list
def __image_file(self, image):
"""
Return path of image with given name
:param image: Image file name (**with extension**)
"""
return self.image_files[image]
def load_image(self, image):
return open(self.__image_file(image))
def image_as_array(self, image):
"""Return image pixels as 3-dimensional numpy array (R G B order)"""
IMREAD_COLOR = cv2.IMREAD_COLOR if context.OPENCV3 else cv2.CV_LOAD_IMAGE_COLOR
return cv2.imread(self.__image_file(image), IMREAD_COLOR)[:,:,::-1] # Turn BGR to RGB
@staticmethod
def __is_image_file(filename):
return filename.split('.')[-1].lower() in {'jpg', 'jpeg', 'png', 'tif', 'tiff', 'pgm', 'pnm', 'gif'}
def set_image_path(self, path):
"""Set image path and find the all images in there"""
self.image_list = []
self.image_files = {}
if os.path.exists(path):
for name in os.listdir(path):
if self.__is_image_file(name):
self.image_list.append(name)
self.image_files[name] = os.path.join(path, name)
def set_image_list(self, image_list):
self.image_list = []
self.image_files = {}
for line in image_list:
path = os.path.join(self.data_path, line)
name = os.path.basename(path)
self.image_list.append(name)
self.image_files[name] = path
def __exif_path(self):
"""Return path of extracted exif directory"""
return os.path.join(self.data_path, 'exif')
def __exif_file(self, image):
"""
Return path of exif information for given image
:param image: Image name, with extension (i.e. 123.jpg)
"""
return os.path.join(self.__exif_path(), image + '.exif')
def load_exif(self, image):
"""
Return extracted exif information, as dictionary, usually with fields:
================ ===== ===================================
Field Type Description
================ ===== ===================================
width int Width of image, in pixels
height int Height of image, in pixels
focal_prior float Focal length (real) / sensor width
================ ===== ===================================
:param image: Image name, with extension (i.e. 123.jpg)
"""
with open(self.__exif_file(image), 'r') as fin:
return json.load(fin)
def save_exif(self, image, data):
io.mkdir_p(self.__exif_path())
with open(self.__exif_file(image), 'w') as fout:
io.json_dump(data, fout)
def feature_type(self):
"""Return the type of local features (e.g. AKAZE, SURF, SIFT)
"""
feature_name = self.config.get('feature_type', 'sift').lower()
if self.config.get('feature_root', False): feature_name = 'root_' + feature_name
return feature_name
def descriptor_type(self):
"""Return the type of the descriptor (if exists)
"""
if self.feature_type() == 'akaze':
return self.config.get('akaze_descriptor', '')
else:
return ''
def __feature_path(self):
"""Return path of feature descriptors and FLANN indices directory"""
__feature_path = self.feature_type()
if len(self.descriptor_type()) > 0:
__feature_path += '_' + self.descriptor_type()
return os.path.join(self.data_path, __feature_path)
def __feature_file(self, image):
"""
Return path of feature file for specified image
:param image: Image name, with extension (i.e. 123.jpg)
"""
return os.path.join(self.__feature_path(), image + '.' + self.feature_type() + '.npz')
def __save_features(self, filepath, image, points, descriptors, colors=None):
io.mkdir_p(self.__feature_path())
feature_type = self.config.get('feature_type')
if ((feature_type == 'AKAZE' and self.config.get('akaze_descriptor') in ['MLDB_UPRIGHT', 'MLDB']) or
(feature_type == 'HAHOG' and self.config.get('hahog_normalize_to_uchar', False))):
feature_data_type = np.uint8
else:
feature_data_type = np.float32
np.savez(filepath,
points=points.astype(np.float32),
descriptors=descriptors.astype(feature_data_type),
colors=colors)
def features_exist(self, image):
return os.path.isfile(self.__feature_file(image))
def load_features(self, image):
feature_type = self.config.get('feature_type')
s = np.load(self.__feature_file(image))
if feature_type == 'HAHOG' and self.config.get('hahog_normalize_to_uchar', False):
descriptors = s['descriptors'].astype(np.float32)
else:
descriptors = s['descriptors']
return s['points'], descriptors, s['colors'].astype(float)
def save_features(self, image, points, descriptors, colors):
self.__save_features(self.__feature_file(image), image, points, descriptors, colors)
def feature_index_exists(self, image):
return os.path.isfile(self.__feature_index_file(image))
def __feature_index_file(self, image):
"""
Return path of FLANN index file for specified image
:param image: Image name, with extension (i.e. 123.jpg)
"""
return os.path.join(self.__feature_path(), image + '.' + self.feature_type() + '.flann')
def load_feature_index(self, image, features):
index = cv2.flann.Index() if context.OPENCV3 else cv2.flann_Index()
index.load(features, self.__feature_index_file(image))
return index
def save_feature_index(self, image, index):
index.save(self.__feature_index_file(image))
def __preemptive_features_file(self, image):
"""
Return path of preemptive feature file (a short list of the full feature file)
for specified image
:param image: Image name, with extension (i.e. 123.jpg)
"""
return os.path.join(self.__feature_path(), image + '_preemptive.' + self.feature_type() + '.npz')
def load_preemtive_features(self, image):
s = np.load(self.__preemptive_features_file(image))
return s['points'], s['descriptors']
def save_preemptive_features(self, image, points, descriptors):
self.__save_features(self.__preemptive_features_file(image), image, points, descriptors)
def matcher_type(self):
"""Return the type of matcher
"""
matcher_type = self.config.get('matcher_type', 'BruteForce')
if 'BruteForce' in matcher_type:
if self.feature_type() == 'akaze' and (self.config.get('akaze_descriptor', 5) >= 4):
matcher_type = 'BruteForce-Hamming'
self.config['matcher_type'] = matcher_type
return matcher_type # BruteForce, BruteForce-L1, BruteForce-Hamming
def __matches_path(self):
"""Return path of matches directory"""
return os.path.join(self.data_path, 'matches')
def __matches_file(self, image):
"""File for matches for an image"""
return os.path.join(self.__matches_path(), '{}_matches.pkl.gz'.format(image))
def matches_exists(self, image):
return os.path.isfile(self.__matches_file(image))
def load_matches(self, image):
with gzip.open(self.__matches_file(image), 'rb') as fin:
matches = pickle.load(fin)
return matches
def save_matches(self, image, matches):
io.mkdir_p(self.__matches_path())
with gzip.open(self.__matches_file(image), 'wb') as fout:
pickle.dump(matches, fout)
def find_matches(self, im1, im2):
if self.matches_exists(im1):
im1_matches = self.load_matches(im1)
if im2 in im1_matches:
return im1_matches[im2]
if self.matches_exists(im2):
im2_matches = self.load_matches(im2)
if im1 in im2_matches:
if len(im2_matches[im1]):
return im2_matches[im1][:, [1, 0]]
return []
def __tracks_graph_file(self):
"""Return path of tracks file"""
return os.path.join(self.data_path, 'tracks.csv')
def load_tracks_graph_as_list(self):
"""Return tranks graph as a list of edges"""
track_list = []
images = self.images()
image_inv = {}
for i, im in enumerate(images):
image_inv[im] = int(i)
with open(self.__tracks_graph_file()) as fin:
for line in fin:
image, track_id, observation, x, y = line.split('\t')
if int(track_id) >= len(track_list):
track_list.append([])
track_list[int(track_id)].append([image_inv[image], int(observation)])
return track_list
def load_tracks_graph(self):
"""Return graph (networkx data structure) of tracks"""
with open(self.__tracks_graph_file()) as fin:
return load_tracks_graph(fin)
def save_tracks_graph(self, graph):
with open(self.__tracks_graph_file(), 'w') as fout:
save_tracks_graph(fout, graph)
def __reconstruction_file(self, filename):
"""Return path of reconstruction file"""
return os.path.join(self.data_path, filename or 'reconstruction.json')
def load_reconstruction(self, filename=None):
with open(self.__reconstruction_file(filename)) as fin:
reconstructions = io.reconstructions_from_json(json.load(fin))
return reconstructions
def save_reconstruction(self, reconstruction, filename=None, indent=4):
with open(self.__reconstruction_file(filename), 'w') as fout:
io.json_dump(io.reconstructions_to_json(reconstruction), fout)
def __reference_lla_path(self):
return os.path.join(self.data_path, 'reference_lla.json')
def invent_reference_lla(self, images=None):
lat, lon, alt = 0.0, 0.0, 0.0
wlat, wlon, walt = 0.0, 0.0, 0.0
if images is None: images = self.images()
for image in images:
d = self.load_exif(image)
if 'gps' in d and 'latitude' in d['gps'] and 'longitude' in d['gps']:
w = 1.0 / d['gps'].get('dop', 15)
lat += w * d['gps']['latitude']
lon += w * d['gps']['longitude']
wlat += w
wlon += w
if 'altitude' in d['gps']:
alt += w * d['gps']['altitude']
walt += w
if wlat: lat /= wlat
if wlon: lon /= wlon
if walt: alt /= walt
reference = {'latitude': lat, 'longitude': lon, 'altitude': 0} # Set altitude manually.
self.save_reference_lla(reference)
return reference
def save_reference_lla(self, reference):
with open(self.__reference_lla_path(), 'w') as fout:
json.dump(reference, fout)
def load_reference_lla(self):
with open(self.__reference_lla_path(), 'r') as fin:
return json.load(fin)
def __camera_models_file(self):
"""Return path of camera model file"""
return os.path.join(self.data_path, 'camera_models.json')
def load_camera_models(self):
"""Return camera models data"""
with open(self.__camera_models_file(), 'r') as fin:
obj = json.load(fin)
return io.cameras_from_json(obj)
def save_camera_models(self, camera_models):
"""Save camera models data"""
with open(self.__camera_models_file(), 'w') as fout:
obj = io.cameras_to_json(camera_models)
io.json_dump(obj, fout)
def __camera_models_overrides_file(self):
"""Return path of camera model overrides file"""
return os.path.join(self.data_path, 'camera_models_overrides.json')
def camera_models_overrides_exists(self):
return os.path.isfile(self.__camera_models_overrides_file())
def load_camera_models_overrides(self):
"""Return camera models overrides data"""
with open(self.__camera_models_overrides_file(), 'r') as fin:
obj = json.load(fin)
return io.cameras_from_json(obj)
def profile_log(self):
"Filename where to write timings."
return os.path.join(self.data_path, 'profile.log')
def __navigation_graph_file(self):
"Return the path of the navigation graph."
return os.path.join(self.data_path, 'navigation_graph.json')
def save_navigation_graph(self, navigation_graphs):
with open(self.__navigation_graph_file(), 'w') as fout:
io.json_dump(navigation_graphs, fout)
def __ply_file(self):
return os.path.join(self.data_path, 'reconstruction.ply')
def save_ply(self, reconstruction):
"""Save a reconstruction in PLY format"""
ply = io.reconstruction_to_ply(reconstruction)
with open(self.__ply_file(), 'w') as fout:
fout.write(ply)
def load_tracks_graph(fileobj):
g = nx.Graph()
for line in fileobj:
image, track, observation, x, y, R, G, B = line.split('\t')
g.add_node(image, bipartite=0)
g.add_node(track, bipartite=1)
g.add_edge(
image, track,
feature=(float(x), float(y)),
feature_id=int(observation),
feature_color=(float(R), float(G), float(B)))
return g
def save_tracks_graph(fileobj, graph):
for node, data in graph.nodes(data=True):
if data['bipartite'] == 0:
image = node
for track, data in graph[image].items():
x, y = data['feature']
fid = data['feature_id']
r, g, b = data['feature_color']
fileobj.write('%s\t%s\t%d\t%g\t%g\t%g\t%g\t%g\n' % (
str(image), str(track), fid, x, y, r, g, b))
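# --- hedged usage sketch (not part of the original module) ---
# Typical read-side use of DataSet: point it at a dataset directory holding
# config.yaml plus an images/ folder (or an image_list.txt), then pull data
# per image. The path below is a placeholder.
def _example_usage(data_path='/path/to/dataset'):
    data = DataSet(data_path)
    loaded = {}
    for image in data.images():
        exif = data.load_exif(image)         # dict with width, height, focal_prior
        pixels = data.image_as_array(image)  # HxWx3 RGB numpy array
        features = None
        if data.features_exist(image):
            features = data.load_features(image)  # (points, descriptors, colors)
        loaded[image] = (exif, pixels, features)
    return loaded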
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagedClusterVersionOperations(object):
"""ManagedClusterVersionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~service_fabric_managed_clusters_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
location, # type: str
cluster_version, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedClusterCodeVersionResult"
"""Gets information about a Service Fabric managed cluster code version available in the specified location.
Gets information about an available Service Fabric managed cluster code version.
:param location: The location for the cluster code versions. This is different from cluster
location.
:type location: str
:param cluster_version: The cluster code version.
:type cluster_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterCodeVersionResult, or the result of cls(response)
:rtype: ~service_fabric_managed_clusters_management_client.models.ManagedClusterCodeVersionResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterCodeVersionResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'clusterVersion': self._serialize.url("cluster_version", cluster_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterCodeVersionResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/managedClusterVersions/{clusterVersion}'} # type: ignore
def get_by_environment(
self,
location, # type: str
cluster_version, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedClusterCodeVersionResult"
"""Gets information about a Service Fabric cluster code version available for the specified environment.
Gets information about an available Service Fabric cluster code version by environment.
:param location: The location for the cluster code versions. This is different from cluster
location.
:type location: str
:param cluster_version: The cluster code version.
:type cluster_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterCodeVersionResult, or the result of cls(response)
:rtype: ~service_fabric_managed_clusters_management_client.models.ManagedClusterCodeVersionResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterCodeVersionResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
environment = "Windows"
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get_by_environment.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'environment': self._serialize.url("environment", environment, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'clusterVersion': self._serialize.url("cluster_version", cluster_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterCodeVersionResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_environment.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/environments/{environment}/managedClusterVersions/{clusterVersion}'} # type: ignore
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.ManagedClusterCodeVersionResult"]
"""Gets the list of Service Fabric cluster code versions available for the specified location.
Gets all available code versions for Service Fabric cluster resources by location.
:param location: The location for the cluster code versions. This is different from cluster
location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ManagedClusterCodeVersionResult, or the result of cls(response)
:rtype: list[~service_fabric_managed_clusters_management_client.models.ManagedClusterCodeVersionResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ManagedClusterCodeVersionResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[ManagedClusterCodeVersionResult]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/managedClusterVersions'} # type: ignore
def list_by_environment(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.ManagedClusterCodeVersionResult"]
"""Gets the list of Service Fabric cluster code versions available for the specified environment.
Gets all available code versions for Service Fabric cluster resources by environment.
:param location: The location for the cluster code versions. This is different from cluster
location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ManagedClusterCodeVersionResult, or the result of cls(response)
:rtype: list[~service_fabric_managed_clusters_management_client.models.ManagedClusterCodeVersionResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ManagedClusterCodeVersionResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
environment = "Windows"
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.list_by_environment.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'environment': self._serialize.url("environment", environment, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[ManagedClusterCodeVersionResult]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_environment.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/environments/{environment}/managedClusterVersions'} # type: ignore
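# Illustrative usage (added for this document; not part of the generated code):
# a hedged sketch of how these operations are usually reached through the
# generated management client. The client class and attribute names below are
# assumptions made for illustration; check the generated client module in this
# package for the exact names.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.servicefabricmanagedclusters import (
#         ServiceFabricManagedClustersManagementClient)
#
#     client = ServiceFabricManagedClustersManagementClient(
#         DefaultAzureCredential(), subscription_id="<subscription-id>")
#     result = client.managed_cluster_version.get(
#         location="eastus", cluster_version="<cluster-version>")
#     all_versions = client.managed_cluster_version.list(location="eastus")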
|
|
"""802.1x implementation for FAUCET."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.lib import hub
from chewie import chewie
from faucet.valve_util import kill_on_exception
def get_mac_str(valve_index, port_num):
"""Gets the mac address string for the valve/port combo
Args:
valve_index (int): The internally used id of the valve.
port_num (int): port number
Returns:
str
"""
two_byte_port_num = ("%04x" % port_num)
two_byte_port_num_formatted = two_byte_port_num[:2] + ':' + two_byte_port_num[2:]
return '00:00:00:%02x:%s' % (valve_index, two_byte_port_num_formatted)
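# Worked example (added for this document): the valve index occupies the fourth
# octet and the port number the last two octets of the returned MAC string.
# For valve_index=1 and port_num=2, "%04x" % 2 gives "0002", split into "00:02":
#
#     >>> get_mac_str(1, 2)
#     '00:00:00:01:00:02'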
class FaucetDot1x: # pylint: disable=too-many-instance-attributes
"""Wrapper for experimental Chewie 802.1x authenticator."""
exc_logname = None
def __init__(self, logger, exc_logname, metrics, send_flow_msgs):
self.logger = logger
self.metrics = metrics
self.exc_logname = exc_logname
self.mac_to_port = {} # {"00:00:00:00:00:02" : (valve_0, port_1)}
self.dp_id_to_valve_index = {}
self.thread = None
self._send_flow_msgs = send_flow_msgs
self._valves = None
self._dot1x_speaker = None
self._auth_acl_name = None
self._noauth_acl_name = None
def _create_dot1x_speaker(self, dot1x_intf, chewie_id, radius_ip, radius_port, radius_secret):
"""
Args:
dot1x_intf (str):
chewie_id (str):
radius_ip (str):
radius_port (int):
radius_secret (str):
Returns:
Chewie
"""
_chewie = chewie.Chewie(
dot1x_intf, self.logger,
self.auth_handler, self.failure_handler, self.logoff_handler,
radius_ip, radius_port, radius_secret, chewie_id)
self.thread = hub.spawn(_chewie.run)
self.thread.name = 'chewie'
return _chewie
def _get_valve_and_port(self, port_id):
"""Finds the valve and port that this address corresponds to
Args:
port_id: is a macaddress string"""
valve, port = self.mac_to_port[port_id]
return (valve, port)
def _get_acls(self, datapath):
"""Returns tuple of acl values"""
auth_acl = datapath.acls.get(self._auth_acl_name)
noauth_acl = datapath.acls.get(self._noauth_acl_name)
return (auth_acl, noauth_acl)
    # Logging methods
def log_auth_event(self, valve, port_num, mac_str, status):
"""Log an authentication attempt event"""
self.metrics.inc_var('dp_dot1x_{}'.format(status), valve.dp.base_prom_labels())
self.metrics.inc_var('port_dot1x_{}'.format(status), valve.dp.port_labels(port_num))
self.logger.info(
'{} from MAC {} on {}'.format(status.capitalize(), mac_str, port_num))
valve.dot1x_event({'AUTHENTICATION': {'dp_id': valve.dp.dp_id,
'port': port_num,
'eth_src': mac_str,
'status': status}})
def log_port_event(self, event_type, port_type, valve, port_num): # pylint: disable=no-self-use
"""Log a dot1x port event"""
valve.dot1x_event({event_type: {'dp_id': valve.dp.dp_id,
'port': port_num,
'port_type': port_type}})
@kill_on_exception(exc_logname)
def auth_handler(self, address, port_id, *args, **kwargs): # pylint: disable=unused-argument
"""Callback for when a successful auth happens."""
address_str = str(address)
valve, dot1x_port = self._get_valve_and_port(port_id)
port_num = dot1x_port.number
self.log_auth_event(valve, port_num, address_str, 'success')
flowmods = self._get_login_flowmod(dot1x_port, valve, address_str,
kwargs.get('vlan_name', None),
kwargs.get('filter_id', None))
if flowmods:
self._send_flow_msgs(valve, flowmods)
@kill_on_exception(exc_logname)
def logoff_handler(self, address, port_id):
"""Callback for when an EAP logoff happens."""
address_str = str(address)
valve, dot1x_port = self._get_valve_and_port(port_id)
port_num = dot1x_port.number
self.log_auth_event(valve, port_num, address_str, 'logoff')
flowmods = self._get_logoff_flowmod(dot1x_port, valve, address_str)
if flowmods:
self._send_flow_msgs(valve, flowmods)
@kill_on_exception(exc_logname)
def failure_handler(self, address, port_id):
"""Callback for when a EAP failure happens."""
address_str = str(address)
valve, dot1x_port = self._get_valve_and_port(port_id)
port_num = dot1x_port.number
self.log_auth_event(valve, port_num, address_str, 'failure')
flowmods = self._get_logoff_flowmod(dot1x_port, valve, address_str)
if flowmods:
self._send_flow_msgs(valve, flowmods)
def set_mac_str(self, valve, valve_index, port_num):
"""
Args:
valve (Valve):
valve_index (int):
port_num (int):
Returns:
str
"""
mac_str = get_mac_str(valve_index, port_num)
port = valve.dp.ports[port_num]
self.mac_to_port[mac_str] = (valve, port)
return mac_str
def nfv_sw_port_up(self, dp_id, dot1x_ports, nfv_sw_port):
"""Setup the dot1x forward port acls when the nfv_sw_port comes up.
Args:
dp_id (int):
dot1x_ports (Iterable of Port objects):
nfv_sw_port (Port):
Returns:
list of flowmods
"""
self._dot1x_speaker.port_down(
get_mac_str(self.dp_id_to_valve_index[dp_id], nfv_sw_port.number))
valve = self._valves[dp_id]
self.log_port_event("PORT_UP", 'nfv', valve, nfv_sw_port.number)
ret = []
for port in dot1x_ports:
ret.extend(self.create_flow_pair(
dp_id, port, nfv_sw_port, valve))
return ret
def port_up(self, dp_id, dot1x_port, nfv_sw_port):
"""Setup the dot1x forward port acls.
Args:
dp_id (int):
dot1x_port (Port):
nfv_sw_port (Port):
Returns:
list of flowmods
"""
port_num = dot1x_port.number
mac_str = get_mac_str(self.dp_id_to_valve_index[dp_id], port_num)
self._dot1x_speaker.port_up(mac_str)
valve = self._valves[dp_id]
self.log_port_event("PORT_UP", 'supplicant', valve, port_num)
# Dealing with ACLs
flowmods = []
flowmods.extend(self.create_flow_pair(
dp_id, dot1x_port, nfv_sw_port, valve))
flowmods.extend(self._add_unauthenticated_flowmod(dot1x_port, valve))
if dot1x_port.dot1x_mab:
self.logger.info("Port % is using Mac Auth Bypass", dot1x_port.number)
flowmods.append(self.create_mab_flow(dp_id, dot1x_port, nfv_sw_port, valve))
return flowmods
def create_mab_flow(self, dp_id, dot1x_port, nfv_sw_port, valve):
"""Creates a flow that mirrors UDP packets from port 68 (DHCP) from
the supplicant to the nfv port
Args:
dp_id (int):
dot1x_port (Port):
nfv_sw_port (Port):
valve (Valve):
Returns:
list
"""
acl_manager = valve.acl_manager
if dot1x_port.running():
valve_index = self.dp_id_to_valve_index[dp_id]
mac = get_mac_str(valve_index, dot1x_port.number)
return acl_manager.create_mab_flow(dot1x_port.number, nfv_sw_port.number, mac)
return []
def create_flow_pair(self, dp_id, dot1x_port, nfv_sw_port, valve):
"""Creates the pair of flows that redirects the eapol packets to/from
the supplicant and nfv port
Args:
dp_id (int):
dot1x_port (Port):
nfv_sw_port (Port):
valve (Valve):
Returns:
list
"""
acl_manager = valve.acl_manager
if dot1x_port.running():
valve_index = self.dp_id_to_valve_index[dp_id]
mac = get_mac_str(valve_index, dot1x_port.number)
return acl_manager.create_dot1x_flow_pair(
dot1x_port.number, nfv_sw_port.number, mac)
return []
def port_down(self, dp_id, dot1x_port, nfv_sw_port):
"""
Remove the acls added by FaucetDot1x.get_port_acls
Args:
dp_id (int):
dot1x_port (Port):
nfv_sw_port (Port):
Returns:
list of flowmods
"""
valve_index = self.dp_id_to_valve_index[dp_id]
port_num = dot1x_port.number
mac = get_mac_str(valve_index, port_num)
self._dot1x_speaker.port_down(mac)
valve = self._valves[dp_id]
acl_manager = valve.acl_manager
self.log_port_event("PORT_DOWN", 'supplicant', valve, port_num)
flowmods = []
flowmods.extend(self._del_authenticated_flowmod(dot1x_port, valve, mac))
flowmods.extend(self._del_unauthenticated_flowmod(dot1x_port, valve))
        # NOTE: The dot1x flow pair is not included in the unauthenticated flowmods
flowmods.extend(acl_manager.del_mab_flow(dot1x_port.number, nfv_sw_port.number, mac))
flowmods.extend(acl_manager.del_dot1x_flow_pair(dot1x_port.number, nfv_sw_port.number, mac))
return flowmods
def reset(self, valves):
"""Set up a dot1x speaker."""
self._valves = valves
dot1x_valves = [
valve for valve in valves.values() if valve.dp.dot1x and valve.dp.dot1x_ports()]
assert len(dot1x_valves) < 255, 'dot1x not supported for > 255 DPs'
if not dot1x_valves:
return
first_valve = dot1x_valves[0]
dot1x_intf = first_valve.dp.dot1x['nfv_intf']
radius_ip = first_valve.dp.dot1x['radius_ip']
radius_port = first_valve.dp.dot1x['radius_port']
radius_secret = first_valve.dp.dot1x['radius_secret']
self._auth_acl_name = first_valve.dp.dot1x.get('auth_acl')
self._noauth_acl_name = first_valve.dp.dot1x.get('noauth_acl')
self._dot1x_speaker = self._create_dot1x_speaker(
dot1x_intf, first_valve.dp.faucet_dp_mac,
radius_ip, radius_port, radius_secret)
for valve_index, valve in enumerate(dot1x_valves, start=0):
self.dp_id_to_valve_index[valve.dp.dp_id] = valve_index
for dot1x_port in valve.dp.dot1x_ports():
self.set_mac_str(valve, valve_index, dot1x_port.number)
self.logger.info(
'dot1x enabled on %s (%s) port %s, NFV interface %s' % (
valve.dp, valve_index, dot1x_port, dot1x_intf))
valve.dot1x_event({'ENABLED': {'dp_id': valve.dp.dp_id}})
def _get_logoff_flowmod(self, dot1x_port, valve, mac_str):
"""Return flowmods required to logoff port"""
flowmods = []
flowmods.extend(
self._del_authenticated_flowmod(dot1x_port, valve, mac_str))
flowmods.extend(
self._add_unauthenticated_flowmod(dot1x_port, valve))
return flowmods
def _get_login_flowmod(self, dot1x_port, valve, # pylint: disable=too-many-arguments
mac_str, vlan_name, acl_name):
"""Return flowmods required to login port"""
flowmods = []
flowmods.extend(
self._del_unauthenticated_flowmod(dot1x_port, valve))
flowmods.extend(
self._add_authenticated_flowmod(dot1x_port, valve, mac_str, vlan_name, acl_name))
return flowmods
def _add_authenticated_flowmod(self, dot1x_port, valve, # pylint: disable=too-many-arguments
mac_str, vlan_name, acl_name):
"""Return flowmods for successful authentication on port"""
port_num = dot1x_port.number
flowmods = []
acl_manager = valve.acl_manager
acl = valve.dp.acls.get(acl_name, None)
if dot1x_port.dot1x_dyn_acl and acl:
self.logger.info("DOT1X_DYN_ACL: Adding ACL '{0}' for port '{1}'".format(
acl_name, port_num))
self.logger.debug("DOT1X_DYN_ACL: ACL contents: '{0}'".format(str(acl.__dict__)))
flowmods.extend(acl_manager.add_port_acl(acl, port_num, mac_str))
elif dot1x_port.dot1x_acl:
auth_acl, _ = self._get_acls(valve.dp)
self.logger.info("DOT1X_PRE_ACL: Adding ACL '{0}' for port '{1}'".format(
acl_name, port_num))
self.logger.debug("DOT1X_PRE_ACL: ACL contents: '{0}'".format(str(auth_acl.__dict__)))
flowmods.extend(acl_manager.add_port_acl(auth_acl, port_num, mac_str))
else:
flowmods.extend(acl_manager.add_authed_mac(port_num, mac_str))
if vlan_name:
flowmods.extend(valve.add_dot1x_native_vlan(port_num, vlan_name))
return flowmods
def _del_authenticated_flowmod(self, dot1x_port, valve, mac_str):
"""Return flowmods for deleting authentication flows from a port"""
flowmods = []
port_num = dot1x_port.number
acl_manager = valve.acl_manager
if dot1x_port.dot1x_acl:
auth_acl, _ = self._get_acls(valve.dp)
flowmods.extend(acl_manager.del_port_acl(auth_acl, port_num, mac_str))
elif dot1x_port.dot1x_dyn_acl:
flowmods.extend(acl_manager.del_authed_mac(port_num, mac_str, strict=False))
else:
flowmods.extend(acl_manager.del_authed_mac(port_num, mac_str))
flowmods.extend(valve.del_dot1x_native_vlan(port_num))
return flowmods
def _add_unauthenticated_flowmod(self, dot1x_port, valve, mac_str=None):
"""Return flowmods default on a port"""
flowmods = []
acl_manager = valve.acl_manager
if dot1x_port.dot1x_acl:
_, noauth_acl = self._get_acls(valve.dp)
flowmods.extend(acl_manager.add_port_acl(noauth_acl, dot1x_port.number, mac_str))
return flowmods
def _del_unauthenticated_flowmod(self, dot1x_port, valve, mac_str=None):
"""Return flowmods for deleting default / unauthenticated flows from a port"""
flowmods = []
acl_manager = valve.acl_manager
if dot1x_port.dot1x_acl:
_, noauth_acl = self._get_acls(valve.dp)
flowmods.extend(acl_manager.del_port_acl(noauth_acl, dot1x_port.number, mac_str))
return flowmods
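# Illustrative sketch (added for this document; not part of FAUCET): how the
# speaker's port identifiers resolve back to valves and ports. set_mac_str()
# keys mac_to_port with the string produced by get_mac_str(), and the chewie
# callbacks use _get_valve_and_port() to look it up again. The _FakeDP and
# _FakeValve classes below are hypothetical stand-ins used only for this sketch.
def _example_mac_to_port_lookup():
    class _FakeDP:
        ports = {2: 'port-2-object'}
    class _FakeValve:
        dp = _FakeDP()
    dot1x = FaucetDot1x(logger=None, exc_logname=None, metrics=None,
                        send_flow_msgs=None)
    mac = dot1x.set_mac_str(_FakeValve(), valve_index=1, port_num=2)
    # mac == '00:00:00:01:00:02'
    return dot1x._get_valve_and_port(mac)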
|
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Project action implementations"""
import logging
from keystoneauth1 import exceptions as ks_exc
from osc_lib.cli import parseractions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common
from openstackclient.identity.v3 import tag
LOG = logging.getLogger(__name__)
class CreateProject(command.ShowOne):
_description = _("Create new project")
def get_parser(self, prog_name):
parser = super(CreateProject, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<project-name>',
help=_('New project name'),
)
parser.add_argument(
'--domain',
metavar='<domain>',
help=_('Domain owning the project (name or ID)'),
)
parser.add_argument(
'--parent',
metavar='<project>',
help=_('Parent of the project (name or ID)'),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('Project description'),
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help=_('Enable project'),
)
enable_group.add_argument(
'--disable',
action='store_true',
help=_('Disable project'),
)
parser.add_argument(
'--property',
metavar='<key=value>',
action=parseractions.KeyValueAction,
help=_('Add a property to <name> '
'(repeat option to set multiple properties)'),
)
parser.add_argument(
'--or-show',
action='store_true',
help=_('Return existing project'),
)
tag.add_tag_option_to_parser_for_create(parser, _('project'))
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
domain = None
if parsed_args.domain:
domain = common.find_domain(identity_client,
parsed_args.domain).id
parent = None
if parsed_args.parent:
parent = utils.find_resource(
identity_client.projects,
parsed_args.parent,
).id
enabled = True
if parsed_args.disable:
enabled = False
kwargs = {}
if parsed_args.property:
kwargs = parsed_args.property.copy()
kwargs['tags'] = list(set(parsed_args.tags))
try:
project = identity_client.projects.create(
name=parsed_args.name,
domain=domain,
parent=parent,
description=parsed_args.description,
enabled=enabled,
**kwargs
)
except ks_exc.Conflict:
if parsed_args.or_show:
project = utils.find_resource(identity_client.projects,
parsed_args.name,
domain_id=domain)
LOG.info(_('Returning existing project %s'), project.name)
else:
raise
project._info.pop('links')
return zip(*sorted(project._info.items()))
class DeleteProject(command.Command):
_description = _("Delete project(s)")
def get_parser(self, prog_name):
parser = super(DeleteProject, self).get_parser(prog_name)
parser.add_argument(
'projects',
metavar='<project>',
nargs="+",
help=_('Project(s) to delete (name or ID)'),
)
parser.add_argument(
'--domain',
metavar='<domain>',
help=_('Domain owning <project> (name or ID)'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
domain = None
if parsed_args.domain:
domain = common.find_domain(identity_client, parsed_args.domain)
errors = 0
for project in parsed_args.projects:
try:
if domain is not None:
project_obj = utils.find_resource(identity_client.projects,
project,
domain_id=domain.id)
else:
project_obj = utils.find_resource(identity_client.projects,
project)
identity_client.projects.delete(project_obj.id)
except Exception as e:
errors += 1
LOG.error(_("Failed to delete project with "
"name or ID '%(project)s': %(e)s"),
{'project': project, 'e': e})
if errors > 0:
total = len(parsed_args.projects)
msg = (_("%(errors)s of %(total)s projects failed "
"to delete.") % {'errors': errors, 'total': total})
raise exceptions.CommandError(msg)
class ListProject(command.Lister):
_description = _("List projects")
def get_parser(self, prog_name):
parser = super(ListProject, self).get_parser(prog_name)
parser.add_argument(
'--domain',
metavar='<domain>',
help=_('Filter projects by <domain> (name or ID)'),
)
parser.add_argument(
'--parent',
metavar='<parent>',
help=_('Filter projects whose parent is <parent> (name or ID)'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Filter projects by <user> (name or ID)'),
)
parser.add_argument(
'--my-projects',
action='store_true',
help=_('List projects for the authenticated user. '
'Supersedes other filters.'),
)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_('List additional fields in output'),
)
parser.add_argument(
'--sort',
metavar='<key>[:<direction>]',
help=_('Sort output by selected keys and directions (asc or desc) '
'(default: asc), repeat this option to specify multiple '
'keys and directions.'),
)
tag.add_tag_filtering_option_to_parser(parser, _('projects'))
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if parsed_args.long:
columns = ('ID', 'Name', 'Domain ID', 'Description', 'Enabled')
else:
columns = ('ID', 'Name')
kwargs = {}
domain_id = None
if parsed_args.domain:
domain_id = common.find_domain(identity_client,
parsed_args.domain).id
kwargs['domain'] = domain_id
if parsed_args.parent:
parent_id = common.find_project(identity_client,
parsed_args.parent).id
kwargs['parent'] = parent_id
if parsed_args.user:
if parsed_args.domain:
user_id = utils.find_resource(identity_client.users,
parsed_args.user,
domain_id=domain_id).id
else:
user_id = utils.find_resource(identity_client.users,
parsed_args.user).id
kwargs['user'] = user_id
tag.get_tag_filtering_args(parsed_args, kwargs)
if parsed_args.my_projects:
# NOTE(adriant): my-projects supersedes all the other filters.
kwargs = {'user': self.app.client_manager.auth_ref.user_id}
try:
data = identity_client.projects.list(**kwargs)
except ks_exc.Forbidden:
# NOTE(adriant): if no filters, assume a forbidden is non-admin
# wanting their own project list.
if not kwargs:
user = self.app.client_manager.auth_ref.user_id
data = identity_client.projects.list(
user=user)
else:
raise
if parsed_args.sort:
data = utils.sort_items(data, parsed_args.sort)
return (columns,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class SetProject(command.Command):
_description = _("Set project properties")
def get_parser(self, prog_name):
parser = super(SetProject, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project>',
help=_('Project to modify (name or ID)'),
)
parser.add_argument(
'--name',
metavar='<name>',
help=_('Set project name'),
)
parser.add_argument(
'--domain',
metavar='<domain>',
help=_('Domain owning <project> (name or ID)'),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('Set project description'),
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help=_('Enable project'),
)
enable_group.add_argument(
'--disable',
action='store_true',
help=_('Disable project'),
)
parser.add_argument(
'--property',
metavar='<key=value>',
action=parseractions.KeyValueAction,
help=_('Set a property on <project> '
'(repeat option to set multiple properties)'),
)
tag.add_tag_option_to_parser_for_set(parser, _('project'))
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
project = common.find_project(identity_client,
parsed_args.project,
parsed_args.domain)
kwargs = {}
if parsed_args.name:
kwargs['name'] = parsed_args.name
if parsed_args.description:
kwargs['description'] = parsed_args.description
if parsed_args.enable:
kwargs['enabled'] = True
if parsed_args.disable:
kwargs['enabled'] = False
if parsed_args.property:
kwargs.update(parsed_args.property)
tag.update_tags_in_args(parsed_args, project, kwargs)
identity_client.projects.update(project.id, **kwargs)
class ShowProject(command.ShowOne):
_description = _("Display project details")
def get_parser(self, prog_name):
parser = super(ShowProject, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project>',
help=_('Project to display (name or ID)'),
)
parser.add_argument(
'--domain',
metavar='<domain>',
help=_('Domain owning <project> (name or ID)'),
)
parser.add_argument(
'--parents',
action='store_true',
default=False,
help=_('Show the project\'s parents as a list'),
)
parser.add_argument(
'--children',
action='store_true',
default=False,
help=_('Show project\'s subtree (children) as a list'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
project_str = common._get_token_resource(identity_client, 'project',
parsed_args.project,
parsed_args.domain)
if parsed_args.domain:
domain = common.find_domain(identity_client, parsed_args.domain)
project = utils.find_resource(
identity_client.projects,
project_str,
domain_id=domain.id)
else:
project = utils.find_resource(
identity_client.projects,
project_str)
if parsed_args.parents or parsed_args.children:
            # NOTE(RuiChen): utils.find_resource() can't pass kwargs when the
            #                ID query hits a result first, so call the identity
            #                manager's get() with kwargs directly.
project = identity_client.projects.get(
project.id,
parents_as_ids=parsed_args.parents,
subtree_as_ids=parsed_args.children)
project._info.pop('links')
return zip(*sorted(project._info.items()))
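# Illustrative CLI usage (added for this document; not part of the original
# module): the classes above back the ``openstack project`` commands, roughly
# as follows. Names and values are placeholders.
#
#     openstack project create --domain default --description "Demo project" \
#         --enable --property stage=dev demo
#     openstack project list --long --sort name:asc
#     openstack project set --description "Updated description" demo
#     openstack project show --parents --children demo
#     openstack project delete demo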
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2022, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Atomic Position Data
############################
This module provides a collection of dataframes supporting nuclear positions,
forces, velocities, symbols, etc. (all data associated with atoms as points).
"""
from numbers import Integral
import numpy as np
import pandas as pd
from exatomic.exa import DataFrame, Series
from exatomic.exa.util.units import Length
from exatomic.base import sym2z, sym2mass
from exatomic.algorithms.distance import modv
from exatomic.core.error import PeriodicUniverseError
from exatomic.algorithms.geometry import make_small_molecule
from exatomic import plotter
class Atom(DataFrame):
"""
The atom dataframe.
+-------------------+----------+-------------------------------------------+
| Column | Type | Description |
+===================+==========+===========================================+
| x | float | position in x (req.) |
+-------------------+----------+-------------------------------------------+
| y | float | position in y (req.) |
+-------------------+----------+-------------------------------------------+
| z | float | position in z (req.) |
+-------------------+----------+-------------------------------------------+
| frame | category | non-unique integer (req.) |
+-------------------+----------+-------------------------------------------+
| symbol | category | element symbol (req.) |
+-------------------+----------+-------------------------------------------+
| fx | float | force in x |
+-------------------+----------+-------------------------------------------+
| fy | float | force in y |
+-------------------+----------+-------------------------------------------+
| fz | float | force in z |
+-------------------+----------+-------------------------------------------+
| vx | float | velocity in x |
+-------------------+----------+-------------------------------------------+
| vy | float | velocity in y |
+-------------------+----------+-------------------------------------------+
| vz | float | velocity in z |
+-------------------+----------+-------------------------------------------+
"""
_index = 'atom'
_cardinal = ('frame', np.int64)
_categories = {'symbol': str, 'set': np.int64, 'molecule': np.int64,
'label': np.int64}
_columns = ['x', 'y', 'z', 'symbol']
#@property
#def _constructor(self):
# return Atom
@property
def nframes(self):
"""Return the total number of frames in the atom table."""
return np.int64(self.frame.cat.as_ordered().max() + 1)
@property
def last_frame(self):
"""Return the last frame of the atom table."""
return self[self.frame == self.nframes - 1]
@property
def unique_atoms(self):
"""Return unique atom symbols of the last frame."""
return self.last_frame.symbol.unique()
@staticmethod
def _determine_center(attr, coords):
"""Determine the center of the molecule with respect to
the given attribute data. Used for the center of nuclear
charge and center of mass."""
center = 1/np.sum(attr)*np.sum(np.multiply(np.transpose(coords), attr), axis=1)
center = pd.Series(center, index=['x', 'y', 'z'])
return center
def center(self, idx=None, frame=None, to=None):
"""
Return a copy of a single frame of the atom table
centered around a specific atom index. There is also
the ability to center the molecule to the center of
nuclear charge (NuclChrg) or center of mass (Mass).
Args:
idx (int): Atom index in the atom table
frame (int): Frame to perform the operation on
to (str): Tells the program which centering algorithm to use
        Returns:
frame (:class:`exatomic.Universe.atom`): Atom frame
"""
if frame is None: frame = self.last_frame.copy()
else: frame = self[self.frame == frame].copy()
if to is None:
if idx is None: raise TypeError("Must provide an atom to center to")
center = frame.iloc[idx]
elif to == 'NuclChrg':
try:
Z = frame['Z'].astype(int).values
except KeyError:
Z = frame['symbol'].map(sym2z).astype(int).values
center = self._determine_center(attr=Z, coords=frame[['x', 'y', 'z']].values)
elif to == 'Mass':
mass = frame['symbol'].map(sym2mass).astype(int).values
center = self._determine_center(attr=mass, coords=frame[['x', 'y', 'z']].values)
else:
raise NotImplementedError("Sorry the centering option {} is not available".format(to))
for r in ['x', 'y', 'z']:
if center[r] > 0: frame[r] = frame[r] - center[r]
else: frame[r] = frame[r] + np.abs(center[r])
return Atom(frame)
def rotate(self, theta, axis=None, frame=None, degrees=True):
"""
Return a copy of a single frame of the atom table rotated
around the specified rotation axis by the specified angle.
As we have the rotation axis and the rotation angle we are
able to use the Rodrigues' formula to get the rotated
vectors.
Args:
theta (float): The angle that you wish to rotate by
axis (list): The axis of rotation
frame (int): The frame that you wish to rotate
degrees (bool): If true convert from degrees to radians
Returns:
frame (:class:`exatomic.Universe.atom`): Atom frame
"""
if axis is None: axis = [0, 0, 1]
if frame is None: frame = self.last_frame.copy()
else: frame = self[self.frame == frame].copy()
if all(map(lambda x: x == 0., axis)) or theta == 0.: return frame
# as we have the rotation axis and the angle we will rotate over
# we implement the Rodrigues' rotation formula
# v_rot = v*np.cos(theta) + (np.cross(k,v))*np.sin(theta) + k*(np.dot(k,v))*(1-np.cos(theta))
# convert units if not degrees
if degrees: theta = theta*np.pi/180.
# normalize rotation axis vector
norm = np.linalg.norm(axis)
try:
axis /= norm
except ZeroDivisionError:
raise ZeroDivisionError("Trying to normalize axis {} by a 0 value".format(axis))
# get the coordinates
coords = frame[['x', 'y', 'z']].values
        # generate the first term in Rodrigues' formula
        a = coords * np.cos(theta)
        # generate the second term in Rodrigues' formula
# this creates a matrix of size coords.shape[0]
b = np.cross(axis, coords) * np.sin(theta)
        # generate the last term in Rodrigues' formula
        # we use np.outer to make a dyadic product of the dot product result
        # and the axis vector
c = np.outer(np.dot(coords, axis), axis) * (1-np.cos(theta))
rotated = a + b + c
frame[['x', 'y', 'z']] = rotated
return Atom(frame)
def translate(self, dx=0, dy=0, dz=0, vector=None, frame=None, units='au'):
"""
Return a copy of a single frame of the atom table translated by
some specified distance.
Note:
Vector can be used instead of dx, dy, dz as it will be decomposed
into those components. If vector and any of the others are
specified the values in vector will be used.
Args:
dx (float): Displacement distance in x
dy (float): Displacement distance in y
dz (float): Displacement distance in z
vector (list): Displacement vector
units (str): Units that are used for the displacement
Returns:
frame (:class:`exatomic.Universe.atom`): Atom frame
"""
if frame is None: frame = self.last_frame.copy()
else: frame = self[self.frame == frame].copy()
# check if vector is specified
if vector is not None:
# convert vector units to au
vector = [i * Length[units, 'au'] for i in vector]
dx = vector[0]
dy = vector[1]
dz = vector[2]
# add the values to each respective coordinate
frame['x'] += dx
frame['y'] += dy
frame['z'] += dz
return Atom(frame)
def align_to_axis(self, adx0, adx1, axis=None, frame=None, center_to=None):
'''
This a short method to center and align the molecule along some defined axis.
Args:
adx0 (int): Atom to place at the origin
adx1 (int): Atom to align along the axis
axis (list): Axis that the vector adx0-adx1 will align to
frame (int): Frame to align
Returns:
aligned (:class:`exatomic.Universe.atom`): Aligned atom frame
'''
if frame is None: atom = self.last_frame.copy()
else: atom = self[self.frame == frame].copy()
cols = ['x', 'y', 'z']
# define the original vector
v0 = atom.iloc[adx1][cols].values.astype(np.float64) - atom.iloc[adx0][cols].values.astype(np.float64)
# get the vector to align with and normalize
v1 = axis/np.linalg.norm(axis)
# find the normal vector to rotate around
n = np.cross(v0, v1)
# find the angle to rotate the vector
theta = np.arccos(np.dot(v0, v1) / (np.linalg.norm(v0)*np.linalg.norm(v1)))
# use the center method to center the molecule
centered = Atom(atom).center(adx0, frame=frame, to=center_to)
# rotate the molecule around the normal vector
aligned = centered.rotate(theta=theta, axis=n, degrees=False)
return Atom(aligned)
def to_xyz(self, tag='symbol', header=False, comments='', columns=None,
frame=None, units='Angstrom'):
"""
Return atomic data in XYZ format, by default without the first 2 lines.
If multiple frames are specified, return an XYZ trajectory format. If
frame is not specified, by default returns the last frame in the table.
Args:
tag (str): column name to use in place of 'symbol'
header (bool): if True, return the first 2 lines of XYZ format
            comments (str, list): comment(s) to put in the comment line
frame (int, iter): frame or frames to return
units (str): units (default angstroms)
Returns:
ret (str): XYZ formatted atomic data
"""
# TODO :: this is conceptually a duplicate of XYZ.from_universe
columns = (tag, 'x', 'y', 'z') if columns is None else columns
frame = self.nframes - 1 if frame is None else frame
if isinstance(frame, Integral): frame = [frame]
if not isinstance(comments, list): comments = [comments]
if len(comments) == 1: comments = comments * len(frame)
df = self[self['frame'].isin(frame)].copy()
if tag not in df.columns:
if tag == 'Z':
stoz = sym2z()
df[tag] = df['symbol'].map(stoz)
df['x'] *= Length['au', units]
df['y'] *= Length['au', units]
df['z'] *= Length['au', units]
grps = df.groupby('frame')
ret = ''
formatter = {tag: '{:<5}'.format}
stargs = {'columns': columns, 'header': False,
'index': False, 'formatters': formatter}
t = 0
for _, grp in grps:
if not len(grp): continue
tru = (header or comments[t] or len(frame) > 1)
hdr = '\n'.join([str(len(grp)), comments[t], '']) if tru else ''
ret = ''.join([ret, hdr, grp.to_string(**stargs), '\n'])
t += 1
return ret
def get_element_masses(self):
"""Compute and return element masses from symbols."""
return self['symbol'].astype('O').map(sym2mass)
def get_atom_labels(self):
"""
Compute and return enumerated atoms.
Returns:
labels (:class:`~exatomic.exa.core.numerical.Series`): Enumerated atom labels (of type int)
"""
nats = self.cardinal_groupby().size().values
labels = Series([i for nat in nats for i in range(nat)], dtype='category')
labels.index = self.index
return labels
@classmethod
def from_small_molecule_data(cls, center=None, ligand=None, distance=None, geometry=None,
offset=None, plane=None, axis=None, domains=None, unit='Angstrom'):
'''
A minimal molecule builder for simple one-center, homogeneous ligand
molecules of various general chemistry molecular geometries. If domains
is not specified and geometry is ambiguous (like 'bent'),
it just guesses the simplest geometry (smallest number of domains).
        Args:
center (str): atomic symbol of central atom
ligand (str): atomic symbol of ligand atoms
distance (float): distance between central atom and any ligand
geometry (str): molecular geometry
domains (int): number of electronic domains
offset (np.array): 3-array of position of central atom
plane (str): cartesian plane of molecule (eg. for 'square_planar')
axis (str): cartesian axis of molecule (eg. for 'linear')
        Returns:
exatomic.atom.Atom: Atom table of small molecule
'''
return cls(make_small_molecule(center=center, ligand=ligand, distance=distance,
geometry=geometry, offset=offset, plane=plane,
axis=axis, domains=domains, unit=unit))
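# Illustrative sketch (added for this document; not part of exatomic): a quick
# Rodrigues-rotation sanity check using Atom.rotate above. Rotating the point
# (1, 0, 0) by 90 degrees about the z axis should give (0, 1, 0) up to floating
# point error. Whether the bare pandas construction below satisfies all of the
# exa DataFrame bookkeeping is an assumption made for brevity.
def _example_rotate_about_z():
    df = pd.DataFrame({'x': [1.0], 'y': [0.0], 'z': [0.0],
                       'symbol': ['H'], 'frame': [0]})
    atom = Atom(df)
    rotated = atom.rotate(90, axis=[0, 0, 1], frame=0)
    return rotated[['x', 'y', 'z']].values  # approximately [[0., 1., 0.]]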
class UnitAtom(DataFrame):
"""
In unit cell coordinates (sparse) for periodic systems. These coordinates
are used to update the corresponding :class:`~exatomic.atom.Atom` object
"""
_index = 'atom'
_columns = ['x', 'y', 'z']
#@property
#def _constructor(self):
# return UnitAtom
@classmethod
def from_universe(cls, universe):
if universe.periodic:
if "rx" not in universe.frame.columns:
universe.frame.compute_cell_magnitudes()
a, b, c = universe.frame[["rx", "ry", "rz"]].max().values
x = modv(universe.atom['x'].values, a)
y = modv(universe.atom['y'].values, b)
z = modv(universe.atom['z'].values, c)
df = pd.DataFrame.from_dict({'x': x, 'y': y, 'z': z})
df.index = universe.atom.index
return cls(df[universe.atom[['x', 'y', 'z']] != df])
raise PeriodicUniverseError()
class ProjectedAtom(DataFrame):
"""
Projected atom coordinates (e.g. on 3x3x3 supercell). These coordinates are
typically associated with their corresponding indices in another dataframe.
Note:
This table is computed when periodic two body properties are computed;
it doesn't have meaning outside of that context.
See Also:
:func:`~exatomic.two.compute_periodic_two`.
"""
_index = 'two'
_columns = ['x', 'y', 'z']
#@property
#def _constructor(self):
# return ProjectedAtom
class VisualAtom(DataFrame):
"""
"""
_index = 'atom'
_columns = ['x', 'y', 'z']
@classmethod
def from_universe(cls, universe):
"""
"""
if universe.frame.is_periodic():
atom = universe.atom[['x', 'y', 'z']].copy()
atom.update(universe.unit_atom)
bonded = universe.atom_two.loc[universe.atom_two['bond'] == True, 'atom1'].astype(np.int64)
prjd = universe.projected_atom.loc[bonded.index].to_dense()
prjd['atom'] = bonded
prjd.drop_duplicates('atom', inplace=True)
prjd.set_index('atom', inplace=True)
atom.update(prjd)
return cls(atom[atom != universe.atom[['x', 'y', 'z']]])
raise PeriodicUniverseError()
#@property
#def _constructor(self):
# return VisualAtom
class Frequency(DataFrame):
"""
The Frequency dataframe.
+-------------------+----------+-------------------------------------------+
| Column | Type | Description |
+===================+==========+===========================================+
| frame | category | non-unique integer (req.) |
+-------------------+----------+-------------------------------------------+
| frequency | float | frequency of oscillation (cm-1) (req.) |
+-------------------+----------+-------------------------------------------+
| freqdx | int | index of frequency of oscillation (req.) |
+-------------------+----------+-------------------------------------------+
| dx | float | atomic displacement in x direction (req.) |
+-------------------+----------+-------------------------------------------+
| dy | float | atomic displacement in y direction (req.) |
+-------------------+----------+-------------------------------------------+
| dz | float | atomic displacement in z direction (req.) |
+-------------------+----------+-------------------------------------------+
| ir_int | float | ir intensity of the vibrational mode |
+-------------------+----------+-------------------------------------------+
| symbol | str | atomic symbol (req.) |
+-------------------+----------+-------------------------------------------+
| label | int | atomic identifier |
+-------------------+----------+-------------------------------------------+
"""
_index = 'frequency'
_cardinal = ('frame', np.int64)
_categories = {'symbol': str, 'label': np.int64}
_columns = ['dx', 'dy', 'dz', 'symbol', 'frequency', 'freqdx', 'ir_int']
#@property
#def _constructor(self):
# return Frequency
def displacement(self, freqdx):
return self[self['freqdx'] == freqdx][['dx', 'dy', 'dz', 'symbol']]
def ir_spectra(self, fwhm=15, lineshape='gaussian', xrange=None, res=None, invert_x=False, **kwargs):
'''
        Generate an IR spectrum with the plotter classes. We can define gaussian or lorentzian
        lineshape functions. For the most part we pass all of the kwargs directly into the
plotter.Plot class.
Args:
fwhm (float): Full-width at half-maximum
lineshape (str): Switch between the different lineshape functions available
xrange (list): X-bounds for the plot
res (float): Resolution for the plot line
invert_x (bool): Invert x-axis
'''
# define the lineshape and store the function call in the line variable
try:
line = getattr(plotter, lineshape)
except AttributeError:
raise NotImplementedError("Sorry we have not yet implemented the lineshape {}.".format(lineshape))
# define a default parameter for the plot width
# we did this for a full-screen jupyter notebook on a 1920x1080 monitor
if not "plot_width" in kwargs:
kwargs.update(plot_width=900)
# define xbounds
xrange = [0, 4000] if xrange is None else xrange
# deal with inverted bounds
if xrange[0] > xrange[1]:
xrange = sorted(xrange)
invert_x = True
# define the resolution
res = fwhm/50 if res is None else res
# define the class
plot = plotter.Plot(**kwargs)
# this is designed for a single frame
if self['frame'].unique().shape[0] != 1:
raise NotImplementedError("We have not yet expanded to include multiple frames")
# grab the locations of the peaks between the bounds
freqdx = self['freqdx'].drop_duplicates().index
freq = self.loc[freqdx, 'frequency']
freq = freq[freq.between(*xrange)]
# grab the ir intensity data
# we use the frequency indexes instead of drop duplicates as we may have similar intensities
inten = self.loc[freq.index, 'ir_int'].astype(np.float64).values
# change to using the values instead as we no longer need the index data
# we could also use jit for the lineshape functions as we only deal with numpy arrays
freq = freq.values
x_data = np.arange(*xrange, res)
# get the y data by calling the lineshape function generator
y_data = line(freq=freq, x=x_data, fwhm=fwhm, inten=inten)
# plot the lineshape data
plot.fig.line(x_data, y_data)
        # plot the points on the plot to show where the frequency values are
# more useful when we have nearly degenerate vibrations
plot.fig.scatter(freq, line(freq=freq, x=freq, fwhm=fwhm, inten=inten))
if invert_x:
plot.set_xrange(xmin=xrange[1], xmax=xrange[0])
else:
plot.set_xrange(xmin=xrange[0], xmax=xrange[1])
# display the figure with our generated method
plot.show()
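# Illustrative usage (added for this document; not part of exatomic): for a
# universe ``uni`` whose frequency table holds a single frame, an IR spectrum
# broadened by 20 cm-1 over 400-4000 cm-1 could be drawn with something like:
#
#     uni.frequency.ir_spectra(fwhm=20, lineshape='lorentzian',
#                              xrange=[400, 4000])
#
# Whether 'lorentzian' is exposed by exatomic.plotter is an assumption; an
# unknown lineshape name raises the NotImplementedError above.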
def add_vibrational_mode(uni, freqdx):
    displacements = uni.frequency.displacement(freqdx)
if not all(displacements['symbol'] == uni.atom['symbol']):
print('Mismatch in ordering of atoms and frequencies.')
return
displaced = []
frames = []
# Should these only be absolute values?
factor = np.abs(np.sin(np.linspace(-4*np.pi, 4*np.pi, 200)))
for fac in factor:
moved = uni.atom.copy()
moved['x'] += displacements['dx'].values * fac
moved['y'] += displacements['dy'].values * fac
moved['z'] += displacements['dz'].values * fac
displaced.append(moved)
frames.append(uni.frame)
movie = pd.concat(displaced).reset_index()
movie['frame'] = np.repeat(range(len(factor)), len(uni.atom))
uni.frame = pd.concat(frames).reset_index()
uni.atom = movie
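# Illustrative usage (added for this document; not part of exatomic):
# add_vibrational_mode rewrites ``uni.atom`` and ``uni.frame`` in place into a
# 200-frame trajectory in which the atoms oscillate along the displacement
# vectors of the selected normal mode, e.g.:
#
#     add_vibrational_mode(uni, freqdx=0)   # animate the first listed mode
#
# after which the modified universe can be inspected or visualized frame by frame.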
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides an interface between the previous Pod
API and outputs a kubernetes.client.models.V1Pod.
The advantage is that the full Kubernetes API
is supported and no serialization needs to be written.
"""
import copy
import uuid
from typing import Dict, List, Optional, Union
import kubernetes.client.models as k8s
from airflow.version import version as airflow_version
MAX_POD_ID_LEN = 253
class PodDefaults:
"""
Static defaults for Pods
"""
XCOM_MOUNT_PATH = '/airflow/xcom'
SIDECAR_CONTAINER_NAME = 'airflow-xcom-sidecar'
XCOM_CMD = 'trap "exit 0" INT; while true; do sleep 30; done;'
VOLUME_MOUNT = k8s.V1VolumeMount(
name='xcom',
mount_path=XCOM_MOUNT_PATH
)
VOLUME = k8s.V1Volume(
name='xcom',
empty_dir=k8s.V1EmptyDirVolumeSource()
)
SIDECAR_CONTAINER = k8s.V1Container(
name=SIDECAR_CONTAINER_NAME,
command=['sh', '-c', XCOM_CMD],
image='alpine',
volume_mounts=[VOLUME_MOUNT],
resources=k8s.V1ResourceRequirements(
requests={
"cpu": "1m",
}
),
)
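# Illustrative sketch (added for this document; not part of Airflow): how the
# PodDefaults pieces fit together. The sidecar shares the ``xcom`` emptyDir
# volume with the main container, so the worker can write its result under
# /airflow/xcom while the sidecar keeps the pod alive until the result is read.
# This is a hand-rolled approximation of what PodGenerator.add_sidecar (defined
# below) produces; the image and names are placeholders.
def _example_xcom_sidecar_pod():
    base = k8s.V1Container(name='base', image='busybox',
                           volume_mounts=[PodDefaults.VOLUME_MOUNT])
    spec = k8s.V1PodSpec(containers=[base, PodDefaults.SIDECAR_CONTAINER],
                         volumes=[PodDefaults.VOLUME])
    return k8s.V1Pod(api_version='v1', kind='Pod',
                     metadata=k8s.V1ObjectMeta(name='example-xcom-pod'),
                     spec=spec)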
class PodGenerator:
"""
Contains Kubernetes Airflow Worker configuration logic
Represents a kubernetes pod and manages execution of a single pod.
Any configuration that is container specific gets applied to
the first container in the list of containers.
Parameters with a type of `kubernetes.client.models.*`/`k8s.*` can
often be replaced with their dictionary equivalent, for example the output of
`sanitize_for_serialization`.
:param image: The docker image
:type image: Optional[str]
:param name: name in the metadata section (not the container name)
:type name: Optional[str]
:param namespace: pod namespace
:type namespace: Optional[str]
:param volume_mounts: list of kubernetes volumes mounts
:type volume_mounts: Optional[List[Union[k8s.V1VolumeMount, dict]]]
:param envs: A dict containing the environment variables
:type envs: Optional[Dict[str, str]]
:param cmds: The command to be run on the first container
:type cmds: Optional[List[str]]
:param args: The arguments to be run on the pod
:type args: Optional[List[str]]
:param labels: labels for the pod metadata
:type labels: Optional[Dict[str, str]]
:param node_selectors: node selectors for the pod
:type node_selectors: Optional[Dict[str, str]]
:param ports: list of ports. Applies to the first container.
:type ports: Optional[List[Union[k8s.V1ContainerPort, dict]]]
:param volumes: Volumes to be attached to the first container
:type volumes: Optional[List[Union[k8s.V1Volume, dict]]]
:param image_pull_policy: Specify a policy to cache or always pull an image
:type image_pull_policy: str
:param restart_policy: The restart policy of the pod
:type restart_policy: str
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a comma separated list:
secret_a,secret_b
:type image_pull_secrets: str
:param init_containers: A list of init containers
:type init_containers: Optional[List[k8s.V1Container]]
:param service_account_name: Identity for processes that run in a Pod
:type service_account_name: Optional[str]
:param resources: Resource requirements for the first containers
:type resources: Optional[Union[k8s.V1ResourceRequirements, dict]]
:param annotations: annotations for the pod
:type annotations: Optional[Dict[str, str]]
:param affinity: A dict containing a group of affinity scheduling rules
:type affinity: Optional[dict]
:param hostnetwork: If True enable host networking on the pod
:type hostnetwork: bool
:param tolerations: A list of kubernetes tolerations
:type tolerations: Optional[list]
:param security_context: A dict containing the security context for the pod
:type security_context: Optional[Union[k8s.V1PodSecurityContext, dict]]
:param configmaps: Any configmap refs to envfrom.
If more than one configmap is required, provide a comma separated list
configmap_a,configmap_b
:type configmaps: List[str]
:param dnspolicy: Specify a dnspolicy for the pod
:type dnspolicy: Optional[str]
:param schedulername: Specify a schedulername for the pod
:type schedulername: Optional[str]
:param pod: The fully specified pod. Mutually exclusive with `path_or_string`
:type pod: Optional[kubernetes.client.models.V1Pod]
:param extract_xcom: Whether to bring up a container for xcom
:type extract_xcom: bool
"""
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self,
image: Optional[str] = None,
name: Optional[str] = None,
namespace: Optional[str] = None,
volume_mounts: Optional[List[Union[k8s.V1VolumeMount, dict]]] = None,
envs: Optional[Dict[str, str]] = None,
cmds: Optional[List[str]] = None,
args: Optional[List[str]] = None,
labels: Optional[Dict[str, str]] = None,
node_selectors: Optional[Dict[str, str]] = None,
ports: Optional[List[Union[k8s.V1ContainerPort, dict]]] = None,
volumes: Optional[List[Union[k8s.V1Volume, dict]]] = None,
image_pull_policy: str = 'IfNotPresent',
restart_policy: str = 'Never',
image_pull_secrets: Optional[str] = None,
init_containers: Optional[List[k8s.V1Container]] = None,
service_account_name: Optional[str] = None,
resources: Optional[Union[k8s.V1ResourceRequirements, dict]] = None,
annotations: Optional[Dict[str, str]] = None,
affinity: Optional[dict] = None,
hostnetwork: bool = False,
tolerations: Optional[list] = None,
security_context: Optional[Union[k8s.V1PodSecurityContext, dict]] = None,
configmaps: Optional[List[str]] = None,
dnspolicy: Optional[str] = None,
schedulername: Optional[str] = None,
pod: Optional[k8s.V1Pod] = None,
extract_xcom: bool = False,
):
self.ud_pod = pod
self.pod = k8s.V1Pod()
self.pod.api_version = 'v1'
self.pod.kind = 'Pod'
# Pod Metadata
self.metadata = k8s.V1ObjectMeta()
self.metadata.labels = labels
self.metadata.name = name
self.metadata.namespace = namespace
self.metadata.annotations = annotations
# Pod Container
self.container = k8s.V1Container(name='base')
self.container.image = image
self.container.env = []
if envs:
if isinstance(envs, dict):
for key, val in envs.items():
self.container.env.append(k8s.V1EnvVar(
name=key,
value=val
))
elif isinstance(envs, list):
self.container.env.extend(envs)
configmaps = configmaps or []
self.container.env_from = []
for configmap in configmaps:
self.container.env_from.append(k8s.V1EnvFromSource(
config_map_ref=k8s.V1ConfigMapEnvSource(
name=configmap
)
))
self.container.command = cmds or []
self.container.args = args or []
self.container.image_pull_policy = image_pull_policy
self.container.ports = ports or []
self.container.resources = resources
self.container.volume_mounts = volume_mounts or []
# Pod Spec
self.spec = k8s.V1PodSpec(containers=[])
self.spec.security_context = security_context
self.spec.tolerations = tolerations
self.spec.dns_policy = dnspolicy
self.spec.scheduler_name = schedulername
self.spec.host_network = hostnetwork
self.spec.affinity = affinity
self.spec.service_account_name = service_account_name
self.spec.init_containers = init_containers
self.spec.volumes = volumes or []
self.spec.node_selector = node_selectors
self.spec.restart_policy = restart_policy
self.spec.image_pull_secrets = []
if image_pull_secrets:
for image_pull_secret in image_pull_secrets.split(','):
self.spec.image_pull_secrets.append(k8s.V1LocalObjectReference(
name=image_pull_secret
))
# Attach sidecar
self.extract_xcom = extract_xcom
def gen_pod(self) -> k8s.V1Pod:
"""Generates pod"""
result = self.ud_pod
if result is None:
result = self.pod
result.spec = self.spec
result.metadata = self.metadata
result.spec.containers = [self.container]
result.metadata.name = self.make_unique_pod_id(result.metadata.name)
if self.extract_xcom:
result = self.add_sidecar(result)
return result
@staticmethod
def add_sidecar(pod: k8s.V1Pod) -> k8s.V1Pod:
"""Adds sidecar"""
pod_cp = copy.deepcopy(pod)
pod_cp.spec.volumes = pod.spec.volumes or []
pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
pod_cp.spec.containers[0].volume_mounts = pod_cp.spec.containers[0].volume_mounts or []
pod_cp.spec.containers[0].volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
return pod_cp
@staticmethod
def from_obj(obj) -> Optional[k8s.V1Pod]:
"""Converts to pod from obj"""
if obj is None:
return None
if isinstance(obj, PodGenerator):
return obj.gen_pod()
if not isinstance(obj, dict):
raise TypeError(
'Cannot convert a non-dictionary or non-PodGenerator '
'object into a KubernetesExecutorConfig')
        # We do not want to extract the constant from ExecutorLoader here because it is
        # just a name in a dictionary rather than an executor selection mechanism, and
        # importing it would cause a cyclic import.
namespaced = obj.get("KubernetesExecutor", {})
if not namespaced:
return None
resources = namespaced.get('resources')
if resources is None:
requests = {
'cpu': namespaced.get('request_cpu'),
'memory': namespaced.get('request_memory')
}
limits = {
'cpu': namespaced.get('limit_cpu'),
'memory': namespaced.get('limit_memory')
}
all_resources = list(requests.values()) + list(limits.values())
if all(r is None for r in all_resources):
resources = None
else:
resources = k8s.V1ResourceRequirements(
requests=requests,
limits=limits
)
pod_spec_generator = PodGenerator(
image=namespaced.get('image'),
envs=namespaced.get('env'),
cmds=namespaced.get('cmds'),
args=namespaced.get('args'),
labels=namespaced.get('labels'),
node_selectors=namespaced.get('node_selectors'),
name=namespaced.get('name'),
ports=namespaced.get('ports'),
volumes=namespaced.get('volumes'),
volume_mounts=namespaced.get('volume_mounts'),
namespace=namespaced.get('namespace'),
image_pull_policy=namespaced.get('image_pull_policy'),
restart_policy=namespaced.get('restart_policy'),
image_pull_secrets=namespaced.get('image_pull_secrets'),
init_containers=namespaced.get('init_containers'),
service_account_name=namespaced.get('service_account_name'),
resources=resources,
annotations=namespaced.get('annotations'),
affinity=namespaced.get('affinity'),
hostnetwork=namespaced.get('hostnetwork'),
tolerations=namespaced.get('tolerations'),
security_context=namespaced.get('security_context'),
configmaps=namespaced.get('configmaps'),
dnspolicy=namespaced.get('dnspolicy'),
schedulername=namespaced.get('schedulername'),
pod=namespaced.get('pod'),
extract_xcom=namespaced.get('extract_xcom'),
)
return pod_spec_generator.gen_pod()
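    # Sketch of the dict shape from_obj() accepts (the field values below are
    # hypothetical); only the "KubernetesExecutor" key is inspected, and an
    # empty or missing entry yields None:
    #
    #     executor_config = {
    #         'KubernetesExecutor': {
    #             'image': 'alpine:3.11',
    #             'request_cpu': '200m',
    #             'limit_memory': '512Mi',
    #             'labels': {'purpose': 'example'},
    #         }
    #     }
    #     pod = PodGenerator.from_obj(executor_config)  # k8s.V1Pod or None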
@staticmethod
def reconcile_pods(base_pod: k8s.V1Pod, client_pod: Optional[k8s.V1Pod]) -> k8s.V1Pod:
"""
:param base_pod: has the base attributes which are overwritten if they exist
in the client pod and remain if they do not exist in the client_pod
:type base_pod: k8s.V1Pod
:param client_pod: the pod that the client wants to create.
:type client_pod: k8s.V1Pod
:return: the merged pods
        This can't be done recursively, as certain fields are overwritten while
        others are concatenated.
"""
if client_pod is None:
return base_pod
client_pod_cp = copy.deepcopy(client_pod)
client_pod_cp.spec = PodGenerator.reconcile_specs(base_pod.spec, client_pod_cp.spec)
client_pod_cp.metadata = merge_objects(base_pod.metadata, client_pod_cp.metadata)
client_pod_cp = merge_objects(base_pod, client_pod_cp)
return client_pod_cp
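    # Minimal sketch of the merge precedence (the images below are hypothetical):
    # scalar fields from client_pod win, while list fields such as container env
    # and volumes are concatenated by reconcile_specs/extend_object_field:
    #
    #     base = PodGenerator(image='base:1', envs={'A': '1'}).gen_pod()
    #     client = PodGenerator(image='client:2', envs={'B': '2'}).gen_pod()
    #     merged = PodGenerator.reconcile_pods(base, client)
    #     # merged.spec.containers[0].image == 'client:2'
    #     # merged.spec.containers[0].env contains both A and B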
@staticmethod
def reconcile_specs(base_spec: Optional[k8s.V1PodSpec],
client_spec: Optional[k8s.V1PodSpec]) -> Optional[k8s.V1PodSpec]:
"""
:param base_spec: has the base attributes which are overwritten if they exist
in the client_spec and remain if they do not exist in the client_spec
:type base_spec: k8s.V1PodSpec
:param client_spec: the spec that the client wants to create.
:type client_spec: k8s.V1PodSpec
:return: the merged specs
"""
if base_spec and not client_spec:
return base_spec
if not base_spec and client_spec:
return client_spec
elif client_spec and base_spec:
client_spec.containers = PodGenerator.reconcile_containers(
base_spec.containers, client_spec.containers
)
merged_spec = extend_object_field(base_spec, client_spec, 'volumes')
return merge_objects(base_spec, merged_spec)
return None
@staticmethod
def reconcile_containers(base_containers: List[k8s.V1Container],
client_containers: List[k8s.V1Container]) -> List[k8s.V1Container]:
"""
:param base_containers: has the base attributes which are overwritten if they exist
in the client_containers and remain if they do not exist in the client_containers
:type base_containers: List[k8s.V1Container]
:param client_containers: the containers that the client wants to create.
:type client_containers: List[k8s.V1Container]
:return: the merged containers
        This runs recursively over the list of containers.
"""
if not base_containers:
return client_containers
if not client_containers:
return base_containers
client_container = client_containers[0]
base_container = base_containers[0]
client_container = extend_object_field(base_container, client_container, 'volume_mounts')
client_container = extend_object_field(base_container, client_container, 'env')
client_container = extend_object_field(base_container, client_container, 'env_from')
client_container = extend_object_field(base_container, client_container, 'ports')
client_container = extend_object_field(base_container, client_container, 'volume_devices')
client_container = merge_objects(base_container, client_container)
return [client_container] + PodGenerator.reconcile_containers(
base_containers[1:], client_containers[1:]
)
@staticmethod
def construct_pod(
dag_id: str,
task_id: str,
pod_id: str,
try_number: int,
date: str,
command: List[str],
kube_executor_config: Optional[k8s.V1Pod],
worker_config: k8s.V1Pod,
namespace: str,
worker_uuid: str
) -> k8s.V1Pod:
"""
Construct a pod by gathering and consolidating the configuration from 3 places:
- airflow.cfg
- executor_config
- dynamic arguments
"""
dynamic_pod = PodGenerator(
namespace=namespace,
labels={
'airflow-worker': worker_uuid,
'dag_id': dag_id,
'task_id': task_id,
'execution_date': date,
'try_number': str(try_number),
'airflow_version': airflow_version.replace('+', '-'),
'kubernetes_executor': 'True',
},
cmds=command,
name=pod_id
).gen_pod()
# Reconcile the pod generated by the Operator and the Pod
# generated by the .cfg file
pod_with_executor_config = PodGenerator.reconcile_pods(worker_config,
kube_executor_config)
# Reconcile that pod with the dynamic fields.
return PodGenerator.reconcile_pods(pod_with_executor_config, dynamic_pod)
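    # Resulting precedence, as a sketch: the worker_config from airflow.cfg is the
    # base, kube_executor_config overrides it, and the dynamically generated
    # labels/cmds/name override both (list fields are concatenated as in
    # reconcile_pods above).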
@staticmethod
def make_unique_pod_id(dag_id):
"""
Kubernetes pod names must be <= 253 chars and must pass the following regex for
validation
``^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$``
:param dag_id: a dag_id with only alphanumeric characters
:return: ``str`` valid Pod name of appropriate length
"""
if not dag_id:
return None
safe_uuid = uuid.uuid4().hex
safe_pod_id = dag_id[:MAX_POD_ID_LEN - len(safe_uuid) - 1] + "-" + safe_uuid
return safe_pod_id
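# Example of the id shape produced by PodGenerator.make_unique_pod_id (the hex
# suffix below is hypothetical; uuid4().hex differs on every call):
#
#     PodGenerator.make_unique_pod_id('my-dag')
#     # -> 'my-dag-0fcb7f17c2a4434f86f5523846cb2b4d'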
def merge_objects(base_obj, client_obj):
"""
:param base_obj: has the base attributes which are overwritten if they exist
in the client_obj and remain if they do not exist in the client_obj
:param client_obj: the object that the client wants to create.
:return: the merged objects
"""
if not base_obj:
return client_obj
if not client_obj:
return base_obj
client_obj_cp = copy.deepcopy(client_obj)
for base_key in base_obj.to_dict().keys():
base_val = getattr(base_obj, base_key, None)
if not getattr(client_obj, base_key, None) and base_val:
setattr(client_obj_cp, base_key, base_val)
return client_obj_cp
def extend_object_field(base_obj, client_obj, field_name):
"""
:param base_obj: an object which has a property `field_name` that is a list
:param client_obj: an object which has a property `field_name` that is a list.
A copy of this object is returned with `field_name` modified
:param field_name: the name of the list field
:type field_name: str
    :return: the client_obj with the property `field_name` set to the two lists concatenated
"""
client_obj_cp = copy.deepcopy(client_obj)
base_obj_field = getattr(base_obj, field_name, None)
client_obj_field = getattr(client_obj, field_name, None)
if (not isinstance(base_obj_field, list) and base_obj_field is not None) or \
(not isinstance(client_obj_field, list) and client_obj_field is not None):
raise ValueError("The chosen field must be a list.")
if not base_obj_field:
return client_obj_cp
if not client_obj_field:
setattr(client_obj_cp, field_name, base_obj_field)
return client_obj_cp
appended_fields = base_obj_field + client_obj_field
setattr(client_obj_cp, field_name, appended_fields)
return client_obj_cp
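# Brief sketch of the two helpers above (the container values are hypothetical):
# merge_objects fills in scalar fields that are missing on the client object,
# while extend_object_field concatenates a list field:
#
#     base = k8s.V1Container(name='base', image='alpine',
#                            env=[k8s.V1EnvVar(name='A', value='1')])
#     client = k8s.V1Container(name='base',
#                              env=[k8s.V1EnvVar(name='B', value='2')])
#     merged = merge_objects(base, extend_object_field(base, client, 'env'))
#     # merged.image == 'alpine'; merged.env contains both A and B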
|
|
#!/usr/bin/env python
# Copyright (c) 2015-2016 Anish Athalye ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import dropbox
import json
import multiprocessing
import multiprocessing.dummy
import multiprocessing.pool
import os
import posixpath
import subprocess
import sys
import zlib
__version__ = '0.2.0'
CONFIG_FILE = '~/.git-remote-dropbox.json'
DEVNULL = open(os.devnull, 'w')
PROCESSES = 20
MAX_RETRIES = 3
def stdout(line):
"""
Write line to standard output.
"""
sys.stdout.write(line)
sys.stdout.flush()
def stderr(line):
"""
Write line to standard error.
"""
sys.stderr.write(line)
sys.stderr.flush()
def readline():
"""
Read a line from standard input.
"""
return sys.stdin.readline().strip() # remove trailing newline
def git_command_output(*args, **kwargs):
"""
Return the result of running a git command.
"""
args = ('git',) + args
output = subprocess.check_output(args, stderr=DEVNULL)
if kwargs.get('decode', True):
output = output.decode('utf8')
if kwargs.get('strip', True):
output = output.strip()
return output
def git_command_ok(*args):
"""
Return whether a git command runs successfully.
"""
args = ('git',) + args
return subprocess.call(args, stdout=DEVNULL, stderr=DEVNULL) == 0
def git_is_ancestor(ancestor, ref):
"""
Return whether ancestor is an ancestor of ref.
This returns true when it is possible to fast-forward from ancestor to ref.
"""
return git_command_ok('merge-base', '--is-ancestor', ancestor, ref)
def git_object_exists(sha):
"""
Return whether the object exists in the repository.
"""
return git_command_ok('cat-file', '-t', sha)
def git_history_exists(sha):
"""
Return whether the object, along with its history, exists in the
repository.
"""
return git_command_ok('rev-list', '--objects', sha)
def git_ref_value(ref):
"""
Return the hash of the ref.
"""
return git_command_output('rev-parse', ref)
def git_object_kind(sha):
"""
Return the type of the object.
"""
return git_command_output('cat-file', '-t', sha)
def git_object_data(sha, kind=None):
"""
Return the contents of the object.
If kind is None, return a pretty-printed representation of the object.
"""
if kind is not None:
return git_command_output('cat-file', kind, sha, decode=False, strip=False)
else:
return git_command_output('cat-file', '-p', sha, decode=False, strip=False)
def git_encode_object(sha):
"""
Return the encoded contents of the object.
The encoding is identical to the encoding git uses for loose objects.
This operation is the inverse of `git_decode_object`.
"""
kind = git_object_kind(sha)
size = git_command_output('cat-file', '-s', sha)
contents = git_object_data(sha, kind)
data = kind.encode('utf8') + b' ' + size.encode('utf8') + b'\0' + contents
compressed = zlib.compress(data)
return compressed
def git_decode_object(data):
"""
Decode the object, write it, and return the computed hash.
This operation is the inverse of `git_encode_object`.
"""
decompressed = zlib.decompress(data)
header, contents = decompressed.split(b'\0', 1)
kind = header.split()[0]
p = subprocess.Popen(['git', 'hash-object', '-w', '--stdin', '-t', kind],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=DEVNULL)
sha = p.communicate(contents)[0].decode('utf8').strip()
return sha
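# Round-trip sketch for the two functions above (the blob contents are
# hypothetical): a loose object is "<kind> <size>\0<contents>", zlib compressed.
# Inside a git repository:
#
#     data = zlib.compress(b'blob 5\x00hello')
#     sha = git_decode_object(data)    # writes the blob, returns its sha1
#     git_encode_object(sha) == data   # same bytes back with the default zlib level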
def git_list_objects(ref, exclude):
"""
Return the objects reachable from ref excluding the objects reachable from
exclude.
"""
exclude = ['^%s' % obj for obj in exclude if git_object_exists(obj)]
objects = git_command_output('rev-list', '--objects', ref, *exclude)
if not objects:
return []
return [i.split()[0] for i in objects.split('\n')]
def git_referenced_objects(sha):
"""
Return the objects directly referenced by the object.
"""
kind = git_object_kind(sha)
if kind == 'blob':
# blob objects do not reference any other objects
return []
data = git_object_data(sha).decode('utf8').strip()
if kind == 'tag':
# tag objects reference a single object
obj = data.split('\n')[0].split()[1]
return [obj]
elif kind == 'commit':
# commit objects reference a tree and zero or more parents
lines = data.split('\n')
tree = lines[0].split()[1]
objs = [tree]
for line in lines[1:]:
if line.startswith('parent '):
objs.append(line.split()[1])
else:
break
return objs
elif kind == 'tree':
# tree objects reference zero or more trees and blobs, or submodules
lines = data.split('\n')
        # submodules have the mode '160000' and the kind 'commit'; we filter them out because
        # there is nothing to download and they would otherwise cause errors
return [line.split()[2] for line in lines if not line.startswith('160000 commit ')]
else:
raise Exception('unexpected git object type: %s' % kind)
class Level(object):
"""
A class for severity levels.
"""
ERROR = 0
INFO = 1
DEBUG = 2
class Poison(object):
"""
A poison pill.
Instances of this class can be used as sentinel objects to communicate
termination requests to processes.
"""
def __init__(self, message=None):
self.message = message
class Binder(object):
"""
A class to bind a method to an object.
Python's built-in pickling does not work on bound methods or lambdas. This
class is designed to work around that restriction. In addition, it provides
the ability to partially apply a function.
For example, Binder can be used as follows:
>>> class A(object):
... def __init__(self, x):
... self.x = x
... def add(self, y, z):
... return self.x + y + z
...
>>> b = Binder(A(1), 'add', 2)
>>> b(3)
6
In the above example, it is possible to pickle the `b` object.
"""
def __init__(self, obj, func_name, *args):
"""
Initialize a Binder with an object and a function by its name.
Partially apply the function with args.
"""
self._obj = obj
self._func_name = func_name
self._args = args
def __call__(self, *args):
"""
Call the function bound to the object, passing args if given.
"""
# we cannot pickle an instance method, but we can pickle the instance
# itself along with the method name, and then we can dynamically
# retrieve the unbound method and call it with the instance and
# arguments
method = getattr(type(self._obj), self._func_name)
args = self._args + args
return method(self._obj, *args)
class Helper(object):
"""
A git remote helper to communicate with Dropbox.
"""
def __init__(self, token, url, processes=PROCESSES):
self._token = token
self._url = url
self._processes = processes
self._verbosity = Level.INFO # default verbosity
self._refs = {} # map from remote ref name => (rev number, sha)
self._pushed = {} # map from remote ref name => sha
def _write(self, message=None):
"""
Write a message to standard output.
"""
if message is not None:
stdout('%s\n' % message)
else:
stdout('\n')
def _trace(self, message, level=Level.DEBUG, exact=False):
"""
Log a message with a given severity level.
"""
if level > self._verbosity:
return
if exact:
if level == self._verbosity:
stderr(message)
return
if level <= Level.ERROR:
stderr('error: %s\n' % message)
elif level == Level.INFO:
stderr('info: %s\n' % message)
elif level >= Level.DEBUG:
stderr('debug: %s\n' % message)
def _fatal(self, message):
"""
Log a fatal error and exit.
"""
self._trace(message, Level.ERROR)
exit(1)
def _connection(self):
"""
Return a Dropbox connection object.
"""
# we use fresh connection objects for every use so that multiple
# threads can have connections simultaneously
return dropbox.Dropbox(self._token)
def run(self):
"""
Run the helper following the git remote helper communication protocol.
"""
while True:
line = readline()
if line == 'capabilities':
self._write('option')
self._write('push')
self._write('fetch')
self._write()
elif line.startswith('option'):
self._do_option(line)
elif line.startswith('list'):
self._do_list(line)
elif line.startswith('push'):
self._do_push(line)
elif line.startswith('fetch'):
self._do_fetch(line)
elif line == '':
break
else:
self._fatal('unsupported operation: %s' % line)
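    # Rough sketch of the remote-helper exchange run() implements (ref names and
    # shas are hypothetical; git writes the commands, the helper writes the
    # replies, and a blank line terminates each batch):
    #
    #     capabilities              ->  option / push / fetch / <blank>
    #     list for-push             ->  <sha> refs/heads/master / <blank>
    #     push refs/heads/master:refs/heads/master
    #     <blank>                   ->  ok refs/heads/master / <blank>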
def _do_option(self, line):
"""
Handle the option command.
"""
if line.startswith('option verbosity'):
self._verbosity = int(line[len('option verbosity '):])
self._write('ok')
else:
self._write('unsupported')
def _do_list(self, line):
"""
Handle the list command.
"""
for_push = 'for-push' in line
refs = self._get_refs(for_push=for_push)
for ref in refs:
self._write(ref)
self._write()
def _do_push(self, line):
"""
Handle the push command.
"""
while True:
src, dst = line.split(' ')[1].split(':')
if src == '':
self._delete(dst)
else:
self._push(src, dst)
line = readline()
if line == '':
break
self._write()
def _do_fetch(self, line):
"""
Handle the fetch command.
"""
while True:
_, sha, value = line.split(' ')
self._fetch(sha)
line = readline()
if line == '':
break
self._write()
def _delete(self, ref):
"""
Delete the ref from the remote.
"""
self._trace('deleting ref %s' % ref)
try:
self._connection().files_delete(self._ref_path(ref))
except dropbox.exceptions.ApiError as e:
if not isinstance(e.error, dropbox.files.DeleteError):
raise
# someone else might have deleted it first, that's fine
self._refs.pop(ref, None) # discard
self._pushed.pop(ref, None) # discard
self._write('ok %s' % ref)
def _push(self, src, dst):
"""
Push src to dst on the remote.
"""
force = False
if src.startswith('+'):
src = src[1:]
force = True
present = [self._refs[name][1] for name in self._refs]
present.extend(self._pushed.values())
# before updating the ref, write all objects that are referenced
objects = git_list_objects(src, present)
try:
# upload objects in parallel
pool = multiprocessing.pool.ThreadPool(processes=self._processes)
res = pool.imap_unordered(Binder(self, '_put_object'), objects)
# show progress
total = len(objects)
self._trace('', level=Level.INFO, exact=True)
for done, _ in enumerate(res, 1):
pct = float(done) / total
message = '\rWriting objects: {:4.0%} ({}/{})'.format(pct, done, total)
if done == total:
message = '%s, done.\n' % message
self._trace(message, level=Level.INFO, exact=True)
except Exception:
self._fatal('exception while writing objects')
sha = git_ref_value(src)
error = self._write_ref(sha, dst, force)
if error is None:
self._write('ok %s' % dst)
self._pushed[dst] = sha
else:
self._write('error %s %s' % (dst, error))
def _ref_path(self, name):
"""
Return the path to the given ref on the remote.
"""
assert name.startswith('refs/')
return posixpath.join(self._url, name)
def _ref_name_from_path(self, path):
"""
Return the ref name given the full path of the remote ref.
"""
prefix = '%s/' % self._url
assert path.startswith(prefix)
return path[len(prefix):]
def _object_path(self, name):
"""
Return the path to the given object on the remote.
"""
prefix = name[:2]
suffix = name[2:]
return posixpath.join(self._url, 'objects', prefix, suffix)
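    # For example, with a hypothetical url '/repo' and an object sha starting
    # with '49f6', the object is stored at '/repo/objects/49/f6...' using a
    # git-style two-character fan-out.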
def _get_file(self, path):
"""
Return the revision number and content of a given file on the remote.
Return a tuple (revision, content).
"""
self._trace('fetching: %s' % path)
meta, resp = self._connection().files_download(path)
return (meta.rev, resp.content)
def _put_object(self, sha):
"""
Upload an object to the remote.
"""
data = git_encode_object(sha)
path = self._object_path(sha)
self._trace('writing: %s' % path)
retries = 0
while True:
try:
mode = dropbox.files.WriteMode('overwrite')
self._connection().files_upload(data, path, mode, mute=True)
except dropbox.exceptions.InternalServerError:
self._trace('internal server error writing %s, retrying' % sha)
if retries < MAX_RETRIES:
retries += 1
else:
raise
else:
break
def _download(self, input_queue, output_queue):
"""
Download files given in input_queue and push results to output_queue.
"""
while True:
try:
obj = input_queue.get()
if isinstance(obj, Poison):
return
_, data = self._get_file(self._object_path(obj))
computed_sha = git_decode_object(data)
if computed_sha != obj:
output_queue.put(
Poison('hash mismatch %s != %s' % (computed_sha, obj)))
output_queue.put(obj)
except Exception as e:
output_queue.put(Poison('exception while downloading: %s' % e))
def _fetch(self, sha):
"""
Recursively fetch the given object and the objects it references.
"""
# have multiple threads downloading in parallel
queue = [sha]
pending = set()
downloaded = set()
input_queue = multiprocessing.Queue() # requesting downloads
output_queue = multiprocessing.Queue() # completed downloads
procs = []
for _ in range(self._processes):
target = Binder(self, '_download')
args = (input_queue, output_queue)
# use multiprocessing.dummy to use threads instead of processes
proc = multiprocessing.dummy.Process(target=target, args=args)
proc.daemon = True
proc.start()
procs.append(proc)
self._trace('', level=Level.INFO, exact=True) # for showing progress
while queue or pending:
if queue:
# if possible, queue up download
sha = queue.pop()
if sha in downloaded or sha in pending:
continue
if git_object_exists(sha):
if not git_history_exists(sha):
# this can only happen in the case of aborted fetches
# that are resumed later
self._trace('missing part of history from %s' % sha)
queue.extend(git_referenced_objects(sha))
else:
self._trace('%s already downloaded' % sha)
else:
pending.add(sha)
input_queue.put(sha)
else:
# process completed download
res = output_queue.get()
if isinstance(res, Poison):
self._fatal(res.message)
pending.remove(res)
downloaded.add(res)
queue.extend(git_referenced_objects(res))
# show progress
done = len(downloaded)
total = done + len(pending)
pct = float(done) / total
message = '\rReceiving objects: {:4.0%} ({}/{})'.format(pct, done, total)
self._trace(message, level=Level.INFO, exact=True)
self._trace('\rReceiving objects: 100% ({}/{}), done.\n'.format(done, total),
level=Level.INFO, exact=True)
for proc in procs:
input_queue.put(Poison())
for proc in procs:
proc.join()
def _write_ref(self, new_sha, dst, force=False):
"""
Atomically update the given reference to point to the given object.
Return None if there is no error, otherwise return a description of the
error.
"""
path = self._ref_path(dst)
if force:
# overwrite regardless of what is there before
mode = dropbox.files.WriteMode('overwrite')
else:
info = self._refs.get(dst, None)
if info:
rev, sha = info
is_fast_forward = git_is_ancestor(sha, new_sha)
if not is_fast_forward and not force:
return 'non-fast-forward'
# perform an atomic compare-and-swap
mode = dropbox.files.WriteMode.update(rev)
else:
# perform an atomic add, which fails if a concurrent writer
# writes before this does
mode = dropbox.files.WriteMode('add')
self._trace('writing ref %s with mode %s' % (dst, mode))
data = '%s\n' % new_sha
try:
self._connection().files_upload(data, path, mode, mute=True)
except dropbox.exceptions.ApiError as e:
if not isinstance(e.error, dropbox.files.UploadError):
raise
return 'fetch first'
else:
return None
def _get_refs(self, for_push):
"""
Return the refs present on the remote.
"""
try:
loc = posixpath.join(self._url, 'refs')
res = self._connection().files_list_folder(loc, recursive=True)
files = res.entries
while res.has_more:
res = self._connection().files_list_folder_continue(res.cursor)
files.extend(res.entries)
except dropbox.exceptions.ApiError as e:
if not isinstance(e.error, dropbox.files.ListFolderError):
raise
if not for_push:
                # an empty repository is expected when pushing for the first
                # time, but when fetching it's good to notify the user
self._trace('repository is empty', Level.INFO)
return []
refs = []
for ref_file in files:
if not isinstance(ref_file, dropbox.files.FileMetadata):
continue
path = ref_file.path_lower
name = self._ref_name_from_path(path)
rev, data = self._get_file(path)
sha = data.decode('utf8').strip()
self._refs[name] = (rev, sha)
refs.append('%s %s' % (sha, name))
return refs
class Config(object):
"""
A class to manage configuration data.
"""
def __init__(self, filename):
with open(filename) as f:
self._settings = json.load(f)
def __getitem__(self, key):
"""
Return the setting corresponding to key.
Raises KeyError if the config file is missing the key.
"""
return self._settings[key]
def main():
name, url = sys.argv[1:3]
url = url.lower()
if url.startswith('dropbox://'):
url = url[len('dropbox:/'):] # keep single leading slash
if not url.startswith('/') or url.endswith('/'):
stderr('error: URL must have leading slash and no trailing slash\n')
exit(1)
config_files = [
os.path.join(os.environ.get('XDG_CONFIG_HOME',
os.path.expanduser('~/.config')),
'git',
'git-remote-dropbox.json'),
os.path.expanduser('~/.git-remote-dropbox.json'),
]
config = None
for config_file in config_files:
try:
config = Config(config_file)
except ValueError:
stderr('error: malformed config file: %s\n' % config_file)
exit(1)
except IOError:
continue
else:
break
if not config:
stderr('error: missing config file: %s\n' % config_files[0])
exit(1)
try:
token = config['token']
except KeyError:
stderr('error: config file missing token\n')
exit(1)
helper = Helper(token, url)
try:
helper.run()
except Exception:
stderr('error: unexpected exception\n')
except KeyboardInterrupt:
# exit silently with an error code
exit(1)
if __name__ == '__main__':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
main()
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds parser models."""
import tensorflow as tf
import syntaxnet.load_parser_ops
from tensorflow.python.ops import control_flow_ops as cf
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from syntaxnet.ops import gen_parser_ops
def BatchedSparseToDense(sparse_indices, output_size):
"""Batch compatible sparse to dense conversion.
This is useful for one-hot coded target labels.
Args:
sparse_indices: [batch_size] tensor containing one index per batch
output_size: needed in order to generate the correct dense output
Returns:
A [batch_size, output_size] dense tensor.
"""
eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))
return tf.nn.embedding_lookup(eye, sparse_indices)
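# Worked example (hypothetical values): with output_size=4, the identity lookup
# turns each index into a one-hot row:
#
#     BatchedSparseToDense(tf.constant([2, 0]), 4)
#     # -> [[0., 0., 1., 0.],
#     #     [1., 0., 0., 0.]]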
def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
"""Computes embeddings for each entry of sparse features sparse_features.
Args:
params: list of 2D tensors containing vector embeddings
sparse_features: 1D tensor of strings. Each entry is a string encoding of
dist_belief.SparseFeatures, and represents a variable length list of
      feature ids and, optionally, corresponding weight values.
allow_weights: boolean to control whether the weights returned from the
SparseFeatures are used to multiply the embeddings.
Returns:
A tensor representing the combined embeddings for the sparse features.
For each entry s in sparse_features, the function looks up the embeddings
    for each id and sums them into a single tensor, weighting them by the
weight of each id. It returns a tensor with each entry of sparse_features
replaced by this combined embedding.
"""
if not isinstance(params, list):
params = [params]
# Lookup embeddings.
sparse_features = tf.convert_to_tensor(sparse_features)
indices, ids, weights = gen_parser_ops.unpack_sparse_features(sparse_features)
embeddings = tf.nn.embedding_lookup(params, ids)
if allow_weights:
# Multiply by weights, reshaping to allow broadcast.
broadcast_weights_shape = tf.concat([tf.shape(weights), [1]], 0)
embeddings *= tf.reshape(weights, broadcast_weights_shape)
# Sum embeddings by index.
return tf.unsorted_segment_sum(embeddings, indices, tf.size(sparse_features))
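# Shape sketch (hypothetical sizes): for params of shape [vocab_size, dim] and a
# batch of N serialized SparseFeatures strings, the result has shape [N, dim];
# row i is the (optionally weighted) sum of the embeddings of the feature ids
# encoded in sparse_features[i].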
class GreedyParser(object):
"""Builds a Chen & Manning style greedy neural net parser.
Builds a graph with an optional reader op connected at one end and
operations needed to train the network on the other. Supports multiple
network instantiations sharing the same parameters and network topology.
The following named nodes are added to the training and eval networks:
epochs: a tensor containing the current epoch number
cost: a tensor containing the current training step cost
gold_actions: a tensor containing actions from gold decoding
feature_endpoints: a list of sparse feature vectors
logits: output of the final layer before computing softmax
The training network also contains:
train_op: an op that executes a single training step
Typical usage:
parser = graph_builder.GreedyParser(num_actions, num_features,
num_feature_ids, embedding_sizes,
hidden_layer_sizes)
parser.AddTraining(task_context, batch_size=5)
with tf.Session('local') as sess:
# This works because the session uses the same default graph as the
# GraphBuilder did.
sess.run(parser.inits.values())
while True:
tf_epoch, _ = sess.run([parser.training['epoch'],
parser.training['train_op']])
if tf_epoch[0] > 0:
break
"""
def __init__(self,
num_actions,
num_features,
num_feature_ids,
embedding_sizes,
hidden_layer_sizes,
seed=None,
gate_gradients=False,
use_locking=False,
embedding_init=1.0,
relu_init=1e-4,
bias_init=0.2,
softmax_init=1e-4,
averaging_decay=0.9999,
use_averaging=True,
check_parameters=True,
check_every=1,
allow_feature_weights=False,
only_train='',
arg_prefix=None,
**unused_kwargs):
"""Initialize the graph builder with parameters defining the network.
Args:
num_actions: int size of the set of parser actions
num_features: int list of dimensions of the feature vectors
num_feature_ids: int list of same length as num_features corresponding to
the sizes of the input feature spaces
embedding_sizes: int list of same length as num_features of the desired
embedding layer sizes
hidden_layer_sizes: int list of desired relu layer sizes; may be empty
seed: optional random initializer seed to enable reproducibility
gate_gradients: if True, gradient updates are computed synchronously,
ensuring consistency and reproducibility
use_locking: if True, use locking to avoid read-write contention when
updating Variables
embedding_init: sets the std dev of normal initializer of embeddings to
embedding_init / embedding_size ** .5
relu_init: sets the std dev of normal initializer of relu weights
to relu_init
bias_init: sets constant initializer of relu bias to bias_init
      softmax_init: sets the std dev of normal initializer of softmax weights
to softmax_init
averaging_decay: decay for exponential moving average when computing
averaged parameters, set to 1 to do vanilla averaging
use_averaging: whether to use moving averages of parameters during evals
check_parameters: whether to check for NaN/Inf parameters during
training
check_every: checks numerics every check_every steps.
allow_feature_weights: whether feature weights are allowed.
only_train: the comma separated set of parameter names to train. If empty,
all model parameters will be trained.
arg_prefix: prefix for context parameters.
"""
self._num_actions = num_actions
self._num_features = num_features
self._num_feature_ids = num_feature_ids
self._embedding_sizes = embedding_sizes
self._hidden_layer_sizes = hidden_layer_sizes
self._seed = seed
self._gate_gradients = gate_gradients
self._use_locking = use_locking
self._use_averaging = use_averaging
self._check_parameters = check_parameters
self._check_every = check_every
self._allow_feature_weights = allow_feature_weights
self._only_train = set(only_train.split(',')) if only_train else None
self._feature_size = len(embedding_sizes)
self._embedding_init = embedding_init
self._relu_init = relu_init
self._softmax_init = softmax_init
self._arg_prefix = arg_prefix
# Parameters of the network with respect to which training is done.
self.params = {}
# Other variables, with respect to which no training is done, but which we
# nonetheless need to save in order to capture the state of the graph.
self.variables = {}
# Operations to initialize any nodes that require initialization.
self.inits = {}
# Training- and eval-related nodes.
self.training = {}
self.evaluation = {}
self.saver = None
# Nodes to compute moving averages of parameters, called every train step.
self._averaging = {}
self._averaging_decay = averaging_decay
# Pretrained embeddings that can be used instead of constant initializers.
self._pretrained_embeddings = {}
# After the following 'with' statement, we'll be able to re-enter the
# 'params' scope by re-using the self._param_scope member variable. See for
# instance _AddParam.
with tf.name_scope('params') as self._param_scope:
self._relu_bias_init = tf.constant_initializer(bias_init)
@property
def embedding_size(self):
size = 0
for i in range(self._feature_size):
size += self._num_features[i] * self._embedding_sizes[i]
return size
def _AddParam(self,
shape,
dtype,
name,
initializer=None,
return_average=False):
"""Add a model parameter w.r.t. we expect to compute gradients.
_AddParam creates both regular parameters (usually for training) and
averaged nodes (usually for inference). It returns one or the other based
on the 'return_average' arg.
Args:
shape: int list, tensor shape of the parameter to create
dtype: tf.DataType, data type of the parameter
name: string, name of the parameter in the TF graph
      initializer: optional initializer for the parameter
return_average: if False, return parameter otherwise return moving average
Returns:
parameter or averaged parameter
"""
if name not in self.params:
step = tf.cast(self.GetStep(), tf.float32)
# Put all parameters and their initializing ops in their own scope
# irrespective of the current scope (training or eval).
with tf.name_scope(self._param_scope):
self.params[name] = tf.get_variable(name, shape, dtype, initializer)
param = self.params[name]
if initializer is not None:
self.inits[name] = state_ops.init_variable(param, initializer)
if self._averaging_decay == 1:
logging.info('Using vanilla averaging of parameters.')
ema = tf.train.ExponentialMovingAverage(decay=(step / (step + 1.0)),
num_updates=None)
else:
ema = tf.train.ExponentialMovingAverage(decay=self._averaging_decay,
num_updates=step)
self._averaging[name + '_avg_update'] = ema.apply([param])
self.variables[name + '_avg_var'] = ema.average(param)
self.inits[name + '_avg_init'] = state_ops.init_variable(
ema.average(param), tf.zeros_initializer())
return (self.variables[name + '_avg_var'] if return_average else
self.params[name])
def GetStep(self):
def OnesInitializer(shape, dtype=tf.float32, partition_info=None):
return tf.ones(shape, dtype)
return self._AddVariable([], tf.int32, 'step', OnesInitializer)
def _AddVariable(self, shape, dtype, name, initializer=None):
if name in self.variables:
return self.variables[name]
self.variables[name] = tf.get_variable(name, shape, dtype, initializer)
if initializer is not None:
self.inits[name] = state_ops.init_variable(self.variables[name],
initializer)
return self.variables[name]
def _ReluWeightInitializer(self):
with tf.name_scope(self._param_scope):
return tf.random_normal_initializer(stddev=self._relu_init,
seed=self._seed)
def _EmbeddingMatrixInitializer(self, index, embedding_size):
if index in self._pretrained_embeddings:
return self._pretrained_embeddings[index]
else:
return tf.random_normal_initializer(
stddev=self._embedding_init / embedding_size**.5,
seed=self._seed)
def _AddEmbedding(self,
features,
num_features,
num_ids,
embedding_size,
index,
return_average=False):
"""Adds an embedding matrix and passes the `features` vector through it."""
embedding_matrix = self._AddParam(
[num_ids, embedding_size],
tf.float32,
'embedding_matrix_%d' % index,
self._EmbeddingMatrixInitializer(index, embedding_size),
return_average=return_average)
embedding = EmbeddingLookupFeatures(embedding_matrix,
tf.reshape(features,
[-1],
name='feature_%d' % index),
self._allow_feature_weights)
return tf.reshape(embedding, [-1, num_features * embedding_size])
def _BuildNetwork(self, feature_endpoints, return_average=False):
"""Builds a feed-forward part of the net given features as input.
The network topology is already defined in the constructor, so multiple
calls to BuildForward build multiple networks whose parameters are all
shared. It is the source of the input features and the use of the output
that distinguishes each network.
Args:
feature_endpoints: tensors with input features to the network
return_average: whether to use moving averages as model parameters
Returns:
logits: output of the final layer before computing softmax
"""
assert len(feature_endpoints) == self._feature_size
# Create embedding layer.
embeddings = []
for i in range(self._feature_size):
embeddings.append(self._AddEmbedding(feature_endpoints[i],
self._num_features[i],
self._num_feature_ids[i],
self._embedding_sizes[i],
i,
return_average=return_average))
last_layer = tf.concat(embeddings, 1)
last_layer_size = self.embedding_size
# Create ReLU layers.
for i, hidden_layer_size in enumerate(self._hidden_layer_sizes):
weights = self._AddParam(
[last_layer_size, hidden_layer_size],
tf.float32,
'weights_%d' % i,
self._ReluWeightInitializer(),
return_average=return_average)
bias = self._AddParam([hidden_layer_size],
tf.float32,
'bias_%d' % i,
self._relu_bias_init,
return_average=return_average)
last_layer = tf.nn.relu_layer(last_layer,
weights,
bias,
name='layer_%d' % i)
last_layer_size = hidden_layer_size
# Create softmax layer.
softmax_weight = self._AddParam(
[last_layer_size, self._num_actions],
tf.float32,
'softmax_weight',
tf.random_normal_initializer(stddev=self._softmax_init,
seed=self._seed),
return_average=return_average)
softmax_bias = self._AddParam(
[self._num_actions],
tf.float32,
'softmax_bias',
tf.zeros_initializer(),
return_average=return_average)
logits = tf.nn.xw_plus_b(last_layer,
softmax_weight,
softmax_bias,
name='logits')
return {'logits': logits}
def _AddGoldReader(self, task_context, batch_size, corpus_name):
features, epochs, gold_actions = (
gen_parser_ops.gold_parse_reader(task_context,
self._feature_size,
batch_size,
corpus_name=corpus_name,
arg_prefix=self._arg_prefix))
return {'gold_actions': tf.identity(gold_actions,
name='gold_actions'),
'epochs': tf.identity(epochs,
name='epochs'),
'feature_endpoints': features}
def _AddDecodedReader(self, task_context, batch_size, transition_scores,
corpus_name):
features, epochs, eval_metrics, documents = (
gen_parser_ops.decoded_parse_reader(transition_scores,
task_context,
self._feature_size,
batch_size,
corpus_name=corpus_name,
arg_prefix=self._arg_prefix))
return {'eval_metrics': eval_metrics,
'epochs': tf.identity(epochs,
name='epochs'),
'feature_endpoints': features,
'documents': documents}
def _AddCostFunction(self, batch_size, gold_actions, logits):
"""Cross entropy plus L2 loss on weights and biases of the hidden layers."""
dense_golden = BatchedSparseToDense(gold_actions, self._num_actions)
cross_entropy = tf.div(
tf.reduce_sum(
tf.nn.softmax_cross_entropy_with_logits(
labels=dense_golden, logits=logits)), batch_size)
regularized_params = [tf.nn.l2_loss(p)
for k, p in self.params.items()
if k.startswith('weights') or k.startswith('bias')]
l2_loss = 1e-4 * tf.add_n(regularized_params) if regularized_params else 0
return {'cost': tf.add(cross_entropy, l2_loss, name='cost')}
def AddEvaluation(self,
task_context,
batch_size,
evaluation_max_steps=300,
corpus_name='documents'):
"""Builds the forward network only without the training operation.
Args:
task_context: file path from which to read the task context.
batch_size: batch size to request from reader op.
evaluation_max_steps: max number of parsing actions during evaluation,
only used in beam parsing.
corpus_name: name of the task input to read parses from.
Returns:
Dictionary of named eval nodes.
"""
def _AssignTransitionScores():
return tf.assign(nodes['transition_scores'],
nodes['logits'], validate_shape=False)
def _Pass():
return tf.constant(-1.0)
unused_evaluation_max_steps = evaluation_max_steps
with tf.name_scope('evaluation'):
nodes = self.evaluation
nodes['transition_scores'] = self._AddVariable(
[batch_size, self._num_actions], tf.float32, 'transition_scores',
tf.constant_initializer(-1.0))
nodes.update(self._AddDecodedReader(task_context, batch_size, nodes[
'transition_scores'], corpus_name))
nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
return_average=self._use_averaging))
nodes['eval_metrics'] = cf.with_dependencies(
[tf.cond(tf.greater(tf.size(nodes['logits']), 0),
_AssignTransitionScores, _Pass)],
nodes['eval_metrics'], name='eval_metrics')
return nodes
def _IncrementCounter(self, counter):
return state_ops.assign_add(counter, 1, use_locking=True)
def _AddLearningRate(self, initial_learning_rate, decay_steps):
"""Returns a learning rate that decays by 0.96 every decay_steps.
Args:
initial_learning_rate: initial value of the learning rate
decay_steps: decay by 0.96 every this many steps
Returns:
learning rate variable.
"""
step = self.GetStep()
return cf.with_dependencies(
[self._IncrementCounter(step)],
tf.train.exponential_decay(initial_learning_rate,
step,
decay_steps,
0.96,
staircase=True))
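  # Worked example of the staircase decay above (hypothetical settings): with
  # initial_learning_rate=0.1 and decay_steps=4000, the effective rate is
  # 0.1 * 0.96 ** (step // 4000), i.e. 0.1 for steps 0-3999, 0.096 for steps
  # 4000-7999, and so on; the global step counter is also incremented here.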
def AddPretrainedEmbeddings(self, index, embeddings_path, task_context):
"""Embeddings at the given index will be set to pretrained values."""
def _Initializer(shape, dtype=tf.float32, partition_info=None):
unused_dtype = dtype
t = gen_parser_ops.word_embedding_initializer(
vectors=embeddings_path,
task_context=task_context,
embedding_init=self._embedding_init)
t.set_shape(shape)
return t
self._pretrained_embeddings[index] = _Initializer
def AddTraining(self,
task_context,
batch_size,
learning_rate=0.1,
decay_steps=4000,
momentum=0.9,
corpus_name='documents'):
"""Builds a trainer to minimize the cross entropy cost function.
Args:
task_context: file path from which to read the task context
batch_size: batch size to request from reader op
learning_rate: initial value of the learning rate
decay_steps: decay learning rate by 0.96 every this many steps
momentum: momentum parameter used when training with momentum
corpus_name: name of the task input to read parses from
Returns:
Dictionary of named training nodes.
"""
with tf.name_scope('training'):
nodes = self.training
nodes.update(self._AddGoldReader(task_context, batch_size, corpus_name))
nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
return_average=False))
nodes.update(self._AddCostFunction(batch_size, nodes['gold_actions'],
nodes['logits']))
# Add the optimizer
if self._only_train:
trainable_params = [v
for k, v in self.params.iteritems()
if k in self._only_train]
else:
trainable_params = self.params.values()
lr = self._AddLearningRate(learning_rate, decay_steps)
optimizer = tf.train.MomentumOptimizer(lr,
momentum,
use_locking=self._use_locking)
train_op = optimizer.minimize(nodes['cost'], var_list=trainable_params)
for param in trainable_params:
slot = optimizer.get_slot(param, 'momentum')
self.inits[slot.name] = state_ops.init_variable(slot,
tf.zeros_initializer())
self.variables[slot.name] = slot
numerical_checks = [
tf.check_numerics(param,
message='Parameter is not finite.')
for param in trainable_params
if param.dtype.base_dtype in [tf.float32, tf.float64]
]
check_op = tf.group(*numerical_checks)
avg_update_op = tf.group(*self._averaging.values())
train_ops = [train_op]
if self._check_parameters:
train_ops.append(check_op)
if self._use_averaging:
train_ops.append(avg_update_op)
nodes['train_op'] = tf.group(*train_ops, name='train_op')
return nodes
def AddSaver(self, slim_model=False):
"""Adds ops to save and restore model parameters.
Args:
slim_model: whether only averaged variables are saved.
Returns:
the saver object.
"""
# We have to put the save op in the root scope otherwise running
# "save/restore_all" won't find the "save/Const" node it expects.
with tf.name_scope(None):
variables_to_save = self.params.copy()
variables_to_save.update(self.variables)
if slim_model:
for key in variables_to_save.keys():
if not key.endswith('avg_var'):
del variables_to_save[key]
self.saver = tf.train.Saver(variables_to_save)
return self.saver
|
|
# Copyright (c) 2013-2017 Dell Inc, or its subsidiaries.
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import unittest
from eventlet import greenthread
import mock
from oslo_concurrency import processutils
import paramiko
import six
from cinder import context
from cinder import exception
from cinder import ssh_utils
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.dell_emc import ps
class PSSeriesISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(PSSeriesISCSIDriverTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.san_is_local = False
self.configuration.san_ip = "10.0.0.1"
self.configuration.san_login = "foo"
self.configuration.san_password = "bar"
self.configuration.san_ssh_port = 16022
self.configuration.san_thin_provision = True
self.configuration.san_private_key = 'foo'
self.configuration.ssh_min_pool_conn = 1
self.configuration.ssh_max_pool_conn = 5
self.configuration.ssh_conn_timeout = 30
self.configuration.eqlx_pool = 'non-default'
self.configuration.eqlx_group_name = 'group-0'
self.configuration.eqlx_cli_max_retries = 5
self.configuration.use_chap_auth = True
self.configuration.chap_username = 'admin'
self.configuration.chap_password = 'password'
self.configuration.max_over_subscription_ratio = 1.0
self.driver_stats_output = ['TotalCapacity: 111GB',
'FreeSpace: 11GB',
'VolumeReportedSpace: 80GB',
'TotalVolumes: 100']
self.cmd = 'this is dummy command'
self._context = context.get_admin_context()
self.driver = ps.PSSeriesISCSIDriver(
configuration=self.configuration)
self.volume_name = "fakevolume"
self.volid = "fakeid"
self.volume = {'name': self.volume_name,
'display_name': 'fake_display_name'}
self.connector = {
'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:2227dab76162',
'host': 'fakehost'}
self.access_record_output = [
"ID Initiator Ipaddress AuthMethod UserName Apply-To",
"--- --------------- ------------- ---------- ---------- --------",
"1 iqn.1993-08.org.debian:01:222 *.*.*.* none both",
" 7dab76162"]
self.fake_access_id = '1'
self.fake_iqn = 'iqn.2003-10.com.equallogic:group01:25366:fakev'
self.fake_iqn_return = ['iSCSI target name is %s.' % self.fake_iqn]
self.fake_volume_output = ["Size: 5GB",
"iSCSI Name: %s" % self.fake_iqn,
"Description: "]
self.fake_volume_info = {'size': 5.0,
'iSCSI_Name': self.fake_iqn}
self.driver._group_ip = '10.0.1.6'
self.properties = {
'target_discovered': True,
'target_portal': '%s:3260' % self.driver._group_ip,
'target_iqn': self.fake_iqn,
'volume_id': 1,
'discard': True}
self._model_update = {
'provider_location': "%s:3260,1 %s 0" % (self.driver._group_ip,
self.fake_iqn),
'provider_auth': 'CHAP %s %s' % (
self.configuration.chap_username,
self.configuration.chap_password)
}
def _fake_get_iscsi_properties(self, volume):
return self.properties
def test_create_volume(self):
volume = {'name': self.volume_name, 'size': 1}
mock_attrs = {'args': ['volume', 'create', volume['name'],
"%sG" % (volume['size']), 'pool',
self.configuration.eqlx_pool,
'thin-provision']}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.return_value = self.fake_iqn_return
model_update = self.driver.create_volume(volume)
self.assertEqual(self._model_update, model_update)
def test_delete_volume(self):
volume = {'name': self.volume_name, 'size': 1}
show_attrs = {'args': ['volume', 'select', volume['name'], 'show']}
off_attrs = {'args': ['volume', 'select', volume['name'], 'offline']}
delete_attrs = {'args': ['volume', 'delete', volume['name']]}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**show_attrs)
mock_eql_execute.configure_mock(**off_attrs)
mock_eql_execute.configure_mock(**delete_attrs)
self.driver.delete_volume(volume)
def test_delete_absent_volume(self):
volume = {'name': self.volume_name, 'size': 1, 'id': self.volid}
mock_attrs = {'args': ['volume', 'select', volume['name'], 'show']}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.side_effect = processutils.ProcessExecutionError(
stdout='% Error ..... does not exist.\n')
self.driver.delete_volume(volume)
def test_ensure_export(self):
volume = {'name': self.volume_name, 'size': 1}
mock_attrs = {'args': ['volume', 'select', volume['name'], 'show']}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**mock_attrs)
self.driver.ensure_export({}, volume)
def test_create_snapshot(self):
snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'}
snap_name = 'fake_snap_name'
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.return_value = ['Snapshot name is %s' % snap_name]
self.driver.create_snapshot(snapshot)
def test_create_volume_from_snapshot(self):
snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name',
'volume_size': '1'}
volume = {'name': self.volume_name, 'size': '1'}
mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'],
'snapshot', 'select', snapshot['name'],
'clone', volume['name']]}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
with mock.patch.object(self.driver,
'extend_volume') as mock_extend_volume:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.return_value = self.fake_iqn_return
mock_extend_volume.return_value = self.fake_iqn_return
model_update = self.driver.create_volume_from_snapshot(
volume, snapshot)
self.assertEqual(self._model_update, model_update)
self.assertFalse(self.driver.extend_volume.called)
def test_create_volume_from_snapshot_extend(self):
snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name',
'volume_size': '100'}
volume = {'name': self.volume_name, 'size': '200'}
mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'],
'snapshot', 'select', snapshot['name'],
'clone', volume['name']]}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
with mock.patch.object(self.driver,
'extend_volume') as mock_extend_volume:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.return_value = self.fake_iqn_return
mock_extend_volume.return_value = self.fake_iqn_return
model_update = self.driver.create_volume_from_snapshot(
volume, snapshot)
self.assertEqual(self._model_update, model_update)
self.assertTrue(self.driver.extend_volume.called)
self.driver.extend_volume.assert_called_once_with(
volume, volume['size'])
def test_create_cloned_volume(self):
src_vref = {'name': 'fake_uuid', 'size': '1'}
volume = {'name': self.volume_name, 'size': '1'}
mock_attrs = {'args': ['volume', 'select', volume['name'],
'multihost-access', 'enable']}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
with mock.patch.object(self.driver,
'extend_volume') as mock_extend_volume:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.return_value = self.fake_iqn_return
mock_extend_volume.return_value = self.fake_iqn_return
model_update = self.driver.create_cloned_volume(
volume, src_vref)
self.assertEqual(self._model_update, model_update)
self.assertFalse(self.driver.extend_volume.called)
def test_create_cloned_volume_extend(self):
src_vref = {'name': 'fake_uuid', 'size': '100'}
volume = {'name': self.volume_name, 'size': '200'}
mock_attrs = {'args': ['volume', 'select', volume['name'],
'multihost-access', 'enable']}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
with mock.patch.object(self.driver,
'extend_volume') as mock_extend_volume:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.return_value = self.fake_iqn_return
mock_extend_volume.return_value = self.fake_iqn_return
cloned_vol = self.driver.create_cloned_volume(volume, src_vref)
self.assertEqual(self._model_update, cloned_vol)
self.assertTrue(self.driver.extend_volume.called)
def test_delete_snapshot(self):
snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'}
mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'],
'snapshot', 'delete', snapshot['name']]}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**mock_attrs)
self.driver.delete_snapshot(snapshot)
def test_delete_absent_snapshot(self):
snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'}
mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'],
'snapshot', 'delete', snapshot['name']]}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.side_effect = processutils.ProcessExecutionError(
stdout='% Error ..... does not exist.\n')
self.driver.delete_snapshot(snapshot)
def test_extend_volume(self):
new_size = '200'
volume = {'name': self.volume_name, 'size': 100}
mock_attrs = {'args': ['volume', 'select', volume['name'],
'size', "%sG" % new_size]}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**mock_attrs)
self.driver.extend_volume(volume, new_size)
def test_get_volume_info(self):
attrs = ('volume', 'select', self.volume, 'show')
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.return_value = self.fake_volume_output
data = self.driver._get_volume_info(self.volume)
mock_eql_execute.assert_called_with(*attrs)
self.assertEqual(self.fake_volume_info, data)
def test_get_volume_info_negative(self):
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.side_effect = processutils.ProcessExecutionError(
stdout='% Error ..... does not exist.\n')
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver._get_volume_info, self.volume_name)
def test_manage_existing(self):
ref = {'source-name': self.volume_name}
attrs = ('volume', 'select', self.volume_name,
'multihost-access', 'enable')
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
with mock.patch.object(self.driver,
'_get_volume_info') as mock_volume_info:
mock_volume_info.return_value = self.fake_volume_info
mock_eql_execute.return_value = self.fake_iqn_return
model_update = self.driver.manage_existing(self.volume, ref)
mock_eql_execute.assert_called_with(*attrs)
self.assertEqual(self._model_update, model_update)
def test_manage_existing_invalid_ref(self):
ref = {}
self.assertRaises(exception.InvalidInput,
self.driver.manage_existing, self.volume, ref)
def test_manage_existing_get_size(self):
ref = {'source-name': self.volume_name}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.return_value = self.fake_volume_output
size = self.driver.manage_existing_get_size(self.volume, ref)
self.assertEqual(float('5.0'), size)
def test_manage_existing_get_size_invalid_ref(self):
"""Error on manage with invalid reference."""
ref = {}
self.assertRaises(exception.InvalidInput,
self.driver.manage_existing_get_size,
self.volume, ref)
def test_unmanage(self):
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.return_value = None
self.driver.unmanage(self.volume)
def test_initialize_connection(self):
volume = {'name': self.volume_name}
mock_attrs = {'args': ['volume', 'select', volume['name'], 'access',
'create', 'initiator',
self.connector['initiator'],
'authmethod', 'chap',
'username',
self.configuration.chap_username]}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
with mock.patch.object(self.driver,
'_get_iscsi_properties') as mock_iscsi:
mock_eql_execute.configure_mock(**mock_attrs)
mock_iscsi.return_value = self.properties
iscsi_properties = self.driver.initialize_connection(
volume, self.connector)
self.assertEqual(self._fake_get_iscsi_properties(volume),
iscsi_properties['data'])
self.assertTrue(iscsi_properties['data']['discard'])
def test_terminate_connection(self):
def my_side_effect(*args, **kwargs):
if args[4] == 'show':
return self.access_record_output
else:
return ''
volume = {'name': self.volume_name}
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.side_effect = my_side_effect
self.driver.terminate_connection(volume, self.connector)
def test_get_access_record(self):
attrs = ('volume', 'select', self.volume['name'], 'access', 'show')
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.return_value = self.access_record_output
data = self.driver._get_access_record(self.volume, self.connector)
mock_eql_execute.assert_called_with(*attrs)
self.assertEqual(self.fake_access_id, data)
def test_get_access_record_negative(self):
attrs = ('volume', 'select', self.volume['name'], 'access', 'show')
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.return_value = []
data = self.driver._get_access_record(self.volume, self.connector)
mock_eql_execute.assert_called_with(*attrs)
self.assertIsNone(data)
def test_do_setup(self):
fake_group_ip = '10.1.2.3'
def my_side_effect(*args, **kwargs):
if args[0] == 'grpparams':
return ['Group-Ipaddress: %s' % fake_group_ip]
else:
return ''
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.side_effect = my_side_effect
self.driver.do_setup(self._context)
self.assertEqual(fake_group_ip, self.driver._group_ip)
def test_update_volume_stats_thin(self):
mock_attrs = {'args': ['pool', 'select',
self.configuration.eqlx_pool, 'show']}
self.configuration.san_thin_provision = True
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.return_value = self.driver_stats_output
self.driver._update_volume_stats()
self.assert_volume_stats(self.driver._stats)
def test_update_volume_stats_thick(self):
mock_attrs = {'args': ['pool', 'select',
self.configuration.eqlx_pool, 'show']}
self.configuration.san_thin_provision = False
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.return_value = self.driver_stats_output
self.driver._update_volume_stats()
self.assert_volume_stats(self.driver._stats)
def test_get_volume_stats_thin(self):
mock_attrs = {'args': ['pool', 'select',
self.configuration.eqlx_pool, 'show']}
self.configuration.san_thin_provision = True
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.return_value = self.driver_stats_output
stats = self.driver.get_volume_stats(refresh=True)
self.assert_volume_stats(stats)
def test_get_volume_stats_thick(self):
mock_attrs = {'args': ['pool', 'select',
self.configuration.eqlx_pool, 'show']}
self.configuration.san_thin_provision = False
with mock.patch.object(self.driver,
'_eql_execute') as mock_eql_execute:
mock_eql_execute.configure_mock(**mock_attrs)
mock_eql_execute.return_value = self.driver_stats_output
stats = self.driver.get_volume_stats(refresh=True)
self.assert_volume_stats(stats)
def assert_volume_stats(self, stats):
thin_enabled = self.configuration.san_thin_provision
self.assertEqual(float('111.0'), stats['total_capacity_gb'])
self.assertEqual(float('11.0'), stats['free_capacity_gb'])
self.assertEqual(100, stats['total_volumes'])
if thin_enabled:
self.assertEqual(80.0, stats['provisioned_capacity_gb'])
else:
space = stats['total_capacity_gb'] - stats['free_capacity_gb']
self.assertEqual(space, stats['provisioned_capacity_gb'])
self.assertEqual(thin_enabled, stats['thin_provisioning_support'])
self.assertEqual(not thin_enabled,
stats['thick_provisioning_support'])
self.assertEqual('Dell EMC', stats['vendor_name'])
self.assertFalse(stats['multiattach'])
def test_get_space_in_gb(self):
self.assertEqual(123.0, self.driver._get_space_in_gb('123.0GB'))
self.assertEqual(124.0, self.driver._get_space_in_gb('123.5GB'))
self.assertEqual(123.0 * 1024, self.driver._get_space_in_gb('123.0TB'))
self.assertEqual(1.0, self.driver._get_space_in_gb('1024.0MB'))
self.assertEqual(2.0, self.driver._get_space_in_gb('1536.0MB'))
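    # The assertions above pin down the conversion contract: sizes are normalised
    # to GiB and rounded up to the next whole GiB. A minimal standalone sketch
    # consistent with these expectations (not the driver's actual code):
    #
    #     import math
    #     def space_in_gb(size_str):                     # e.g. '123.5GB', '1536.0MB'
    #         value, unit = float(size_str[:-2]), size_str[-2:]
    #         factor = {'MB': 1.0 / 1024, 'GB': 1.0, 'TB': 1024.0}[unit]
    #         return float(math.ceil(value * factor))    # '123.5GB' -> 124.0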
def test_get_output(self):
def _fake_recv(ignore_arg):
return '%s> ' % self.configuration.eqlx_group_name
chan = mock.Mock(paramiko.Channel)
mock_recv = self.mock_object(chan, 'recv')
mock_recv.return_value = '%s> ' % self.configuration.eqlx_group_name
self.assertEqual([_fake_recv(None)], self.driver._get_output(chan))
def test_get_prefixed_value(self):
lines = ['Line1 passed', 'Line1 failed']
prefix = ['Line1', 'Line2']
expected_output = [' passed', None]
self.assertEqual(expected_output[0],
self.driver._get_prefixed_value(lines, prefix[0]))
self.assertEqual(expected_output[1],
self.driver._get_prefixed_value(lines, prefix[1]))
def test_ssh_execute(self):
ssh = mock.Mock(paramiko.SSHClient)
chan = mock.Mock(paramiko.Channel)
transport = mock.Mock(paramiko.Transport)
mock_get_output = self.mock_object(self.driver, '_get_output')
self.mock_object(chan, 'invoke_shell')
expected_output = ['NoError: test run']
mock_get_output.return_value = expected_output
ssh.get_transport.return_value = transport
transport.open_session.return_value = chan
chan.invoke_shell()
chan.send('stty columns 255' + '\r')
chan.send(self.cmd + '\r')
chan.close()
self.assertEqual(expected_output,
self.driver._ssh_execute(ssh, self.cmd))
def test_ssh_execute_error(self):
self.mock_object(self.driver, '_ssh_execute',
side_effect=processutils.ProcessExecutionError)
ssh = mock.Mock(paramiko.SSHClient)
chan = mock.Mock(paramiko.Channel)
transport = mock.Mock(paramiko.Transport)
mock_get_output = self.mock_object(self.driver, '_get_output')
self.mock_object(ssh, 'get_transport')
self.mock_object(chan, 'invoke_shell')
expected_output = ['Error: test run', '% Error']
mock_get_output.return_value = expected_output
        ssh.get_transport.return_value = transport
transport.open_session.return_value = chan
chan.invoke_shell()
chan.send('stty columns 255' + '\r')
chan.send(self.cmd + '\r')
chan.close()
self.assertRaises(processutils.ProcessExecutionError,
self.driver._ssh_execute, ssh, self.cmd)
@mock.patch.object(greenthread, 'sleep')
def test_ensure_retries(self, _gt_sleep):
num_attempts = 3
self.driver.configuration.eqlx_cli_max_retries = num_attempts
self.mock_object(self.driver, '_ssh_execute',
side_effect=exception.VolumeBackendAPIException(
"some error"))
# mocks for calls in _run_ssh
self.mock_object(utils, 'check_ssh_injection')
self.mock_object(ssh_utils, 'SSHPool')
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
self.driver.sshpool = mock.Mock(return_value=sshpool)
ssh = mock.Mock(paramiko.SSHClient)
self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh)
self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False)
# now call the execute
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._eql_execute, "fake command")
self.assertEqual(num_attempts + 1,
self.driver._ssh_execute.call_count)
@mock.patch.object(greenthread, 'sleep')
def test_ensure_connection_retries(self, _gt_sleep):
num_attempts = 3
self.driver.configuration.eqlx_cli_max_retries = num_attempts
self.mock_object(self.driver, '_ssh_execute',
side_effect=processutils.ProcessExecutionError(
stdout='% Error ... some error.\n'))
# mocks for calls in _run_ssh
self.mock_object(utils, 'check_ssh_injection')
self.mock_object(ssh_utils, 'SSHPool')
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
self.driver.sshpool = mock.Mock(return_value=sshpool)
ssh = mock.Mock(paramiko.SSHClient)
self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh)
self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False)
# now call the execute
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._eql_execute, "fake command")
self.assertEqual(num_attempts + 1,
self.driver._ssh_execute.call_count)
@unittest.skip("Skip until bug #1578986 is fixed")
@mock.patch.object(greenthread, 'sleep')
def test_ensure_retries_on_channel_timeout(self, _gt_sleep):
num_attempts = 3
self.driver.configuration.eqlx_cli_max_retries = num_attempts
# mocks for calls and objects in _run_ssh
self.mock_object(utils, 'check_ssh_injection')
self.mock_object(ssh_utils, 'SSHPool')
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
self.driver.sshpool = mock.Mock(return_value=sshpool)
ssh = mock.Mock(paramiko.SSHClient)
self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh)
self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False)
# mocks for _ssh_execute and _get_output
self.mock_object(self.driver, '_get_output',
side_effect=exception.VolumeBackendAPIException(
"some error"))
# now call the execute
with mock.patch('sys.stderr', new=six.StringIO()):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._eql_execute, "fake command")
self.assertEqual(num_attempts + 1, self.driver._get_output.call_count)
@unittest.skip("Skip until bug #1578986 is fixed")
def test_with_timeout(self):
@ps.with_timeout
def no_timeout(cmd, *args, **kwargs):
return 'no timeout'
@ps.with_timeout
def w_timeout(cmd, *args, **kwargs):
time.sleep(1)
self.assertEqual('no timeout', no_timeout('fake cmd'))
self.assertRaises(exception.VolumeBackendAPIException,
w_timeout, 'fake cmd', timeout=0.1)
def test_local_path(self):
self.assertRaises(NotImplementedError, self.driver.local_path, '')
|
|
"""Sorted dictionary implementation.
"""
from collections import Set, Sequence
from collections import KeysView as AbstractKeysView
from collections import ValuesView as AbstractValuesView
from collections import ItemsView as AbstractItemsView
from sys import hexversion
from .sortedlist import SortedList, recursive_repr, SortedListWithKey
from .sortedset import SortedSet
NONE = object()
class _IlocWrapper(object):
"Positional indexing support for sorted dictionary objects."
# pylint: disable=protected-access, too-few-public-methods
def __init__(self, _dict):
self._dict = _dict
def __len__(self):
return len(self._dict)
def __getitem__(self, index):
"""
Very efficiently return the key at index *index* in iteration. Supports
negative indices and slice notation. Raises IndexError on invalid
*index*.
"""
return self._dict._list[index]
def __delitem__(self, index):
"""
Remove the ``sdict[sdict.iloc[index]]`` from *sdict*. Supports negative
indices and slice notation. Raises IndexError on invalid *index*.
"""
_dict = self._dict
_list = _dict._list
_delitem = _dict._delitem
if isinstance(index, slice):
keys = _list[index]
del _list[index]
for key in keys:
_delitem(key)
else:
key = _list[index]
del _list[index]
_delitem(key)
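# A minimal sketch of the positional indexing this wrapper enables (illustrative
# values; ``sd.iloc`` is wired up in SortedDict.__init__ below):
#
#     sd = SortedDict({'b': 2, 'a': 1, 'c': 3})
#     sd.iloc[0]       # 'a'  -- smallest key
#     sd.iloc[-1]      # 'c'  -- largest key
#     del sd.iloc[0]   # removes key 'a' and its value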
class SortedDict(dict):
"""SortedDict provides the same methods as a dict. Additionally, SortedDict
efficiently maintains its keys in sorted order. Consequently, the keys
method will return the keys in sorted order, the popitem method will remove
the item with the highest key, etc.
"""
def __init__(self, *args, **kwargs):
"""SortedDict provides the same methods as a dict. Additionally, SortedDict
efficiently maintains its keys in sorted order. Consequently, the keys
method will return the keys in sorted order, the popitem method will
remove the item with the highest key, etc.
An optional *key* argument defines a callable that, like the `key`
argument to Python's `sorted` function, extracts a comparison key from
each dict key. If no function is specified, the default compares the
dict keys directly. The `key` argument must be provided as a positional
argument and must come before all other arguments.
An optional *load* argument defines the load factor of the internal list
used to maintain sort order. If present, this argument must come before
an iterable. The default load factor of '1000' works well for lists from
tens to tens of millions of elements. Good practice is to use a value
that is the cube root of the list size. With billions of elements, the
best load factor depends on your usage. It's best to leave the load
factor at the default until you start benchmarking.
An optional *iterable* argument provides an initial series of items to
populate the SortedDict. Each item in the series must itself contain
two items. The first is used as a key in the new dictionary, and the
second as the key's value. If a given key is seen more than once, the
last value associated with it is retained in the new dictionary.
If keyword arguments are given, the keywords themselves with their
associated values are added as items to the dictionary. If a key is
specified both in the positional argument and as a keyword argument, the
value associated with the keyword is retained in the dictionary. For
example, these all return a dictionary equal to ``{"one": 2, "two":
3}``:
* ``SortedDict(one=2, two=3)``
* ``SortedDict({'one': 2, 'two': 3})``
* ``SortedDict(zip(('one', 'two'), (2, 3)))``
* ``SortedDict([['two', 3], ['one', 2]])``
The first example only works for keys that are valid Python
identifiers; the others work with any valid keys.
"""
# pylint: disable=super-init-not-called, redefined-variable-type
if len(args) > 0 and (args[0] is None or callable(args[0])):
self._key = args[0]
args = args[1:]
else:
self._key = None
if len(args) > 0 and isinstance(args[0], int):
self._load = args[0]
args = args[1:]
else:
self._load = 1000
if self._key is None:
self._list = SortedList(load=self._load)
else:
self._list = SortedListWithKey(key=self._key, load=self._load)
# Cache function pointers to dict methods.
_dict = super(SortedDict, self)
self._dict = _dict
self._clear = _dict.clear
self._delitem = _dict.__delitem__
self._iter = _dict.__iter__
self._pop = _dict.pop
self._setdefault = _dict.setdefault
self._setitem = _dict.__setitem__
self._dict_update = _dict.update
# Cache function pointers to SortedList methods.
_list = self._list
self._list_add = _list.add
self.bisect_left = _list.bisect_left
self.bisect = _list.bisect_right
self.bisect_right = _list.bisect_right
self._list_clear = _list.clear
self.index = _list.index
self._list_pop = _list.pop
self._list_remove = _list.remove
self._list_update = _list.update
self.irange = _list.irange
self.islice = _list.islice
if self._key is not None:
self.bisect_key_left = _list.bisect_key_left
self.bisect_key_right = _list.bisect_key_right
self.bisect_key = _list.bisect_key
self.irange_key = _list.irange_key
self.iloc = _IlocWrapper(self)
self._update(*args, **kwargs)
def clear(self):
"""Remove all elements from the dictionary."""
self._clear()
self._list_clear()
def __delitem__(self, key):
"""
Remove ``d[key]`` from *d*. Raises a KeyError if *key* is not in the
dictionary.
"""
self._delitem(key)
self._list_remove(key)
def __iter__(self):
"""
Return an iterator over the sorted keys of the dictionary.
Iterating the Mapping while adding or deleting keys may raise a
`RuntimeError` or fail to iterate over all entries.
"""
return iter(self._list)
def __reversed__(self):
"""
Return a reversed iterator over the sorted keys of the dictionary.
Iterating the Mapping while adding or deleting keys may raise a
`RuntimeError` or fail to iterate over all entries.
"""
return reversed(self._list)
def __setitem__(self, key, value):
"""Set `d[key]` to *value*."""
if key not in self:
self._list_add(key)
self._setitem(key, value)
def copy(self):
"""Return a shallow copy of the sorted dictionary."""
return self.__class__(self._key, self._load, self._iteritems())
__copy__ = copy
@classmethod
def fromkeys(cls, seq, value=None):
"""
Create a new dictionary with keys from *seq* and values set to *value*.
"""
return cls((key, value) for key in seq)
if hexversion < 0x03000000:
def items(self):
"""
Return a list of the dictionary's items (``(key, value)`` pairs).
"""
return list(self._iteritems())
else:
def items(self):
"""
Return a new ItemsView of the dictionary's items. In addition to
the methods provided by the built-in `view` the ItemsView is
indexable (e.g. ``d.items()[5]``).
"""
return ItemsView(self)
def iteritems(self):
"""
Return an iterator over the items (``(key, value)`` pairs).
Iterating the Mapping while adding or deleting keys may raise a
`RuntimeError` or fail to iterate over all entries.
"""
return iter((key, self[key]) for key in self._list)
_iteritems = iteritems
if hexversion < 0x03000000:
def keys(self):
"""Return a SortedSet of the dictionary's keys."""
return SortedSet(self._list, key=self._key, load=self._load)
else:
def keys(self):
"""
Return a new KeysView of the dictionary's keys. In addition to the
methods provided by the built-in `view` the KeysView is indexable
(e.g. ``d.keys()[5]``).
"""
return KeysView(self)
def iterkeys(self):
"""
Return an iterator over the sorted keys of the Mapping.
Iterating the Mapping while adding or deleting keys may raise a
`RuntimeError` or fail to iterate over all entries.
"""
return iter(self._list)
if hexversion < 0x03000000:
def values(self):
"""Return a list of the dictionary's values."""
return list(self._itervalues())
else:
def values(self):
"""
Return a new :class:`ValuesView` of the dictionary's values.
In addition to the methods provided by the built-in `view` the
ValuesView is indexable (e.g., ``d.values()[5]``).
"""
return ValuesView(self)
def itervalues(self):
"""
Return an iterator over the values of the Mapping.
Iterating the Mapping while adding or deleting keys may raise a
`RuntimeError` or fail to iterate over all entries.
"""
return iter(self[key] for key in self._list)
_itervalues = itervalues
def pop(self, key, default=NONE):
"""
If *key* is in the dictionary, remove it and return its value,
else return *default*. If *default* is not given and *key* is not in
the dictionary, a KeyError is raised.
"""
if key in self:
self._list_remove(key)
return self._pop(key)
else:
if default is NONE:
raise KeyError(key)
else:
return default
def popitem(self, last=True):
"""
Remove and return a ``(key, value)`` pair from the dictionary. If
last=True (default) then remove the *greatest* `key` from the
        dictionary. Else, remove the *least* key from the dictionary.
        If the dictionary is empty, calling `popitem` raises a
        `KeyError`.
"""
if not len(self):
raise KeyError('popitem(): dictionary is empty')
key = self._list_pop(-1 if last else 0)
value = self._pop(key)
return (key, value)
def peekitem(self, index=-1):
"""Return (key, value) item pair at index.
Unlike ``popitem``, the sorted dictionary is not modified. Index
defaults to -1, the last/greatest key in the dictionary. Specify
        ``index=0`` to look up the first/least key in the dictionary.
If index is out of range, raise IndexError.
"""
key = self._list[index]
return key, self[key]
def setdefault(self, key, default=None):
"""
If *key* is in the dictionary, return its value. If not, insert *key*
with a value of *default* and return *default*. *default* defaults to
``None``.
"""
if key in self:
return self[key]
else:
self._setitem(key, default)
self._list_add(key)
return default
def update(self, *args, **kwargs):
"""
Update the dictionary with the key/value pairs from *other*, overwriting
existing keys.
*update* accepts either another dictionary object or an iterable of
key/value pairs (as a tuple or other iterable of length two). If
keyword arguments are specified, the dictionary is then updated with
those key/value pairs: ``d.update(red=1, blue=2)``.
"""
if not len(self):
self._dict_update(*args, **kwargs)
self._list_update(self._iter())
return
if len(kwargs) == 0 and len(args) == 1 and isinstance(args[0], dict):
pairs = args[0]
else:
pairs = dict(*args, **kwargs)
if (10 * len(pairs)) > len(self):
self._dict_update(pairs)
self._list_clear()
self._list_update(self._iter())
else:
for key in pairs:
self[key] = pairs[key]
_update = update
if hexversion >= 0x02070000:
def viewkeys(self):
"Return ``KeysView`` of dictionary keys."
return KeysView(self)
def viewvalues(self):
"Return ``ValuesView`` of dictionary values."
return ValuesView(self)
def viewitems(self):
"Return ``ItemsView`` of dictionary (key, value) item pairs."
return ItemsView(self)
def __reduce__(self):
return (self.__class__, (self._key, self._load, list(self._iteritems())))
@recursive_repr
def __repr__(self):
temp = '{0}({1}, {2}, {{{3}}})'
items = ', '.join('{0}: {1}'.format(repr(key), repr(self[key]))
for key in self._list)
return temp.format(
self.__class__.__name__,
repr(self._key),
repr(self._load),
items
)
def _check(self):
# pylint: disable=protected-access
self._list._check()
assert len(self) == len(self._list)
assert all(key in self for key in self._list)
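# A short usage sketch of SortedDict (illustrative values), showing the
# constructor forms documented in __init__ and the sorted-order behaviour of
# iteration, peekitem and popitem:
#
#     sd = SortedDict(zip(('one', 'two'), (2, 3)))   # same mapping as SortedDict(one=2, two=3)
#     list(sd)            # ['one', 'two'] -- keys iterate in sorted order
#     sd['three'] = 4
#     sd.peekitem(0)      # ('one', 2)  -- least key; the dict is not modified
#     sd.popitem()        # ('two', 3)  -- removes the greatest key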
class KeysView(AbstractKeysView, Set, Sequence):
"""
A KeysView object is a dynamic view of the dictionary's keys, which
means that when the dictionary's keys change, the view reflects
those changes.
The KeysView class implements the Set and Sequence Abstract Base Classes.
"""
if hexversion < 0x03000000:
def __init__(self, sorted_dict):
"""
Initialize a KeysView from a SortedDict container as *sorted_dict*.
"""
# pylint: disable=super-init-not-called, protected-access
self._list = sorted_dict._list
self._view = sorted_dict._dict.viewkeys()
else:
def __init__(self, sorted_dict):
"""
Initialize a KeysView from a SortedDict container as *sorted_dict*.
"""
# pylint: disable=super-init-not-called, protected-access
self._list = sorted_dict._list
self._view = sorted_dict._dict.keys()
def __len__(self):
"""Return the number of entries in the dictionary."""
return len(self._view)
def __contains__(self, key):
"""
Return True if and only if *key* is one of the underlying dictionary's
keys.
"""
return key in self._view
def __iter__(self):
"""
Return an iterable over the keys in the dictionary. Keys are iterated
over in their sorted order.
Iterating views while adding or deleting entries in the dictionary may
raise a `RuntimeError` or fail to iterate over all entries.
"""
return iter(self._list)
def __getitem__(self, index):
"""Return the key at position *index*."""
return self._list[index]
def __reversed__(self):
"""
Return a reversed iterable over the keys in the dictionary. Keys are
iterated over in their reverse sort order.
Iterating views while adding or deleting entries in the dictionary may
raise a RuntimeError or fail to iterate over all entries.
"""
return reversed(self._list)
def index(self, value, start=None, stop=None):
"""
Return the smallest *k* such that `keysview[k] == value` and `start <= k
        < end`. Raises `ValueError` if *value* is not present. *stop* defaults
to the end of the set. *start* defaults to the beginning. Negative
indexes are supported, as for slice indices.
"""
# pylint: disable=arguments-differ
return self._list.index(value, start, stop)
def count(self, value):
"""Return the number of occurrences of *value* in the set."""
return 1 if value in self._view else 0
def __eq__(self, that):
"""Test set-like equality with *that*."""
return self._view == that
def __ne__(self, that):
"""Test set-like inequality with *that*."""
return self._view != that
def __lt__(self, that):
"""Test whether self is a proper subset of *that*."""
return self._view < that
def __gt__(self, that):
"""Test whether self is a proper superset of *that*."""
return self._view > that
def __le__(self, that):
"""Test whether self is contained within *that*."""
return self._view <= that
def __ge__(self, that):
"""Test whether *that* is contained within self."""
return self._view >= that
def __and__(self, that):
"""Return a SortedSet of the intersection of self and *that*."""
return SortedSet(self._view & that)
def __or__(self, that):
"""Return a SortedSet of the union of self and *that*."""
return SortedSet(self._view | that)
def __sub__(self, that):
"""Return a SortedSet of the difference of self and *that*."""
return SortedSet(self._view - that)
def __xor__(self, that):
"""Return a SortedSet of the symmetric difference of self and *that*."""
return SortedSet(self._view ^ that)
if hexversion < 0x03000000:
def isdisjoint(self, that):
"""Return True if and only if *that* is disjoint with self."""
return not any(key in self._list for key in that)
else:
def isdisjoint(self, that):
"""Return True if and only if *that* is disjoint with self."""
return self._view.isdisjoint(that)
@recursive_repr
def __repr__(self):
return 'SortedDict_keys({0})'.format(repr(list(self)))
class ValuesView(AbstractValuesView, Sequence):
"""
A ValuesView object is a dynamic view of the dictionary's values, which
means that when the dictionary's values change, the view reflects those
changes.
The ValuesView class implements the Sequence Abstract Base Class.
"""
if hexversion < 0x03000000:
def __init__(self, sorted_dict):
"""
Initialize a ValuesView from a SortedDict container as
*sorted_dict*.
"""
# pylint: disable=super-init-not-called, protected-access
self._dict = sorted_dict
self._list = sorted_dict._list
self._view = sorted_dict._dict.viewvalues()
else:
def __init__(self, sorted_dict):
"""
Initialize a ValuesView from a SortedDict container as
*sorted_dict*.
"""
# pylint: disable=super-init-not-called, protected-access
self._dict = sorted_dict
self._list = sorted_dict._list
self._view = sorted_dict._dict.values()
def __len__(self):
"""Return the number of entries in the dictionary."""
return len(self._dict)
def __contains__(self, value):
"""
Return True if and only if *value* is in the underlying Mapping's
values.
"""
return value in self._view
def __iter__(self):
"""
Return an iterator over the values in the dictionary. Values are
iterated over in sorted order of the keys.
Iterating views while adding or deleting entries in the dictionary may
raise a `RuntimeError` or fail to iterate over all entries.
"""
_dict = self._dict
return iter(_dict[key] for key in self._list)
def __getitem__(self, index):
"""
Efficiently return value at *index* in iteration.
Supports slice notation and negative indexes.
"""
_dict, _list = self._dict, self._list
if isinstance(index, slice):
return [_dict[key] for key in _list[index]]
else:
return _dict[_list[index]]
def __reversed__(self):
"""
Return a reverse iterator over the values in the dictionary. Values are
iterated over in reverse sort order of the keys.
Iterating views while adding or deleting entries in the dictionary may
raise a `RuntimeError` or fail to iterate over all entries.
"""
_dict = self._dict
return iter(_dict[key] for key in reversed(self._list))
def index(self, value):
"""
Return index of *value* in self.
Raises ValueError if *value* is not found.
"""
for idx, val in enumerate(self):
if value == val:
return idx
raise ValueError('{0} is not in dict'.format(repr(value)))
if hexversion < 0x03000000:
def count(self, value):
"""Return the number of occurrences of *value* in self."""
return sum(1 for val in self._dict.itervalues() if val == value)
else:
def count(self, value):
"""Return the number of occurrences of *value* in self."""
return sum(1 for val in self._dict.values() if val == value)
def __lt__(self, that):
raise TypeError
def __gt__(self, that):
raise TypeError
def __le__(self, that):
raise TypeError
def __ge__(self, that):
raise TypeError
def __and__(self, that):
raise TypeError
def __or__(self, that):
raise TypeError
def __sub__(self, that):
raise TypeError
def __xor__(self, that):
raise TypeError
@recursive_repr
def __repr__(self):
return 'SortedDict_values({0})'.format(repr(list(self)))
class ItemsView(AbstractItemsView, Set, Sequence):
"""
An ItemsView object is a dynamic view of the dictionary's ``(key,
value)`` pairs, which means that when the dictionary changes, the
view reflects those changes.
The ItemsView class implements the Set and Sequence Abstract Base Classes.
However, the set-like operations (``&``, ``|``, ``-``, ``^``) will only
operate correctly if all of the dictionary's values are hashable.
"""
if hexversion < 0x03000000:
def __init__(self, sorted_dict):
"""
Initialize an ItemsView from a SortedDict container as
*sorted_dict*.
"""
# pylint: disable=super-init-not-called, protected-access
self._dict = sorted_dict
self._list = sorted_dict._list
self._view = sorted_dict._dict.viewitems()
else:
def __init__(self, sorted_dict):
"""
Initialize an ItemsView from a SortedDict container as
*sorted_dict*.
"""
# pylint: disable=super-init-not-called, protected-access
self._dict = sorted_dict
self._list = sorted_dict._list
self._view = sorted_dict._dict.items()
def __len__(self):
"""Return the number of entries in the dictionary."""
return len(self._view)
def __contains__(self, key):
"""
Return True if and only if *key* is one of the underlying dictionary's
items.
"""
return key in self._view
def __iter__(self):
"""
Return an iterable over the items in the dictionary. Items are iterated
over in their sorted order.
Iterating views while adding or deleting entries in the dictionary may
raise a `RuntimeError` or fail to iterate over all entries.
"""
_dict = self._dict
return iter((key, _dict[key]) for key in self._list)
def __getitem__(self, index):
"""Return the item as position *index*."""
_dict, _list = self._dict, self._list
if isinstance(index, slice):
return [(key, _dict[key]) for key in _list[index]]
else:
key = _list[index]
return (key, _dict[key])
def __reversed__(self):
"""
Return a reversed iterable over the items in the dictionary. Items are
iterated over in their reverse sort order.
Iterating views while adding or deleting entries in the dictionary may
raise a RuntimeError or fail to iterate over all entries.
"""
_dict = self._dict
return iter((key, _dict[key]) for key in reversed(self._list))
def index(self, key, start=None, stop=None):
"""
        Return the smallest *k* such that `itemsview[k] == key` and `start <= k
        < end`. Raises `ValueError` if *key* is not present. *stop* defaults
to the end of the set. *start* defaults to the beginning. Negative
indexes are supported, as for slice indices.
"""
# pylint: disable=arguments-differ
temp, value = key
pos = self._list.index(temp, start, stop)
if value == self._dict[temp]:
return pos
else:
raise ValueError('{0} is not in dict'.format(repr(key)))
def count(self, item):
"""Return the number of occurrences of *item* in the set."""
key, value = item
return 1 if key in self._dict and self._dict[key] == value else 0
def __eq__(self, that):
"""Test set-like equality with *that*."""
return self._view == that
def __ne__(self, that):
"""Test set-like inequality with *that*."""
return self._view != that
def __lt__(self, that):
"""Test whether self is a proper subset of *that*."""
return self._view < that
def __gt__(self, that):
"""Test whether self is a proper superset of *that*."""
return self._view > that
def __le__(self, that):
"""Test whether self is contained within *that*."""
return self._view <= that
def __ge__(self, that):
"""Test whether *that* is contained within self."""
return self._view >= that
def __and__(self, that):
"""Return a SortedSet of the intersection of self and *that*."""
return SortedSet(self._view & that)
def __or__(self, that):
"""Return a SortedSet of the union of self and *that*."""
return SortedSet(self._view | that)
def __sub__(self, that):
"""Return a SortedSet of the difference of self and *that*."""
return SortedSet(self._view - that)
def __xor__(self, that):
"""Return a SortedSet of the symmetric difference of self and *that*."""
return SortedSet(self._view ^ that)
if hexversion < 0x03000000:
def isdisjoint(self, that):
"""Return True if and only if *that* is disjoint with self."""
_dict = self._dict
for key, value in that:
if key in _dict and _dict[key] == value:
return False
return True
else:
def isdisjoint(self, that):
"""Return True if and only if *that* is disjoint with self."""
return self._view.isdisjoint(that)
@recursive_repr
def __repr__(self):
return 'SortedDict_items({0})'.format(repr(list(self)))
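# A brief sketch of the view classes above (illustrative values). Unlike the
# built-in dict views, KeysView, ValuesView and ItemsView are also Sequences,
# so they support positional indexing:
#
#     sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
#     sd.keys()[0]       # 'a'
#     sd.values()[-1]    # 3
#     sd.items()[1]      # ('b', 2)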
|
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
e171
lower learning rate
e172
even lower learning rate
e173
slightly higher learning rate!
e175
same as e174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
e176
new cost function
e177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
e178
refactored cost func (functionally equiv to e177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
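# A plain-numpy sketch of what scaled_cost computes (illustrative numbers only):
# the squared error is averaged separately over "on" targets (t > THRESHOLD) and
# "off" targets (t <= THRESHOLD), and the two means are then averaged, presumably
# so that the many zero-power samples do not dominate the cost.
#
#     import numpy as np
#     t = np.array([0.0, 0.0, 0.0, 1.0])
#     x = np.array([0.1, 0.0, 0.0, 0.5])
#     above = ((x - t)[t > 0] ** 2).mean()    # 0.25
#     below = ((x - t)[t <= 0] ** 2).mean()   # 0.01 / 3
#     cost = (above + below) / 2.0            # matches scaled_cost for this batch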
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[2500] * 5,
on_power_thresholds=[5] * 5,
max_input_power=2500,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
subsample_target=3,
input_padding=4,
include_diff=False,
clip_appliance_power=False,
lag=10
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=250,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=0.1, clip_range=(-1, 1)),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 50,
'filter_length': 5,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1 # pool over the time axis
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=13000)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
            print("EXCEPTION:", exception)
            import ipdb; ipdb.set_trace()
            raise
if __name__ == "__main__":
main()
|
|
"""Examine callable regions following genome mapping of short reads.
Identifies callable analysis regions surrounded by larger regions lacking
aligned bases. This allows parallelization of smaller chromosome chunks
through post-processing and variant calling, with each sub-section
mapping handled separately.
Regions are split to try to maintain relative uniformity across the
genome and avoid extremes of large blocks or large numbers of
small blocks.
"""
import contextlib
import copy
from distutils.version import LooseVersion
import operator
import os
import subprocess
import sys
import numpy
import pybedtools
import pysam
import toolz as tz
import yaml
from bcbio import bam, broad, utils
from bcbio.bam import ref
from bcbio.log import logger
from bcbio.distributed import multi, prun
from bcbio.distributed.split import parallel_split_combine
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do, programs
from bcbio.variation import bedutils
from bcbio.variation import multi as vmulti
def parallel_callable_loci(in_bam, ref_file, data):
config = copy.deepcopy(data["config"])
num_cores = config["algorithm"].get("num_cores", 1)
data = {"work_bam": in_bam, "config": config,
"reference": data["reference"]}
parallel = {"type": "local", "cores": num_cores, "module": "bcbio.distributed"}
items = [[data]]
with prun.start(parallel, items, config, multiplier=int(num_cores)) as runner:
split_fn = shared.process_bam_by_chromosome("-callable.bed", "work_bam", remove_alts=True)
out = parallel_split_combine(items, split_fn, runner,
"calc_callable_loci", "combine_bed",
"callable_bed", ["config"])[0]
return out[0]["callable_bed"]
@multi.zeromq_aware_logging
def calc_callable_loci(data, region=None, out_file=None):
"""Determine callable bases for an input BAM in the given region.
"""
if out_file is None:
out_file = "%s-callable.bed" % os.path.splitext(data["work_bam"])[0]
max_depth = dd.get_coverage_depth_max(data)
depth = {"max": max_depth * 7 if max_depth > 0 else sys.maxint - 1,
"min": dd.get_coverage_depth_min(data)}
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
ref_file = tz.get_in(["reference", "fasta", "base"], data)
region_file, calc_callable = _regions_for_coverage(data, region, ref_file, tx_out_file)
if calc_callable:
_group_by_ctype(_get_coverage_file(data["work_bam"], ref_file, region, region_file, depth,
tx_out_file, data),
depth, region_file, tx_out_file, data)
# special case, do not calculate if we are in a chromosome not covered by BED file
else:
os.rename(region_file, tx_out_file)
return [{"callable_bed": out_file, "config": data["config"], "work_bam": data["work_bam"]}]
def _group_by_ctype(bed_file, depth, region_file, out_file, data):
"""Group adjacent callable/uncallble regions into defined intervals.
Uses tips from bedtools discussion:
https://groups.google.com/d/msg/bedtools-discuss/qYDE6XF-GRA/2icQtUeOX_UJ
https://gist.github.com/arq5x/b67196a46db5b63bee06
"""
def assign_coverage(feat):
feat.name = _get_ctype(float(feat.name), depth)
return feat
full_out_file = "%s-full%s" % utils.splitext_plus(out_file)
with open(full_out_file, "w") as out_handle:
kwargs = {"g": [1, 4], "c": [1, 2, 3, 4], "ops": ["first", "first", "max", "first"]}
# back compatible precision https://github.com/chapmanb/bcbio-nextgen/issues/664
if LooseVersion(programs.get_version_manifest("bedtools", data=data, required=True)) >= LooseVersion("2.22.0"):
kwargs["prec"] = 21
for line in open(pybedtools.BedTool(bed_file).each(assign_coverage).saveas()
.groupby(**kwargs).fn):
out_handle.write("\t".join(line.split("\t")[2:]))
pybedtools.BedTool(full_out_file).intersect(region_file, nonamecheck=True).saveas(out_file)
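# For illustration (hypothetical coordinates, assuming depth = {"min": 4, "max": 10000}),
# the genomecov intervals consumed above are relabelled by assign_coverage and then
# collapsed by groupby into runs of the same coverage class:
#
#     chr1  0    100   0    ->  chr1  0    100   NO_COVERAGE
#     chr1  100  150   3    ->  chr1  100  150   LOW_COVERAGE
#     chr1  150  500   25   ->  chr1  150  500   CALLABLE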
def _get_coverage_file(in_bam, ref_file, region, region_file, depth, base_file, data):
"""Retrieve summary of coverage in a region.
Requires positive non-zero mapping quality at a position, matching GATK's
CallableLoci defaults.
"""
out_file = "%s-genomecov.bed" % utils.splitext_plus(base_file)[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
bam.index(in_bam, data["config"])
fai_file = ref.fasta_idx(ref_file, data["config"])
sambamba = config_utils.get_program("sambamba", data["config"])
bedtools = config_utils.get_program("bedtools", data["config"])
max_depth = depth["max"] + 1
cmd = ("{sambamba} view -F 'mapping_quality > 0' -L {region_file} -f bam -l 1 {in_bam} | "
"{bedtools} genomecov -split -ibam stdin -bga -g {fai_file} -max {max_depth} "
"> {tx_out_file}")
do.run(cmd.format(**locals()), "bedtools genomecov: %s" % (str(region)), data)
# Empty output file, no coverage for the whole contig
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feat in get_ref_bedtool(ref_file, data["config"], region):
out_handle.write("%s\t%s\t%s\t%s\n" % (feat.chrom, feat.start, feat.end, 0))
return out_file
def _get_ctype(count, depth):
if count == 0:
return "NO_COVERAGE"
elif count < depth["min"]:
return "LOW_COVERAGE"
elif count > depth["max"]:
return "EXCESSIVE_COVERAGE"
else:
return "CALLABLE"
def _regions_for_coverage(data, region, ref_file, out_file):
"""Retrieve BED file of regions we need to calculate coverage in.
"""
variant_regions = bedutils.merge_overlaps(dd.get_variant_regions(data), data)
ready_region = shared.subset_variant_regions(variant_regions, region, out_file)
custom_file = "%s-coverageregions.bed" % utils.splitext_plus(out_file)[0]
if not ready_region:
get_ref_bedtool(ref_file, data["config"]).saveas(custom_file)
return custom_file, True
elif os.path.isfile(ready_region):
return ready_region, True
elif isinstance(ready_region, (list, tuple)):
c, s, e = ready_region
pybedtools.BedTool("%s\t%s\t%s\n" % (c, s, e), from_string=True).saveas(custom_file)
return custom_file, True
else:
with file_transaction(data, custom_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feat in get_ref_bedtool(ref_file, data["config"], region):
out_handle.write("%s\t%s\t%s\t%s\n" % (feat.chrom, feat.start, feat.end, "NO_COVERAGE"))
return custom_file, variant_regions is None
def sample_callable_bed(bam_file, ref_file, data):
"""Retrieve callable regions for a sample subset by defined analysis regions.
"""
config = data["config"]
out_file = "%s-callable_sample.bed" % os.path.splitext(bam_file)[0]
with shared.bedtools_tmpdir({"config": config}):
callable_bed = parallel_callable_loci(bam_file, ref_file, data)
input_regions_bed = config["algorithm"].get("variant_regions", None)
if not utils.file_uptodate(out_file, callable_bed):
with file_transaction(config, out_file) as tx_out_file:
callable_regions = pybedtools.BedTool(callable_bed)
filter_regions = callable_regions.filter(lambda x: x.name == "CALLABLE")
if input_regions_bed:
if not utils.file_uptodate(out_file, input_regions_bed):
input_regions = pybedtools.BedTool(input_regions_bed)
filter_regions.intersect(input_regions, nonamecheck=True).saveas(tx_out_file)
else:
filter_regions.saveas(tx_out_file)
return out_file
def calculate_offtarget(bam_file, ref_file, data):
"""Generate file of offtarget read counts for inputs with variant regions.
"""
vrs_file = dd.get_variant_regions(data)
if vrs_file:
out_file = "%s-offtarget-stats.yaml" % os.path.splitext(bam_file)[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
offtarget_regions = "%s-regions.bed" % utils.splitext_plus(out_file)[0]
ref_bed = get_ref_bedtool(ref_file, data["config"])
ref_bed.subtract(pybedtools.BedTool(vrs_file), nonamecheck=True).saveas(offtarget_regions)
cmd = ("samtools view -u {bam_file} -L {offtarget_regions} | "
"bedtools intersect -abam - -b {offtarget_regions} -f 1.0 -bed | wc -l")
offtarget_count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
cmd = "samtools idxstats {bam_file} | awk '{{s+=$3}} END {{print s}}'"
mapped_count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
with open(tx_out_file, "w") as out_handle:
yaml.safe_dump({"mapped": mapped_count, "offtarget": offtarget_count}, out_handle,
allow_unicode=False, default_flow_style=False)
return out_file
def get_ref_bedtool(ref_file, config, chrom=None):
"""Retrieve a pybedtool BedTool object with reference sizes from input reference.
"""
broad_runner = broad.runner_from_config(config, "picard")
ref_dict = broad_runner.run_fn("picard_index_ref", ref_file)
ref_lines = []
with contextlib.closing(pysam.Samfile(ref_dict, "r")) as ref_sam:
for sq in ref_sam.header["SQ"]:
if not chrom or sq["SN"] == chrom:
ref_lines.append("%s\t%s\t%s" % (sq["SN"], 0, sq["LN"]))
return pybedtools.BedTool("\n".join(ref_lines), from_string=True)
def _get_nblock_regions(in_file, min_n_size, ref_regions):
"""Retrieve coordinates of regions in reference genome with no mapping.
These are potential breakpoints for parallelizing analysis.
"""
out_lines = []
called_contigs = set([])
with open(in_file) as in_handle:
for line in in_handle:
contig, start, end, ctype = line.rstrip().split()
called_contigs.add(contig)
if (ctype in ["REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE", "LOW_COVERAGE"] and
int(end) - int(start) > min_n_size):
out_lines.append("%s\t%s\t%s\n" % (contig, start, end))
for refr in ref_regions:
if refr.chrom not in called_contigs:
out_lines.append("%s\t%s\t%s\n" % (refr.chrom, 0, refr.stop))
return pybedtools.BedTool("\n".join(out_lines), from_string=True)
def _combine_regions(all_regions, ref_regions):
"""Combine multiple BEDtools regions of regions into sorted final BEDtool.
"""
chrom_order = {}
for i, x in enumerate(ref_regions):
chrom_order[x.chrom] = i
def wchrom_key(x):
chrom, start, end = x
return (chrom_order[chrom], start, end)
all_intervals = []
for region_group in all_regions:
for region in region_group:
all_intervals.append((region.chrom, int(region.start), int(region.stop)))
all_intervals.sort(key=wchrom_key)
bed_lines = ["%s\t%s\t%s" % (c, s, e) for (c, s, e) in all_intervals]
return pybedtools.BedTool("\n".join(bed_lines), from_string=True)
def _add_config_regions(nblock_regions, ref_regions, config):
"""Add additional nblock regions based on configured regions to call.
Identifies user defined regions which we should not be analyzing.
"""
input_regions_bed = config["algorithm"].get("variant_regions", None)
if input_regions_bed:
input_regions = pybedtools.BedTool(input_regions_bed)
# work around problem with single region not subtracted correctly.
if len(input_regions) == 1:
str_regions = str(input_regions[0]).strip()
input_regions = pybedtools.BedTool("%s\n%s" % (str_regions, str_regions),
from_string=True)
input_nblock = ref_regions.subtract(input_regions, nonamecheck=True)
if input_nblock == ref_regions:
raise ValueError("Input variant_region file (%s) "
"excludes all genomic regions. Do the chromosome names "
"in the BED file match your genome (chr1 vs 1)?" % input_regions_bed)
all_intervals = _combine_regions([input_nblock, nblock_regions], ref_regions)
return all_intervals.merge()
else:
return nblock_regions
class NBlockRegionPicker:
"""Choose nblock regions reasonably spaced across chromosomes.
This avoids excessively large blocks and also large numbers of tiny blocks
by splitting to a defined number of blocks.
    Assumes iteration over an ordered input file and needs re-initialization
    for each new file processed, as it keeps track of previous blocks to
    maintain the splitting.
"""
def __init__(self, ref_regions, config, min_n_size):
self._end_buffer = 250 if min_n_size > 50 else 0
self._chr_last_blocks = {}
target_blocks = int(config["algorithm"].get("nomap_split_targets", 200))
self._target_size = self._get_target_size(target_blocks, ref_regions)
self._ref_sizes = {x.chrom: x.stop for x in ref_regions}
def _get_target_size(self, target_blocks, ref_regions):
size = 0
for x in ref_regions:
size += (x.end - x.start)
return size // target_blocks
def include_block(self, x):
"""Check for inclusion of block based on distance from previous.
"""
last_pos = self._chr_last_blocks.get(x.chrom, 0)
# Region excludes an entire chromosome, typically decoy/haplotypes
if last_pos <= self._end_buffer and x.stop >= self._ref_sizes.get(x.chrom, 0) - self._end_buffer:
return True
# Do not split on smaller decoy and haplotype chromosomes
elif self._ref_sizes.get(x.chrom, 0) <= self._target_size:
return False
elif (x.start - last_pos) > self._target_size:
self._chr_last_blocks[x.chrom] = x.stop
return True
else:
return False
def expand_block(self, feat):
"""Expand any blocks which are near the start or end of a contig.
"""
chrom_end = self._ref_sizes.get(feat.chrom)
if chrom_end:
if feat.start < self._end_buffer:
feat.start = 0
if feat.stop >= chrom_end - self._end_buffer:
feat.stop = chrom_end
return feat
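# A minimal sketch of how the picker above is meant to be applied to a BedTool of
# nblock regions (illustrative, not a verbatim call site from this excerpt):
#
#     picker = NBlockRegionPicker(ref_regions, config, min_n_size)
#     split_points = (nblock_regions
#                     .filter(picker.include_block)
#                     .saveas()
#                     .each(picker.expand_block))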
def block_regions(in_bam, ref_file, data):
"""Find blocks of regions for analysis from mapped input BAM file.
    Identifies islands of callable regions, surrounded by regions
with no read support, that can be analyzed independently.
"""
config = data["config"]
min_n_size = int(config["algorithm"].get("nomap_split_size", 100))
with shared.bedtools_tmpdir({"config": config}):
callable_bed = parallel_callable_loci(in_bam, ref_file, data)
nblock_bed = "%s-nblocks%s" % os.path.splitext(callable_bed)
callblock_bed = "%s-callableblocks%s" % os.path.splitext(callable_bed)
if not utils.file_uptodate(nblock_bed, callable_bed):
ref_regions = get_ref_bedtool(ref_file, config)
nblock_regions = _get_nblock_regions(callable_bed, min_n_size, ref_regions)
nblock_regions = _add_config_regions(nblock_regions, ref_regions, config)
nblock_regions.saveas(nblock_bed)
if len(ref_regions.subtract(nblock_regions, nonamecheck=True)) > 0:
ref_regions.subtract(nblock_bed, nonamecheck=True).merge(d=min_n_size).saveas(callblock_bed)
else:
raise ValueError("No callable regions found from BAM file. Alignment regions might "
"not overlap with regions found in your `variant_regions` BED: %s" % in_bam)
return callblock_bed, nblock_bed, callable_bed
def _write_bed_regions(data, final_regions, out_file, out_file_ref):
ref_file = tz.get_in(["reference", "fasta", "base"], data)
ref_regions = get_ref_bedtool(ref_file, data["config"])
noanalysis_regions = ref_regions.subtract(final_regions, nonamecheck=True)
final_regions.saveas(out_file)
noanalysis_regions.saveas(out_file_ref)
def _analysis_block_stats(regions, samples):
"""Provide statistics on sizes and number of analysis blocks.
"""
prev = None
between_sizes = []
region_sizes = []
for region in regions:
if prev and prev.chrom == region.chrom:
between_sizes.append(region.start - prev.end)
region_sizes.append(region.end - region.start)
prev = region
def descriptive_stats(xs):
if len(xs) < 2:
return xs
parts = ["min: %s" % min(xs),
"5%%: %s" % numpy.percentile(xs, 5),
"25%%: %s" % numpy.percentile(xs, 25),
"median: %s" % numpy.percentile(xs, 50),
"75%%: %s" % numpy.percentile(xs, 75),
"95%%: %s" % numpy.percentile(xs, 95),
"99%%: %s" % numpy.percentile(xs, 99),
"max: %s" % max(xs)]
return "\n".join([" " + x for x in parts])
logger.info("Identified %s parallel analysis blocks\n" % len(region_sizes) +
"Block sizes:\n%s\n" % descriptive_stats(region_sizes) +
"Between block sizes:\n%s\n" % descriptive_stats(between_sizes))
if len(region_sizes) == 0:
raise ValueError("No callable regions found in: %s" %
(", ".join([dd.get_sample_name(x) for x in samples])))
def _needs_region_update(out_file, samples):
"""Check if we need to update BED file of regions, supporting back compatibility.
"""
nblock_files = [x["regions"]["nblock"] for x in samples if "regions" in x]
    # For older runs with existing analysis block files, do not create a new
    # set of analysis regions, since the new algorithm would re-do all BAM and
    # variant steps with the new regions
for nblock_file in nblock_files:
test_old = nblock_file.replace("-nblocks", "-analysisblocks")
if os.path.exists(test_old):
return False
# Check if any of the local files have changed so we need to refresh
    for nblock_file in nblock_files:
        if not utils.file_uptodate(out_file, nblock_file):
return True
return False
def combine_sample_regions(*samples):
"""Create batch-level sets of callable regions for multi-sample calling.
Intersects all non-callable (nblock) regions from all samples in a batch,
producing a global set of callable regions.
"""
samples = [x[0] for x in samples]
# back compatibility -- global file for entire sample set
global_analysis_file = os.path.join(samples[0]["dirs"]["work"], "analysis_blocks.bed")
if utils.file_exists(global_analysis_file) and not _needs_region_update(global_analysis_file, samples):
global_no_analysis_file = os.path.join(os.path.dirname(global_analysis_file), "noanalysis_blocks.bed")
else:
global_analysis_file = None
out = []
analysis_files = []
batches = []
with shared.bedtools_tmpdir(samples[0]):
for batch, items in vmulti.group_by_batch(samples, require_bam=False).items():
batches.append(items)
if global_analysis_file:
analysis_file, no_analysis_file = global_analysis_file, global_no_analysis_file
else:
analysis_file, no_analysis_file = _combine_sample_regions_batch(batch, items)
for data in items:
vr_file = dd.get_variant_regions(data)
if analysis_file:
analysis_files.append(analysis_file)
data["config"]["algorithm"]["callable_regions"] = analysis_file
data["config"]["algorithm"]["non_callable_regions"] = no_analysis_file
data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(analysis_file).count()
elif vr_file:
data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(vr_file).count()
highdepth_bed = tz.get_in(["regions", "highdepth"], data)
if highdepth_bed:
data["config"]["algorithm"]["highdepth_regions"] = highdepth_bed
# attach a representative sample for calculating callable region
if not data.get("work_bam"):
for x in items:
if x.get("work_bam"):
data["work_bam_callable"] = x["work_bam"]
out.append([data])
assert len(out) == len(samples)
if len(analysis_files) > 0:
final_regions = pybedtools.BedTool(analysis_files[0])
_analysis_block_stats(final_regions, batches[0])
return out
def _combine_sample_regions_batch(batch, items):
"""Combine sample regions within a group of batched samples.
"""
config = items[0]["config"]
work_dir = utils.safe_makedir(os.path.join(items[0]["dirs"]["work"], "regions"))
analysis_file = os.path.join(work_dir, "%s-analysis_blocks.bed" % batch)
no_analysis_file = os.path.join(work_dir, "%s-noanalysis_blocks.bed" % batch)
if not utils.file_exists(analysis_file) or _needs_region_update(analysis_file, items):
# Combine all nblocks into a final set of intersecting regions
# without callable bases. HT @brentp for intersection approach
# https://groups.google.com/forum/?fromgroups#!topic/bedtools-discuss/qA9wK4zN8do
bed_regions = [pybedtools.BedTool(x["regions"]["nblock"])
for x in items if "regions" in x]
if len(bed_regions) == 0:
analysis_file, no_analysis_file = None, None
else:
with file_transaction(items[0], analysis_file, no_analysis_file) as (tx_afile, tx_noafile):
nblock_regions = reduce(operator.add, bed_regions).saveas(
"%s-nblock%s" % utils.splitext_plus(tx_afile))
ref_file = tz.get_in(["reference", "fasta", "base"], items[0])
ref_regions = get_ref_bedtool(ref_file, config)
min_n_size = int(config["algorithm"].get("nomap_split_size", 100))
block_filter = NBlockRegionPicker(ref_regions, config, min_n_size)
final_nblock_regions = nblock_regions.filter(
block_filter.include_block).saveas().each(block_filter.expand_block).saveas(
"%s-nblockfinal%s" % utils.splitext_plus(tx_afile))
final_regions = ref_regions.subtract(final_nblock_regions, nonamecheck=True).merge(d=min_n_size)
_write_bed_regions(items[0], final_regions, tx_afile, tx_noafile)
if analysis_file and utils.file_exists(analysis_file):
return analysis_file, no_analysis_file
else:
return None, None
|
|
# -*- coding: utf-8 -*-
"""
h2/stream
~~~~~~~~~
An implementation of an HTTP/2 stream.
"""
import warnings
from enum import Enum, IntEnum
from hpack import HeaderTuple
from hyperframe.frame import (
HeadersFrame, ContinuationFrame, DataFrame, WindowUpdateFrame,
RstStreamFrame, PushPromiseFrame, AltSvcFrame
)
from .errors import ErrorCodes, _error_code_from_int
from .events import (
RequestReceived, ResponseReceived, DataReceived, WindowUpdated,
StreamEnded, PushedStreamReceived, StreamReset, TrailersReceived,
InformationalResponseReceived, AlternativeServiceAvailable,
_ResponseSent, _RequestSent, _TrailersSent, _PushedRequestSent
)
from .exceptions import (
ProtocolError, StreamClosedError, InvalidBodyLengthError
)
from .utilities import (
guard_increment_window, is_informational_response, authority_from_headers,
validate_headers, validate_outbound_headers, normalize_outbound_headers,
HeaderValidationFlags, extract_method_header
)
from .windows import WindowManager
class StreamState(IntEnum):
IDLE = 0
RESERVED_REMOTE = 1
RESERVED_LOCAL = 2
OPEN = 3
HALF_CLOSED_REMOTE = 4
HALF_CLOSED_LOCAL = 5
CLOSED = 6
class StreamInputs(Enum):
SEND_HEADERS = 0
SEND_PUSH_PROMISE = 1
SEND_RST_STREAM = 2
SEND_DATA = 3
SEND_WINDOW_UPDATE = 4
SEND_END_STREAM = 5
RECV_HEADERS = 6
RECV_PUSH_PROMISE = 7
RECV_RST_STREAM = 8
RECV_DATA = 9
RECV_WINDOW_UPDATE = 10
RECV_END_STREAM = 11
RECV_CONTINUATION = 12 # Added in 2.0.0
SEND_INFORMATIONAL_HEADERS = 13 # Added in 2.2.0
RECV_INFORMATIONAL_HEADERS = 14 # Added in 2.2.0
SEND_ALTERNATIVE_SERVICE = 15 # Added in 2.3.0
RECV_ALTERNATIVE_SERVICE = 16 # Added in 2.3.0
UPGRADE_CLIENT = 17 # Added 2.3.0
UPGRADE_SERVER = 18 # Added 2.3.0
# This array is initialized once, and is indexed by the stream states above.
# It indicates whether a stream in the given state is open. The reason we do
# this is that we potentially check whether a stream in a given state is open
# quite frequently: given that we check so often, we should do so in the
# fastest way possible.
STREAM_OPEN = [False for _ in range(0, len(StreamState))]
STREAM_OPEN[StreamState.OPEN] = True
STREAM_OPEN[StreamState.HALF_CLOSED_LOCAL] = True
STREAM_OPEN[StreamState.HALF_CLOSED_REMOTE] = True
class H2StreamStateMachine(object):
"""
A single HTTP/2 stream state machine.
    This stream object essentially implements the state machine described in
    RFC 7540 Section 5.1.
:param stream_id: The stream ID of this stream. This is stored primarily
for logging purposes.
"""
def __init__(self, stream_id):
self.state = StreamState.IDLE
self.stream_id = stream_id
#: Whether this peer is the client side of this stream.
self.client = None
# Whether trailers have been sent/received on this stream or not.
self.headers_sent = None
self.trailers_sent = None
self.headers_received = None
self.trailers_received = None
def process_input(self, input_):
"""
Process a specific input in the state machine.
"""
if not isinstance(input_, StreamInputs):
raise ValueError("Input must be an instance of StreamInputs")
try:
func, target_state = _transitions[(self.state, input_)]
except KeyError:
old_state = self.state
self.state = StreamState.CLOSED
raise ProtocolError(
"Invalid input %s in state %s" % (input_, old_state)
)
else:
previous_state = self.state
self.state = target_state
if func is not None:
try:
return func(self, previous_state)
except ProtocolError:
self.state = StreamState.CLOSED
raise
except AssertionError as e: # pragma: no cover
self.state = StreamState.CLOSED
raise ProtocolError(e)
return []
def request_sent(self, previous_state):
"""
Fires when a request is sent.
"""
self.client = True
self.headers_sent = True
event = _RequestSent()
return [event]
def response_sent(self, previous_state):
"""
Fires when something that should be a response is sent. This 'response'
may actually be trailers.
"""
if not self.headers_sent:
if self.client is True or self.client is None:
raise ProtocolError("Client cannot send responses.")
self.headers_sent = True
event = _ResponseSent()
else:
assert not self.trailers_sent
self.trailers_sent = True
event = _TrailersSent()
return [event]
def request_received(self, previous_state):
"""
Fires when a request is received.
"""
assert not self.headers_received
assert not self.trailers_received
self.client = False
self.headers_received = True
event = RequestReceived()
event.stream_id = self.stream_id
return [event]
def response_received(self, previous_state):
"""
Fires when a response is received. Also disambiguates between responses
and trailers.
"""
if not self.headers_received:
assert self.client is True
self.headers_received = True
event = ResponseReceived()
else:
assert not self.trailers_received
self.trailers_received = True
event = TrailersReceived()
event.stream_id = self.stream_id
return [event]
def data_received(self, previous_state):
"""
Fires when data is received.
"""
event = DataReceived()
event.stream_id = self.stream_id
return [event]
def window_updated(self, previous_state):
"""
Fires when a window update frame is received.
"""
event = WindowUpdated()
event.stream_id = self.stream_id
return [event]
def stream_ended(self, previous_state):
"""
Fires when a stream is cleanly ended.
"""
event = StreamEnded()
event.stream_id = self.stream_id
return [event]
def stream_reset(self, previous_state):
"""
Fired when a stream is forcefully reset.
"""
event = StreamReset()
event.stream_id = self.stream_id
return [event]
def send_new_pushed_stream(self, previous_state):
"""
Fires on the newly pushed stream, when pushed by the local peer.
No event here, but definitionally this peer must be a server.
"""
assert self.client is None
self.client = False
self.headers_received = True
return []
def recv_new_pushed_stream(self, previous_state):
"""
Fires on the newly pushed stream, when pushed by the remote peer.
No event here, but definitionally this peer must be a client.
"""
assert self.client is None
self.client = True
self.headers_sent = True
return []
def send_push_promise(self, previous_state):
"""
Fires on the already-existing stream when a PUSH_PROMISE frame is sent.
We may only send PUSH_PROMISE frames if we're a server.
"""
if self.client is True:
raise ProtocolError("Cannot push streams from client peers.")
event = _PushedRequestSent()
return [event]
def recv_push_promise(self, previous_state):
"""
Fires on the already-existing stream when a PUSH_PROMISE frame is
received. We may only receive PUSH_PROMISE frames if we're a client.
Fires a PushedStreamReceived event.
"""
if not self.client:
if self.client is None: # pragma: no cover
msg = "Idle streams cannot receive pushes"
else: # pragma: no cover
msg = "Cannot receive pushed streams as a server"
raise ProtocolError(msg)
event = PushedStreamReceived()
event.parent_stream_id = self.stream_id
return [event]
def send_reset(self, previous_state):
"""
Called when we need to forcefully emit another RST_STREAM frame on
behalf of the state machine.
If this is the first time we've done this, we should also hang an event
off the StreamClosedError so that the user can be informed. We know
it's the first time we've done this if the stream is currently in a
state other than CLOSED.
"""
events = []
if previous_state != StreamState.CLOSED:
event = StreamReset()
event.stream_id = self.stream_id
event.error_code = ErrorCodes.STREAM_CLOSED
event.remote_reset = False
events.append(event)
error = StreamClosedError(self.stream_id)
error._events = events
raise error
def send_on_closed_stream(self, previous_state):
"""
Called when an attempt is made to send data on an already-closed
stream.
This essentially overrides the standard logic by throwing a
more-specific error: StreamClosedError. This is a ProtocolError, so it
matches the standard API of the state machine, but provides more detail
to the user.
"""
assert previous_state == StreamState.CLOSED
raise StreamClosedError(self.stream_id)
def push_on_closed_stream(self, previous_state):
"""
Called when an attempt is made to push on an already-closed stream.
This essentially overrides the standard logic by providing a more
useful error message. It's necessary because simply indicating that the
stream is closed is not enough: there is now a new stream that is not
allowed to be there. The only recourse is to tear the whole connection
down.
"""
assert previous_state == StreamState.CLOSED
raise ProtocolError("Attempted to push on closed stream.")
def send_informational_response(self, previous_state):
"""
Called when an informational header block is sent (that is, a block
where the :status header has a 1XX value).
Only enforces that these are sent *before* final headers are sent.
"""
if self.headers_sent:
raise ProtocolError("Information response after final response")
event = _ResponseSent()
return [event]
def recv_informational_response(self, previous_state):
"""
Called when an informational header block is received (that is, a block
where the :status header has a 1XX value).
"""
if self.headers_received:
raise ProtocolError("Informational response after final response")
event = InformationalResponseReceived()
event.stream_id = self.stream_id
return [event]
def recv_alt_svc(self, previous_state):
"""
Called when receiving an ALTSVC frame.
RFC 7838 allows us to receive ALTSVC frames at any stream state, which
is really absurdly overzealous. For that reason, we want to limit the
states in which we can actually receive it. It's really only sensible
to receive it after we've sent our own headers and before the server
has sent its header block: the server can't guarantee that we have any
state around after it completes its header block, and the server
doesn't know what origin we're talking about before we've sent ours.
For that reason, this function applies a few extra checks on both state
and some of the little state variables we keep around. If those suggest
an unreasonable situation for the ALTSVC frame to have been sent in,
we quietly ignore it (as RFC 7838 suggests).
This function is also *not* always called by the state machine. In some
states (IDLE, RESERVED_LOCAL, CLOSED) we don't bother to call it,
because we know the frame cannot be valid in that state (IDLE because
the server cannot know what origin the stream applies to, CLOSED
because the server cannot assume we still have state around,
RESERVED_LOCAL because by definition if we're in the RESERVED_LOCAL
state then *we* are the server).
"""
# Servers can't receive ALTSVC frames, but RFC 7838 tells us to ignore
# them.
if self.client is False:
return []
# If we've received the response headers from the server they can't
# guarantee we still have any state around. Other implementations
# (like nghttp2) ignore ALTSVC in this state, so we will too.
if self.headers_received:
return []
# Otherwise, this is a sensible enough frame to have received. Return
# the event and let it get populated.
return [AlternativeServiceAvailable()]
def send_alt_svc(self, previous_state):
"""
Called when sending an ALTSVC frame on this stream.
For consistency with the restrictions we apply on receiving ALTSVC
frames in ``recv_alt_svc``, we want to restrict when users can send
ALTSVC frames to the situations when we ourselves would accept them.
That means: when we are a server, when we have received the request
headers, and when we have not yet sent our own response headers.
"""
# We should not send ALTSVC after we've sent response headers, as the
# client may have disposed of its state.
if self.headers_sent:
raise ProtocolError(
"Cannot send ALTSVC after sending response headers."
)
return
# STATE MACHINE
#
# The stream state machine is defined here to avoid the need to allocate it
# repeatedly for each stream. It cannot be defined in the stream class because
# it needs to be able to reference the callbacks defined on the class, but
# because Python's scoping rules are weird the class object is not actually in
# scope during the body of the class object.
#
# For the sake of clarity, we reproduce the RFC 7540 state machine here:
#
# +--------+
# send PP | | recv PP
# ,--------| idle |--------.
# / | | \
# v +--------+ v
# +----------+ | +----------+
# | | | send H / | |
# ,------| reserved | | recv H | reserved |------.
# | | (local) | | | (remote) | |
# | +----------+ v +----------+ |
# | | +--------+ | |
# | | recv ES | | send ES | |
# | send H | ,-------| open |-------. | recv H |
# | | / | | \ | |
# | v v +--------+ v v |
# | +----------+ | +----------+ |
# | | half | | | half | |
# | | closed | | send R / | closed | |
# | | (remote) | | recv R | (local) | |
# | +----------+ | +----------+ |
# | | | | |
# | | send ES / | recv ES / | |
# | | send R / v send R / | |
# | | recv R +--------+ recv R | |
# | send R / `----------->| |<-----------' send R / |
# | recv R | closed | recv R |
# `----------------------->| |<----------------------'
# +--------+
#
# send: endpoint sends this frame
# recv: endpoint receives this frame
#
# H: HEADERS frame (with implied CONTINUATIONs)
# PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
# ES: END_STREAM flag
# R: RST_STREAM frame
#
# For the purposes of this state machine we treat HEADERS and their
# associated CONTINUATION frames as a single jumbo frame. The protocol
# allows/requires this by preventing other frames from being interleaved in
# between HEADERS/CONTINUATION frames. However, if a CONTINUATION frame is
# received without a prior HEADERS frame, it *will* be passed to this state
# machine. The state machine should always reject that frame, either as an
# invalid transition or because the stream is closed.
#
# There is a confusing relationship around PUSH_PROMISE frames. The state
# machine above considers them to be frames belonging to the new stream,
# which is *somewhat* true. However, they are sent with the stream ID of
# their related stream, and are only sendable in some cases.
# For this reason, our state machine implementation below allows for
# PUSH_PROMISE frames not only in the IDLE state (as in the diagram), but also
# in the OPEN, HALF_CLOSED_LOCAL, and HALF_CLOSED_REMOTE states.
# Essentially, for hyper-h2, PUSH_PROMISE frames are effectively sent on
# two streams.
#
# The _transitions dictionary contains a mapping of tuples of
# (state, input) to tuples of (side_effect_function, end_state). This
# map contains all allowed transitions: anything not in this map is
# invalid and immediately causes a transition to ``closed``.
_transitions = {
# State: idle
(StreamState.IDLE, StreamInputs.SEND_HEADERS):
(H2StreamStateMachine.request_sent, StreamState.OPEN),
(StreamState.IDLE, StreamInputs.RECV_HEADERS):
(H2StreamStateMachine.request_received, StreamState.OPEN),
(StreamState.IDLE, StreamInputs.RECV_DATA):
(H2StreamStateMachine.send_reset, StreamState.CLOSED),
(StreamState.IDLE, StreamInputs.SEND_PUSH_PROMISE):
(H2StreamStateMachine.send_new_pushed_stream,
StreamState.RESERVED_LOCAL),
(StreamState.IDLE, StreamInputs.RECV_PUSH_PROMISE):
(H2StreamStateMachine.recv_new_pushed_stream,
StreamState.RESERVED_REMOTE),
(StreamState.IDLE, StreamInputs.RECV_ALTERNATIVE_SERVICE):
(None, StreamState.IDLE),
(StreamState.IDLE, StreamInputs.UPGRADE_CLIENT):
(H2StreamStateMachine.request_sent, StreamState.HALF_CLOSED_LOCAL),
(StreamState.IDLE, StreamInputs.UPGRADE_SERVER):
(H2StreamStateMachine.request_received,
StreamState.HALF_CLOSED_REMOTE),
# State: reserved local
(StreamState.RESERVED_LOCAL, StreamInputs.SEND_HEADERS):
(H2StreamStateMachine.response_sent, StreamState.HALF_CLOSED_REMOTE),
(StreamState.RESERVED_LOCAL, StreamInputs.RECV_DATA):
(H2StreamStateMachine.send_reset, StreamState.CLOSED),
(StreamState.RESERVED_LOCAL, StreamInputs.SEND_WINDOW_UPDATE):
(None, StreamState.RESERVED_LOCAL),
(StreamState.RESERVED_LOCAL, StreamInputs.RECV_WINDOW_UPDATE):
(H2StreamStateMachine.window_updated, StreamState.RESERVED_LOCAL),
(StreamState.RESERVED_LOCAL, StreamInputs.SEND_RST_STREAM):
(None, StreamState.CLOSED),
(StreamState.RESERVED_LOCAL, StreamInputs.RECV_RST_STREAM):
(H2StreamStateMachine.stream_reset, StreamState.CLOSED),
(StreamState.RESERVED_LOCAL, StreamInputs.SEND_ALTERNATIVE_SERVICE):
(H2StreamStateMachine.send_alt_svc, StreamState.RESERVED_LOCAL),
(StreamState.RESERVED_LOCAL, StreamInputs.RECV_ALTERNATIVE_SERVICE):
(None, StreamState.RESERVED_LOCAL),
# State: reserved remote
(StreamState.RESERVED_REMOTE, StreamInputs.RECV_HEADERS):
(H2StreamStateMachine.response_received,
StreamState.HALF_CLOSED_LOCAL),
(StreamState.RESERVED_REMOTE, StreamInputs.RECV_DATA):
(H2StreamStateMachine.send_reset, StreamState.CLOSED),
(StreamState.RESERVED_REMOTE, StreamInputs.SEND_WINDOW_UPDATE):
(None, StreamState.RESERVED_REMOTE),
(StreamState.RESERVED_REMOTE, StreamInputs.RECV_WINDOW_UPDATE):
(H2StreamStateMachine.window_updated, StreamState.RESERVED_REMOTE),
(StreamState.RESERVED_REMOTE, StreamInputs.SEND_RST_STREAM):
(None, StreamState.CLOSED),
(StreamState.RESERVED_REMOTE, StreamInputs.RECV_RST_STREAM):
(H2StreamStateMachine.stream_reset, StreamState.CLOSED),
(StreamState.RESERVED_REMOTE, StreamInputs.RECV_ALTERNATIVE_SERVICE):
(H2StreamStateMachine.recv_alt_svc, StreamState.RESERVED_REMOTE),
# State: open
(StreamState.OPEN, StreamInputs.SEND_HEADERS):
(H2StreamStateMachine.response_sent, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.RECV_HEADERS):
(H2StreamStateMachine.response_received, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.SEND_DATA):
(None, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.RECV_DATA):
(H2StreamStateMachine.data_received, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.SEND_END_STREAM):
(None, StreamState.HALF_CLOSED_LOCAL),
(StreamState.OPEN, StreamInputs.RECV_END_STREAM):
(H2StreamStateMachine.stream_ended, StreamState.HALF_CLOSED_REMOTE),
(StreamState.OPEN, StreamInputs.SEND_WINDOW_UPDATE):
(None, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.RECV_WINDOW_UPDATE):
(H2StreamStateMachine.window_updated, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.SEND_RST_STREAM):
(None, StreamState.CLOSED),
(StreamState.OPEN, StreamInputs.RECV_RST_STREAM):
(H2StreamStateMachine.stream_reset, StreamState.CLOSED),
(StreamState.OPEN, StreamInputs.SEND_PUSH_PROMISE):
(H2StreamStateMachine.send_push_promise, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.RECV_PUSH_PROMISE):
(H2StreamStateMachine.recv_push_promise, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.SEND_INFORMATIONAL_HEADERS):
(H2StreamStateMachine.send_informational_response, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.RECV_INFORMATIONAL_HEADERS):
(H2StreamStateMachine.recv_informational_response, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.SEND_ALTERNATIVE_SERVICE):
(H2StreamStateMachine.send_alt_svc, StreamState.OPEN),
(StreamState.OPEN, StreamInputs.RECV_ALTERNATIVE_SERVICE):
(H2StreamStateMachine.recv_alt_svc, StreamState.OPEN),
# State: half-closed remote
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_HEADERS):
(H2StreamStateMachine.response_sent, StreamState.HALF_CLOSED_REMOTE),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_HEADERS):
(H2StreamStateMachine.send_reset, StreamState.CLOSED),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_DATA):
(None, StreamState.HALF_CLOSED_REMOTE),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_DATA):
(H2StreamStateMachine.send_reset, StreamState.CLOSED),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_END_STREAM):
(None, StreamState.CLOSED),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_WINDOW_UPDATE):
(None, StreamState.HALF_CLOSED_REMOTE),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_WINDOW_UPDATE):
(H2StreamStateMachine.window_updated, StreamState.HALF_CLOSED_REMOTE),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_RST_STREAM):
(None, StreamState.CLOSED),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_RST_STREAM):
(H2StreamStateMachine.stream_reset, StreamState.CLOSED),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_PUSH_PROMISE):
(H2StreamStateMachine.send_push_promise,
StreamState.HALF_CLOSED_REMOTE),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_PUSH_PROMISE):
(H2StreamStateMachine.send_reset, StreamState.CLOSED),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_CONTINUATION):
(H2StreamStateMachine.send_reset, StreamState.CLOSED),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_INFORMATIONAL_HEADERS):
(H2StreamStateMachine.send_informational_response,
StreamState.HALF_CLOSED_REMOTE),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_ALTERNATIVE_SERVICE):
(H2StreamStateMachine.send_alt_svc, StreamState.HALF_CLOSED_REMOTE),
(StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_ALTERNATIVE_SERVICE):
(H2StreamStateMachine.recv_alt_svc, StreamState.HALF_CLOSED_REMOTE),
# State: half-closed local
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_HEADERS):
(H2StreamStateMachine.response_received,
StreamState.HALF_CLOSED_LOCAL),
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_DATA):
(H2StreamStateMachine.data_received, StreamState.HALF_CLOSED_LOCAL),
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_END_STREAM):
(H2StreamStateMachine.stream_ended, StreamState.CLOSED),
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.SEND_WINDOW_UPDATE):
(None, StreamState.HALF_CLOSED_LOCAL),
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_WINDOW_UPDATE):
(H2StreamStateMachine.window_updated, StreamState.HALF_CLOSED_LOCAL),
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.SEND_RST_STREAM):
(None, StreamState.CLOSED),
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_RST_STREAM):
(H2StreamStateMachine.stream_reset, StreamState.CLOSED),
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_PUSH_PROMISE):
(H2StreamStateMachine.recv_push_promise,
StreamState.HALF_CLOSED_LOCAL),
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_INFORMATIONAL_HEADERS):
(H2StreamStateMachine.recv_informational_response,
StreamState.HALF_CLOSED_LOCAL),
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.SEND_ALTERNATIVE_SERVICE):
(H2StreamStateMachine.send_alt_svc, StreamState.HALF_CLOSED_LOCAL),
(StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_ALTERNATIVE_SERVICE):
(H2StreamStateMachine.recv_alt_svc, StreamState.HALF_CLOSED_LOCAL),
# State: closed
(StreamState.CLOSED, StreamInputs.RECV_WINDOW_UPDATE):
(H2StreamStateMachine.window_updated, StreamState.CLOSED),
(StreamState.CLOSED, StreamInputs.RECV_RST_STREAM):
(None, StreamState.CLOSED), # Swallow further RST_STREAMs
(StreamState.CLOSED, StreamInputs.RECV_ALTERNATIVE_SERVICE):
(None, StreamState.CLOSED),
# While closed, all other received frames should cause RST_STREAM
# frames to be emitted. END_STREAM is always carried *by* a frame,
# so it should do nothing.
(StreamState.CLOSED, StreamInputs.RECV_HEADERS):
(H2StreamStateMachine.send_reset, StreamState.CLOSED),
(StreamState.CLOSED, StreamInputs.RECV_DATA):
(H2StreamStateMachine.send_reset, StreamState.CLOSED),
(StreamState.CLOSED, StreamInputs.RECV_PUSH_PROMISE):
(H2StreamStateMachine.push_on_closed_stream, StreamState.CLOSED),
(StreamState.CLOSED, StreamInputs.RECV_END_STREAM):
(None, StreamState.CLOSED),
(StreamState.CLOSED, StreamInputs.RECV_CONTINUATION):
(H2StreamStateMachine.send_reset, StreamState.CLOSED),
# Also, users should be forbidden from sending on closed streams.
(StreamState.CLOSED, StreamInputs.SEND_HEADERS):
(H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
(StreamState.CLOSED, StreamInputs.SEND_PUSH_PROMISE):
(H2StreamStateMachine.push_on_closed_stream, StreamState.CLOSED),
(StreamState.CLOSED, StreamInputs.SEND_RST_STREAM):
(H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
(StreamState.CLOSED, StreamInputs.SEND_DATA):
(H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
(StreamState.CLOSED, StreamInputs.SEND_WINDOW_UPDATE):
(H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
(StreamState.CLOSED, StreamInputs.SEND_END_STREAM):
(H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
}
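# A short illustrative walk through the table above, not used by the library
# itself: driving a client request through its lifecycle with the state
# machine defined in this module. It is defined as a function (and never
# called here) so that importing this module stays side-effect free.
def _demo_client_request_lifecycle():  # pragma: no cover
    machine = H2StreamStateMachine(stream_id=1)
    machine.process_input(StreamInputs.SEND_HEADERS)     # IDLE -> OPEN
    machine.process_input(StreamInputs.SEND_END_STREAM)  # OPEN -> HALF_CLOSED_LOCAL
    assert machine.state == StreamState.HALF_CLOSED_LOCAL
    events = machine.process_input(StreamInputs.RECV_HEADERS)
    assert isinstance(events[0], ResponseReceived)       # response headers arrived
    machine.process_input(StreamInputs.RECV_END_STREAM)  # -> CLOSED
    assert machine.state == StreamState.CLOSED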
class H2Stream(object):
"""
A low-level HTTP/2 stream object. This handles building and receiving
frames and maintains per-stream state.
    This wraps an HTTP/2 stream state machine implementation, ensuring that
frames can only be sent/received when the stream is in a valid state.
Attempts to create frames that cannot be sent will raise a
``ProtocolError``.
"""
def __init__(self,
stream_id,
config,
inbound_window_size,
outbound_window_size):
self.state_machine = H2StreamStateMachine(stream_id)
self.stream_id = stream_id
self.max_outbound_frame_size = None
self.request_method = None
        # The current value of the outbound stream flow control window
self.outbound_flow_control_window = outbound_window_size
# The flow control manager.
self._inbound_window_manager = WindowManager(inbound_window_size)
# The expected content length, if any.
self._expected_content_length = None
# The actual received content length. Always tracked.
self._actual_content_length = 0
# The authority we believe this stream belongs to.
self._authority = None
# The configuration for this stream.
self.config = config
@property
def inbound_flow_control_window(self):
"""
The size of the inbound flow control window for the stream. This is
rarely publicly useful: instead, use :meth:`remote_flow_control_window
        <h2.stream.H2Stream.remote_flow_control_window>`. This property is
        largely present to provide convenient access to this data.
"""
return self._inbound_window_manager.current_window_size
@property
def open(self):
"""
Whether the stream is 'open' in any sense: that is, whether it counts
against the number of concurrent streams.
"""
# RFC 7540 Section 5.1.2 defines 'open' for this purpose to mean either
# the OPEN state or either of the HALF_CLOSED states. Perplexingly,
# this excludes the reserved states.
# For more detail on why we're doing this in this slightly weird way,
# see the comment on ``STREAM_OPEN`` at the top of the file.
return STREAM_OPEN[self.state_machine.state]
@property
def closed(self):
"""
Whether the stream is closed.
"""
return self.state_machine.state == StreamState.CLOSED
def upgrade(self, client_side):
"""
Called by the connection to indicate that this stream is the initial
request/response of an upgraded connection. Places the stream into an
appropriate state.
"""
assert self.stream_id == 1
input_ = (
StreamInputs.UPGRADE_CLIENT if client_side
else StreamInputs.UPGRADE_SERVER
)
# This may return events, we deliberately don't want them.
self.state_machine.process_input(input_)
return
def send_headers(self, headers, encoder, end_stream=False):
"""
Returns a list of HEADERS/CONTINUATION frames to emit as either headers
or trailers.
"""
# Convert headers to two-tuples.
# FIXME: The fallback for dictionary headers is to be removed in 3.0.
try:
headers = headers.items()
warnings.warn(
"Implicit conversion of dictionaries to two-tuples for "
"headers is deprecated and will be removed in 3.0.",
DeprecationWarning
)
except AttributeError:
headers = headers
# Because encoding headers makes an irreversible change to the header
# compression context, we make the state transition before we encode
# them.
# First, check if we're a client. If we are, no problem: if we aren't,
# we need to scan the header block to see if this is an informational
# response.
input_ = StreamInputs.SEND_HEADERS
if ((not self.state_machine.client) and
is_informational_response(headers)):
if end_stream:
raise ProtocolError(
"Cannot set END_STREAM on informational responses."
)
input_ = StreamInputs.SEND_INFORMATIONAL_HEADERS
events = self.state_machine.process_input(input_)
hf = HeadersFrame(self.stream_id)
hdr_validation_flags = self._build_hdr_validation_flags(events)
frames = self._build_headers_frames(
headers, encoder, hf, hdr_validation_flags
)
if end_stream:
# Not a bug: the END_STREAM flag is valid on the initial HEADERS
# frame, not the CONTINUATION frames that follow.
self.state_machine.process_input(StreamInputs.SEND_END_STREAM)
frames[0].flags.add('END_STREAM')
if self.state_machine.trailers_sent and not end_stream:
raise ProtocolError("Trailers must have END_STREAM set.")
if self.state_machine.client and self._authority is None:
self._authority = authority_from_headers(headers)
# store request method for _initialize_content_length
self.request_method = extract_method_header(headers)
return frames
def push_stream_in_band(self, related_stream_id, headers, encoder):
"""
Returns a list of PUSH_PROMISE/CONTINUATION frames to emit as a pushed
stream header. Called on the stream that has the PUSH_PROMISE frame
sent on it.
"""
# Because encoding headers makes an irreversible change to the header
# compression context, we make the state transition *first*.
events = self.state_machine.process_input(
StreamInputs.SEND_PUSH_PROMISE
)
ppf = PushPromiseFrame(self.stream_id)
ppf.promised_stream_id = related_stream_id
hdr_validation_flags = self._build_hdr_validation_flags(events)
frames = self._build_headers_frames(
headers, encoder, ppf, hdr_validation_flags
)
return frames
def locally_pushed(self):
"""
Mark this stream as one that was pushed by this peer. Must be called
immediately after initialization. Sends no frames, simply updates the
state machine.
"""
# This does not trigger any events.
events = self.state_machine.process_input(
StreamInputs.SEND_PUSH_PROMISE
)
assert not events
return []
def send_data(self, data, end_stream=False):
"""
Prepare some data frames. Optionally end the stream.
.. warning:: Does not perform flow control checks.
"""
self.state_machine.process_input(StreamInputs.SEND_DATA)
df = DataFrame(self.stream_id)
df.data = data
if end_stream:
self.state_machine.process_input(StreamInputs.SEND_END_STREAM)
df.flags.add('END_STREAM')
self.outbound_flow_control_window -= len(data)
assert self.outbound_flow_control_window >= 0
return [df]
def end_stream(self):
"""
End a stream without sending data.
"""
self.state_machine.process_input(StreamInputs.SEND_END_STREAM)
df = DataFrame(self.stream_id)
df.flags.add('END_STREAM')
return [df]
def advertise_alternative_service(self, field_value):
"""
Advertise an RFC 7838 alternative service. The semantics of this are
better documented in the ``H2Connection`` class.
"""
self.state_machine.process_input(StreamInputs.SEND_ALTERNATIVE_SERVICE)
asf = AltSvcFrame(self.stream_id)
asf.field = field_value
return [asf]
def increase_flow_control_window(self, increment):
"""
Increase the size of the flow control window for the remote side.
"""
self.state_machine.process_input(StreamInputs.SEND_WINDOW_UPDATE)
self._inbound_window_manager.window_opened(increment)
wuf = WindowUpdateFrame(self.stream_id)
wuf.window_increment = increment
return [wuf]
def receive_push_promise_in_band(self,
promised_stream_id,
headers,
header_encoding):
"""
Receives a push promise frame sent on this stream, pushing a remote
stream. This is called on the stream that has the PUSH_PROMISE sent
on it.
"""
events = self.state_machine.process_input(
StreamInputs.RECV_PUSH_PROMISE
)
events[0].pushed_stream_id = promised_stream_id
if self.config.validate_inbound_headers:
hdr_validation_flags = self._build_hdr_validation_flags(events)
headers = validate_headers(headers, hdr_validation_flags)
if header_encoding:
headers = list(_decode_headers(headers, header_encoding))
events[0].headers = headers
return [], events
def remotely_pushed(self, pushed_headers):
"""
Mark this stream as one that was pushed by the remote peer. Must be
called immediately after initialization. Sends no frames, simply
updates the state machine.
"""
events = self.state_machine.process_input(
StreamInputs.RECV_PUSH_PROMISE
)
self._authority = authority_from_headers(pushed_headers)
return [], events
def receive_headers(self, headers, end_stream, header_encoding):
"""
Receive a set of headers (or trailers).
"""
if is_informational_response(headers):
if end_stream:
raise ProtocolError(
"Cannot set END_STREAM on informational responses"
)
input_ = StreamInputs.RECV_INFORMATIONAL_HEADERS
else:
input_ = StreamInputs.RECV_HEADERS
events = self.state_machine.process_input(input_)
if end_stream:
es_events = self.state_machine.process_input(
StreamInputs.RECV_END_STREAM
)
events[0].stream_ended = es_events[0]
events += es_events
self._initialize_content_length(headers)
if isinstance(events[0], TrailersReceived):
if not end_stream:
raise ProtocolError("Trailers must have END_STREAM set")
if self.config.validate_inbound_headers:
hdr_validation_flags = self._build_hdr_validation_flags(events)
headers = validate_headers(headers, hdr_validation_flags)
if header_encoding:
headers = list(_decode_headers(headers, header_encoding))
events[0].headers = headers
return [], events
def receive_data(self, data, end_stream, flow_control_len):
"""
Receive some data.
"""
events = self.state_machine.process_input(StreamInputs.RECV_DATA)
self._inbound_window_manager.window_consumed(flow_control_len)
self._track_content_length(len(data), end_stream)
if end_stream:
es_events = self.state_machine.process_input(
StreamInputs.RECV_END_STREAM
)
events[0].stream_ended = es_events[0]
events.extend(es_events)
events[0].data = data
events[0].flow_controlled_length = flow_control_len
return [], events
def receive_window_update(self, increment):
"""
Handle a WINDOW_UPDATE increment.
"""
events = self.state_machine.process_input(
StreamInputs.RECV_WINDOW_UPDATE
)
events[0].delta = increment
self.outbound_flow_control_window = guard_increment_window(
self.outbound_flow_control_window,
increment
)
return [], events
def receive_continuation(self):
"""
        A naked CONTINUATION frame has been received. This is always an error,
        but the type of error depends on the state of the stream, and handling
        it must transition the stream's state, so we process it here.
"""
self.state_machine.process_input(
StreamInputs.RECV_CONTINUATION
)
assert False, "Should not be reachable"
def receive_alt_svc(self, frame):
"""
An Alternative Service frame was received on the stream. This frame
inherits the origin associated with this stream.
"""
# If the origin is present, RFC 7838 says we have to ignore it.
if frame.origin:
return [], []
events = self.state_machine.process_input(
StreamInputs.RECV_ALTERNATIVE_SERVICE
)
# There are lots of situations where we want to ignore the ALTSVC
# frame. If we need to pay attention, we'll have an event and should
# fill it out.
if events:
assert isinstance(events[0], AlternativeServiceAvailable)
events[0].origin = self._authority
events[0].field_value = frame.field
return [], events
def reset_stream(self, error_code=0):
"""
Close the stream locally. Reset the stream with an error code.
"""
self.state_machine.process_input(StreamInputs.SEND_RST_STREAM)
rsf = RstStreamFrame(self.stream_id)
rsf.error_code = error_code
return [rsf]
def stream_reset(self, frame):
"""
Handle a stream being reset remotely.
"""
events = self.state_machine.process_input(StreamInputs.RECV_RST_STREAM)
if events:
# We don't fire an event if this stream is already closed.
events[0].error_code = _error_code_from_int(frame.error_code)
return [], events
def acknowledge_received_data(self, acknowledged_size):
"""
The user has informed us that they've processed some amount of data
that was received on this stream. Pass that to the window manager and
potentially return some WindowUpdate frames.
"""
increment = self._inbound_window_manager.process_bytes(
acknowledged_size
)
if increment:
f = WindowUpdateFrame(self.stream_id)
f.window_increment = increment
return [f]
return []
def _build_hdr_validation_flags(self, events):
"""
Constructs a set of header validation flags for use when normalizing
and validating header blocks.
"""
is_trailer = isinstance(
events[0], (_TrailersSent, TrailersReceived)
)
is_response_header = isinstance(
events[0],
(
_ResponseSent,
ResponseReceived,
InformationalResponseReceived
)
)
is_push_promise = isinstance(
events[0], (PushedStreamReceived, _PushedRequestSent)
)
return HeaderValidationFlags(
is_client=self.state_machine.client,
is_trailer=is_trailer,
is_response_header=is_response_header,
is_push_promise=is_push_promise,
)
def _build_headers_frames(self,
headers,
encoder,
first_frame,
hdr_validation_flags):
"""
Helper method to build headers or push promise frames.
"""
# We need to lowercase the header names, and to ensure that secure
# header fields are kept out of compression contexts.
if self.config.normalize_outbound_headers:
headers = normalize_outbound_headers(
headers, hdr_validation_flags
)
if self.config.validate_outbound_headers:
headers = validate_outbound_headers(
headers, hdr_validation_flags
)
encoded_headers = encoder.encode(headers)
# Slice into blocks of max_outbound_frame_size. Be careful with this:
# it only works right because we never send padded frames or priority
# information on the frames. Revisit this if we do.
header_blocks = [
encoded_headers[i:i+self.max_outbound_frame_size]
for i in range(
0, len(encoded_headers), self.max_outbound_frame_size
)
]
frames = []
first_frame.data = header_blocks[0]
frames.append(first_frame)
for block in header_blocks[1:]:
cf = ContinuationFrame(self.stream_id)
cf.data = block
frames.append(cf)
frames[-1].flags.add('END_HEADERS')
return frames
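    # Worked example of the slicing above (hypothetical sizes): a 40-byte
    # encoded header block with max_outbound_frame_size == 16 is cut into
    # chunks of 16, 16 and 8 bytes, emitted as the first frame (HEADERS or
    # PUSH_PROMISE) followed by two CONTINUATION frames, with END_HEADERS set
    # only on the last frame.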
def _initialize_content_length(self, headers):
"""
Checks the headers for a content-length header and initializes the
_expected_content_length field from it. It's not an error for no
Content-Length header to be present.
"""
if self.request_method == b'HEAD':
self._expected_content_length = 0
return
for n, v in headers:
if n == b'content-length':
try:
self._expected_content_length = int(v, 10)
except ValueError:
raise ProtocolError(
"Invalid content-length header: %s" % v
)
return
def _track_content_length(self, length, end_stream):
"""
Update the expected content length in response to data being received.
        Validates that the appropriate amount of data is received. Always
        updates the running total of received data, but only validates the
        length against the content-length header if one was sent.
:param length: The length of the body chunk received.
:param end_stream: If this is the last body chunk received.
"""
self._actual_content_length += length
actual = self._actual_content_length
expected = self._expected_content_length
if expected is not None:
if expected < actual:
raise InvalidBodyLengthError(expected, actual)
if end_stream and expected != actual:
raise InvalidBodyLengthError(expected, actual)
def _inbound_flow_control_change_from_settings(self, delta):
"""
We changed SETTINGS_INITIAL_WINDOW_SIZE, which means we need to
update the target window size for flow control. For our flow control
strategy, this means we need to do two things: we need to adjust the
current window size, but we also need to set the target maximum window
size to the new value.
"""
new_max_size = self._inbound_window_manager.max_window_size + delta
self._inbound_window_manager.window_opened(delta)
self._inbound_window_manager.max_window_size = new_max_size
def _decode_headers(headers, encoding):
"""
Given an iterable of header two-tuples and an encoding, decodes those
headers using that encoding while preserving the type of the header tuple.
This ensures that the use of ``HeaderTuple`` is preserved.
"""
for header in headers:
# This function expects to work on decoded headers, which are always
# HeaderTuple objects.
assert isinstance(header, HeaderTuple)
name, value = header
name = name.decode(encoding)
value = value.decode(encoding)
yield header.__class__(name, value)
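# Illustrative sketch, not part of the library API: _decode_headers keeps the
# HeaderTuple subclass intact while decoding names and values. Defined as a
# function (and never called here) so importing this module has no side
# effects.
def _demo_decode_headers():  # pragma: no cover
    raw = [HeaderTuple(b':status', b'200')]
    decoded = list(_decode_headers(raw, 'utf-8'))
    assert decoded[0] == (u':status', u'200')
    assert isinstance(decoded[0], HeaderTuple)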
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 13:27:20 2017
@author: SANDIPAN
"""
import nltk
import tensorflow as tf
import random
import pickle
import numpy as np
import os
from nltk.tokenize import word_tokenize
from nltk.tag.stanford import StanfordPOSTagger as POS_Tag
java_path = "C:/Program Files/Java/jdk1.8.0_121/bin/java.exe"
os.environ['JAVAHOME'] = java_path
max_length_cmd = []
featureset = []
global count
home = 'F:/Projects II/speech'
_path_to_model = home + '/stanford-postagger/models/english-bidirectional-distsim.tagger'
_path_to_jar = home + '/stanford-postagger/stanford-postagger.jar'
st = POS_Tag(model_filename=_path_to_model, path_to_jar=_path_to_jar)
cmd = open('command.txt','r')
non_cmd = open('non_command.txt','r')
for line in cmd:
max_length_cmd.append(len(line.split()))
for line in non_cmd:
max_length_cmd.append(len(line.split()))
max_length = max(max_length_cmd)
wh_words = ['how', 'what', 'who', 'when', 'whether', 'why', 'which', 'where']
noun_tags = ['NN', 'NNP', 'PRP', 'PRP$']
verb_tags = ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
adverb_tags = ['RB', 'RBR', 'RBS']
def create_features(features, words):
for i in range(0,len(words)):
if words[i][0] in wh_words:
features[i] = 10
elif words[i][1] in noun_tags:
features[i] = 20
elif words[i][1] in verb_tags:
features[i] = 30
elif words[i][1] in adverb_tags:
features[i] = 40
        else:
            # Any remaining POS tag falls through to the catch-all code.
            features[i] = 50
return features
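# Worked example (hypothetical tagged input, no Stanford tagger needed):
# create_features maps (word, POS-tag) pairs onto numeric codes in a
# fixed-length feature vector.
_example_tagged = [('what', 'WP'), ('is', 'VBZ'), ('time', 'NN')]
assert create_features([0, 0, 0, 0], _example_tagged) == [10, 30, 20, 0]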
def make_featuresets(file, classification):
with open(file,'r') as f:
contents = f.readlines()
count = 0
for line in contents:
current_words = word_tokenize(line)
count = count+1
print(count)
tagged_words = st.tag(current_words)
features = [0]*(max_length)
features_in_line = create_features(features, tagged_words)
featureset.append([features_in_line, classification])
return featureset
def featuresets_and_labels(command, non_command, test_size = 0.3):
all_features = []
train_x = []
train_y = []
test_x= []
test_y = []
all_features+= make_featuresets (command, [1,0])
all_features+= make_featuresets (non_command, [0,1]) #changed
#print(all_features)
# all_features = pickle.load(open("feat.p","rb"))
random.shuffle(all_features)
all_features = np.array(all_features)
testing_size = int(test_size*len(all_features))
#testing_size = int(test_size*len(all_features))
#training_size = int(len(all_features) - testing_size)
#for i in range(0,training_size):
# train_x.append(all_features[i][0:1])
# train_y.append(all_features[i][1:2])
#for i in range(training_size+1, len(all_features)):
# test_x.append(all_features[i][0:1])
# test_y.append(all_features[i][1:2])
train_x = list(all_features[:,0][:-testing_size])
train_y = list(all_features[:,1][:-testing_size])
test_x = list(all_features[:,0][-testing_size:])
test_y = list(all_features[:,1][-testing_size:])
print(len(all_features))
#print(features)
return train_x, test_x, train_y, test_y
train_x, test_x, train_y, test_y = featuresets_and_labels('command.txt', 'non_command.txt')
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
#n_nodes_hl4 = 100
n_classes = 2
batch_size = 1
#hm_epochs = 20
x = tf.placeholder('float')
y = tf.placeholder('float')
hidden_1_layer = {'f_fum':n_nodes_hl1,
'weight':tf.Variable(tf.truncated_normal([len(train_x[0]), n_nodes_hl1])),
'bias':tf.Variable(tf.truncated_normal([n_nodes_hl1]))}
hidden_2_layer = {'f_fum':n_nodes_hl2,
'weight':tf.Variable(tf.truncated_normal([n_nodes_hl1, n_nodes_hl2])),
'bias':tf.Variable(tf.truncated_normal([n_nodes_hl2]))}
hidden_3_layer = {'f_fum':n_nodes_hl3,
'weight':tf.Variable(tf.truncated_normal([n_nodes_hl2, n_nodes_hl3])),
'bias':tf.Variable(tf.truncated_normal([n_nodes_hl3]))}
#hidden_4_layer = {'f_fum':n_nodes_hl4,
## 'weight':tf.Variable(tf.truncated_normal([n_nodes_hl3, n_nodes_hl4])),
## 'bias':tf.Variable(tf.truncated_normal([n_nodes_hl4]))}
output_layer = {'f_fum':None,
'weight':tf.Variable(tf.truncated_normal([n_nodes_hl3, n_classes])),
'bias':tf.Variable(tf.truncated_normal([n_classes])),}
def neural_network_model(data):
l1 = tf.add(tf.matmul(data,hidden_1_layer['weight']), hidden_1_layer['bias'])
l1 = tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1,hidden_2_layer['weight']), hidden_2_layer['bias'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2,hidden_3_layer['weight']), hidden_3_layer['bias'])
#l3 = tf.nn.relu(l3)
l3 = tf.nn.relu(l3)
output = tf.matmul(l3,output_layer['weight']) + output_layer['bias']
return output
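# Shape sketch for the network above (hypothetical batch size B, feature
# length F == len(train_x[0])): data [B, F] -> l1 [B, 500] -> l2 [B, 500]
# -> l3 [B, 500] -> output [B, 2], matching the two one-hot classes.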
def train_neural_network(x):
prediction = neural_network_model(x)
# OLD VERSION:
#cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y) )
# # NEW:
ratio =171.0 / (248.0 + 171.0)
class_weight = tf.constant([ratio, 1.0 - ratio])
logits = prediction
keep_prob = tf.placeholder(tf.float32)
weighted_logits = tf.multiply(logits, class_weight)
cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=weighted_logits, labels=y) )
optimizer = tf.train.AdamOptimizer(1e-4).minimize(cost)
hm_epochs = 150
# config=tf.ConfigProto()
# config.gpu_options.allow_growth = True
with tf.Session() as sess:
# OLD:
#sess.run(tf.initialize_all_variables())
# NEW:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
for epoch in range(hm_epochs):
epoch_loss = 0
i = 0
while i<len(train_x):
start = i
end=i+batch_size
batch_x=np.array(train_x[start:end])
batch_y=np.array(train_y[start:end])
_, c=sess.run([optimizer, cost], feed_dict={x:batch_x,y:batch_y,keep_prob:1.0})
epoch_loss+=c
i+=batch_size
print('Epoch', epoch+1, 'Completed out of', hm_epochs, 'loss:', epoch_loss)
correct=tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy=tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:',accuracy.eval({x:train_x, y:train_y,keep_prob:1.0}))
correct=tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy=tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:',accuracy.eval({x:test_x, y:test_y}))
save_path = saver.save(sess, "F:/Projects II/speech/saved_model/classifier1/model.ckpt")
print("Model saved in file: %s" % save_path)
train_neural_network(x)
|
|
# -*- coding: Latin-1 -*-
"""peutils, Portable Executable utilities module
Copyright (c) 2005-2013 Ero Carrera <[email protected]>
All rights reserved.
For detailed copyright information see the file COPYING in
the root of the distribution archive.
"""
import os
import re
import string
import urllib
import pefile
__author__ = 'Ero Carrera'
__version__ = pefile.__version__
__contact__ = '[email protected]'
class SignatureDatabase:
"""This class loads and keeps a parsed PEiD signature database.
Usage:
sig_db = SignatureDatabase('/path/to/signature/file')
and/or
sig_db = SignatureDatabase()
sig_db.load('/path/to/signature/file')
Signature databases can be combined by performing multiple loads.
The filename parameter can be a URL too. In that case the
signature database will be downloaded from that location.
"""
def __init__(self, filename=None, data=None):
# RegExp to match a signature block
#
self.parse_sig = re.compile(
            r'\[(.*?)\]\s+?signature\s*=\s*(.*?)(\s+\?\?)*\s*ep_only\s*=\s*(\w+)(?:\s*section_start_only\s*=\s*(\w+)|)', re.S)
# Signature information
#
# Signatures are stored as trees using dictionaries
# The keys are the byte values while the values for
# each key are either:
#
# - Other dictionaries of the same form for further
# bytes in the signature
#
# - A dictionary with a string as a key (packer name)
# and None as value to indicate a full signature
#
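        # For example, two entry-point signatures "60 E8" (packer 'Foo') and
        # "60 E9" (packer 'Bar') would be stored together as:
        #
        #   {0x60: {0xE8: {'Foo': None}, 0xE9: {'Bar': None}}}
        #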
self.signature_tree_eponly_true = dict ()
self.signature_count_eponly_true = 0
self.signature_tree_eponly_false = dict ()
self.signature_count_eponly_false = 0
self.signature_tree_section_start = dict ()
self.signature_count_section_start = 0
# The depth (length) of the longest signature
#
self.max_depth = 0
self.__load(filename=filename, data=data)
def generate_section_signatures(self, pe, name, sig_length=512):
"""Generates signatures for all the sections in a PE file.
        A signature is created for each section containing at least
        sig_length bytes of raw data. The signature name will be a
        combination of the parameter 'name', the section number and the
        section name.
"""
section_signatures = list()
for idx, section in enumerate(pe.sections):
if section.SizeOfRawData < sig_length:
continue
#offset = pe.get_offset_from_rva(section.VirtualAddress)
offset = section.PointerToRawData
sig_name = '%s Section(%d/%d,%s)' % (
name, idx + 1, len(pe.sections),
''.join([c for c in section.Name if c in string.printable]))
section_signatures.append(
self.__generate_signature(
pe, offset, sig_name, ep_only=False,
section_start_only=True,
sig_length=sig_length) )
return '\n'.join(section_signatures)+'\n'
def generate_ep_signature(self, pe, name, sig_length=512):
"""Generate signatures for the entry point of a PE file.
Creates a signature whose name will be the parameter 'name'
and the section number and its name.
"""
offset = pe.get_offset_from_rva(pe.OPTIONAL_HEADER.AddressOfEntryPoint)
return self.__generate_signature(
pe, offset, name, ep_only=True, sig_length=sig_length)
def __generate_signature(self, pe, offset, name, ep_only=False,
section_start_only=False, sig_length=512):
data = pe.__data__[offset:offset+sig_length]
signature_bytes = ' '.join(['%02x' % ord(c) for c in data])
if ep_only == True:
ep_only = 'true'
else:
ep_only = 'false'
if section_start_only == True:
section_start_only = 'true'
else:
section_start_only = 'false'
signature = '[%s]\nsignature = %s\nep_only = %s\nsection_start_only = %s\n' % (
name, signature_bytes, ep_only, section_start_only)
return signature
def match(self, pe, ep_only=True, section_start_only=False):
"""Matches and returns the exact match(es).
If ep_only is True the result will be a string with
        the packer name. Otherwise it will be a list of tuples of the
        form (file_offset, packer_name), specifying where in the file
        each signature was found.
"""
matches = self.__match(pe, ep_only, section_start_only)
# The last match (the most precise) from the
# list of matches (if any) is returned
#
if matches:
if ep_only == False:
# Get the most exact match for each list of matches
# at a given offset
#
return [(match[0], match[1][-1]) for match in matches]
return matches[1][-1]
return None
def match_all(self, pe, ep_only=True, section_start_only=False):
"""Matches and returns all the likely matches."""
matches = self.__match(pe, ep_only, section_start_only)
if matches:
if ep_only == False:
# Get the most exact match for each list of matches
# at a given offset
#
return matches
return matches[1]
return None
def __match(self, pe, ep_only, section_start_only):
# Load the corresponding set of signatures
# Either the one for ep_only equal to True or
# to False
#
if section_start_only is True:
            # Fetch the raw file data of the executable
#
try :
data = pe.__data__
except Exception as excp :
raise
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_section_start
# Set the starting address to start scanning from
#
scan_addresses = [section.PointerToRawData for section in pe.sections]
elif ep_only is True:
# Fetch the data of the executable as it'd
# look once loaded in memory
#
try :
data = pe.get_memory_mapped_image()
except Exception as excp :
raise
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_eponly_true
# Fetch the entry point of the PE file and the data
# at the entry point
#
ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
# Set the starting address to start scanning from
#
scan_addresses = [ep]
else:
data = pe.__data__
signatures = self.signature_tree_eponly_false
scan_addresses = xrange( len(data) )
# For each start address, check if any signature matches
#
matches = []
for idx in scan_addresses:
result = self.__match_signature_tree(
signatures,
data[idx:idx+self.max_depth])
if result:
matches.append( (idx, result) )
# Return only the matched items found at the entry point if
# ep_only is True (matches will have only one element in that
# case)
#
if ep_only is True:
if matches:
return matches[0]
return matches
def match_data(self, code_data, ep_only=True, section_start_only=False):
data = code_data
scan_addresses = [ 0 ]
# Load the corresponding set of signatures
# Either the one for ep_only equal to True or
# to False
#
if section_start_only is True:
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_section_start
# Set the starting address to start scanning from
#
        elif ep_only is True:
            # Load the corresponding tree of signatures
            #
            signatures = self.signature_tree_eponly_true
        else:
            # Neither entry-point nor section-start matching was requested:
            # fall back to the full signature tree, scanning from offset 0.
            signatures = self.signature_tree_eponly_false
# For each start address, check if any signature matches
#
matches = []
for idx in scan_addresses:
result = self.__match_signature_tree(
signatures,
data[idx:idx+self.max_depth])
if result:
matches.append( (idx, result) )
# Return only the matched items found at the entry point if
# ep_only is True (matches will have only one element in that
# case)
#
if ep_only is True:
if matches:
return matches[0]
return matches
def __match_signature_tree(self, signature_tree, data, depth = 0):
"""Recursive function to find matches along the signature tree.
signature_tree is the part of the tree left to walk
data is the data being checked against the signature tree
depth keeps track of how far we have gone down the tree
"""
matched_names = list ()
match = signature_tree
# Walk the bytes in the data and match them
# against the signature
#
for idx, byte in enumerate ( [ord (b) for b in data] ):
# If the tree is exhausted...
#
if match is None :
break
# Get the next byte in the tree
#
match_next = match.get(byte, None)
# If None is among the values for the key
# it means that a signature in the database
# ends here and that there's an exact match.
#
if None in match.values():
                # idx represents how deep we are in the tree
#
#names = [idx+depth]
names = list()
# For each of the item pairs we check
# if it has an element other than None,
# if not then we have an exact signature
#
for item in match.items():
if item[1] is None :
names.append (item[0])
matched_names.append(names)
# If a wildcard is found keep scanning the signature
# ignoring the byte.
#
if match.has_key ('??') :
match_tree_alternate = match.get ('??', None)
data_remaining = data[idx + 1 :]
if data_remaining:
matched_names.extend(
self.__match_signature_tree(
match_tree_alternate, data_remaining, idx+depth+1))
match = match_next
        # If any more packer names remain at the end of the signature tree,
        # add them to the matches
#
if match is not None and None in match.values():
#names = [idx + depth + 1]
names = list()
for item in match.items() :
if item[1] is None:
names.append(item[0])
matched_names.append(names)
return matched_names
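    # Illustrative sketch, not part of the original module: how a loaded PEiD
    # signature ends up in the nested-dict tree that the matcher above walks.
    # The packer name and bytes below are made up.
    #
    #   tree = {0x60: {0xE8: {0x00: {'Hypothetical Packer v1.0': None}}}}
    #   data = '\x60\xe8\x00\x11\x22\x33'
    #   # After the three signature bytes 0x60 -> 0xE8 -> 0x00 are consumed, the
    #   # dict {'Hypothetical Packer v1.0': None} is reached; the None value marks
    #   # an exact match, so ['Hypothetical Packer v1.0'] is added to matched_names.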
def load(self , filename=None, data=None):
"""Load a PEiD signature file.
Invoking this method on different files combines the signatures.
"""
self.__load(filename=filename, data=data)
def __load(self, filename=None, data=None):
if filename is not None:
# If the path does not exist, attempt to open a URL
#
if not os.path.exists(filename):
try:
sig_f = urllib.urlopen(filename)
sig_data = sig_f.read()
sig_f.close()
except IOError:
# Let this be raised back to the user...
raise
else:
# Get the data for a file
#
try:
sig_f = file( filename, 'rt' )
sig_data = sig_f.read()
sig_f.close()
except IOError:
# Let this be raised back to the user...
raise
else:
sig_data = data
# If the file/URL could not be read or no "raw" data
# was provided there's nothing else to do
#
if not sig_data:
return
# Helper function to parse the signature bytes
#
def to_byte(value) :
if value == '??' or value == '?0' :
return value
return int (value, 16)
# Parse all the signatures in the file
#
matches = self.parse_sig.findall(sig_data)
# For each signature, get the details and load it into the
# signature tree
#
for packer_name, signature, superfluous_wildcards, ep_only, section_start_only in matches:
ep_only = ep_only.strip().lower()
signature = signature.replace('\\n', '').strip()
signature_bytes = [to_byte(b) for b in signature.split()]
if ep_only == 'true':
ep_only = True
else:
ep_only = False
if section_start_only == 'true':
section_start_only = True
else:
section_start_only = False
depth = 0
if section_start_only is True:
tree = self.signature_tree_section_start
self.signature_count_section_start += 1
else:
if ep_only is True :
tree = self.signature_tree_eponly_true
self.signature_count_eponly_true += 1
else :
tree = self.signature_tree_eponly_false
self.signature_count_eponly_false += 1
for idx, byte in enumerate (signature_bytes) :
if idx+1 == len(signature_bytes):
tree[byte] = tree.get( byte, dict() )
tree[byte][packer_name] = None
else :
tree[byte] = tree.get ( byte, dict() )
tree = tree[byte]
depth += 1
if depth > self.max_depth:
self.max_depth = depth
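    # Illustrative usage sketch (assumption: the enclosing class is instantiated
    # like pefile's peutils signature database; 'UserDB.TXT' and 'sample.exe' are
    # placeholder paths, not files shipped with this module):
    #
    #   import pefile
    #   sig_db = SignatureDatabase('UserDB.TXT')
    #   pe = pefile.PE('sample.exe')
    #   print(sig_db.match(pe, ep_only=True))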
def is_valid( pe ):
    """Not implemented yet."""
    pass
def is_suspicious( pe ):
"""
unusual locations of import tables
non recognized section names
presence of long ASCII strings
"""
relocations_overlap_entry_point = False
sequential_relocs = 0
    # If relocation data is found and the entries go over the entry point, are highly
    # contiguous or point outside section boundaries => it might imply that an obfuscation
    # trick is being used or the relocations are corrupt (maybe intentionally)
#
if hasattr(pe, 'DIRECTORY_ENTRY_BASERELOC'):
for base_reloc in pe.DIRECTORY_ENTRY_BASERELOC:
last_reloc_rva = None
for reloc in base_reloc.entries:
if reloc.rva <= pe.OPTIONAL_HEADER.AddressOfEntryPoint <= reloc.rva + 4:
relocations_overlap_entry_point = True
if last_reloc_rva is not None and last_reloc_rva <= reloc.rva <= last_reloc_rva + 4:
sequential_relocs += 1
last_reloc_rva = reloc.rva
    # If import tables or strings are pointed to within the header or in the area
    # between the PE header and the first section, that's suspicious
#
# IMPLEMENT
warnings_while_parsing = False
    # If we have warnings, that's suspicious; some of those will be because
    # out-of-the-ordinary values are found in the PE header fields
# Things that are reported in warnings:
# (parsing problems, special section characteristics i.e. W & X, uncommon values of fields,
# unusual entrypoint, suspicious imports)
#
warnings = pe.get_warnings()
    if warnings:
        warnings_while_parsing = True
    # If there are few or no longer (>8) ascii sequences -- data should come with a fairly
    # standard "density" of strings per kilobyte -- that might indicate packed data
    # (this is similar to the entropy test in some ways but might help to discard cases
    # of legitimate installers or compressed data)
    # If compressed data (high entropy) and is_driver => uuuuhhh, nasty
pass
def is_probably_packed( pe ):
"""Returns True is there is a high likelihood that a file is packed or contains compressed data.
The sections of the PE file will be analyzed, if enough sections
look like containing containing compressed data and the data makes
up for more than 20% of the total file size. The function will
return True.
"""
    # Calculate the length of the data up to the end of the last section in the
# file. Overlay data won't be taken into account
#
total_pe_data_length = len( pe.trim() )
has_significant_amount_of_compressed_data = False
# If some of the sections have high entropy and they make for more than 20% of the file's size
# it's assumed that it could be an installer or a packed file
total_compressed_data = 0
for section in pe.sections:
s_entropy = section.get_entropy()
s_length = len( section.get_data() )
        # The value of 7.4 is empirical, based on looking at a few files packed
        # by different packers
if s_entropy > 7.4:
total_compressed_data += s_length
if ((1.0 * total_compressed_data)/total_pe_data_length) > .2:
has_significant_amount_of_compressed_data = True
return has_significant_amount_of_compressed_data
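# Worked example of the heuristic above (all numbers made up): for a PE with two
# sections of 40 KB and 60 KB where only the 60 KB section has entropy > 7.4:
#
#   total_pe_data_length = 40 * 1024 + 60 * 1024
#   total_compressed_data = 60 * 1024
#   (1.0 * total_compressed_data) / total_pe_data_length   # 0.6 > 0.2 -> reported as packed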
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
import mock
import pytest
import raven
import time
from socket import socket, AF_INET, SOCK_DGRAM
from raven.base import Client, ClientState
from raven.exceptions import RateLimited
from raven.transport import AsyncTransport
from raven.utils.stacks import iter_stack_frames
from raven.utils import six
from raven.utils.testutils import TestCase
class TempStoreClient(Client):
def __init__(self, servers=None, **kwargs):
self.events = []
super(TempStoreClient, self).__init__(servers=servers, **kwargs)
def is_enabled(self):
return True
def send(self, **kwargs):
self.events.append(kwargs)
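# Illustrative sketch (uses only names defined in this file): TempStoreClient
# records what would have been sent to Sentry, so a test can inspect the payload.
#
#   client = TempStoreClient()
#   client.captureMessage('hello')
#   event = client.events.pop(0)
#   assert event['message'] == 'hello'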
class ClientStateTest(TestCase):
def test_should_try_online(self):
state = ClientState()
self.assertEquals(state.should_try(), True)
def test_should_try_new_error(self):
state = ClientState()
state.status = state.ERROR
state.last_check = time.time()
state.retry_number = 1
self.assertEquals(state.should_try(), False)
def test_should_try_time_passed_error(self):
state = ClientState()
state.status = state.ERROR
state.last_check = time.time() - 10
state.retry_number = 1
self.assertEquals(state.should_try(), True)
def test_set_fail(self):
state = ClientState()
state.set_fail()
self.assertEquals(state.status, state.ERROR)
self.assertNotEquals(state.last_check, None)
self.assertEquals(state.retry_number, 1)
def test_set_success(self):
state = ClientState()
state.status = state.ERROR
state.last_check = 'foo'
state.retry_number = 0
state.set_success()
self.assertEquals(state.status, state.ONLINE)
self.assertEquals(state.last_check, None)
self.assertEquals(state.retry_number, 0)
def test_should_try_retry_after(self):
state = ClientState()
state.status = state.ERROR
state.last_check = time.time()
state.retry_number = 1
state.retry_after = 1
self.assertFalse(state.should_try())
def test_should_try_retry_after_passed(self):
state = ClientState()
state.status = state.ERROR
state.last_check = time.time() - 1
state.retry_number = 1
state.retry_after = 1
self.assertTrue(state.should_try())
class ClientTest(TestCase):
def setUp(self):
self.client = TempStoreClient()
def test_first_client_is_singleton(self):
from raven import base
base.Raven = None
client = Client()
client2 = Client()
assert base.Raven is client
assert client is not client2
@mock.patch('raven.transport.http.HTTPTransport.send')
@mock.patch('raven.base.ClientState.should_try')
def test_send_remote_failover(self, should_try, send):
should_try.return_value = True
client = Client(
dsn='sync+http://public:[email protected]/1'
)
# test error
send.side_effect = Exception()
client.send_remote('sync+http://example.com/api/store', 'foo')
self.assertEquals(client.state.status, client.state.ERROR)
# test recovery
send.side_effect = None
client.send_remote('sync+http://example.com/api/store', 'foo')
self.assertEquals(client.state.status, client.state.ONLINE)
@mock.patch('raven.transport.http.HTTPTransport.send')
@mock.patch('raven.base.ClientState.should_try')
def test_send_remote_failover_with_retry_after(self, should_try, send):
should_try.return_value = True
client = Client(
dsn='sync+http://public:[email protected]/1'
)
# test error
send.side_effect = RateLimited('foo', 5)
client.send_remote('sync+http://example.com/api/store', 'foo')
self.assertEquals(client.state.status, client.state.ERROR)
self.assertEqual(client.state.retry_after, 5)
# test recovery
send.side_effect = None
client.send_remote('sync+http://example.com/api/store', 'foo')
self.assertEquals(client.state.status, client.state.ONLINE)
self.assertEqual(client.state.retry_after, 0)
@mock.patch('raven.base.Client._registry.get_transport')
@mock.patch('raven.base.ClientState.should_try')
def test_async_send_remote_failover(self, should_try, get_transport):
should_try.return_value = True
async_transport = AsyncTransport()
async_transport.async_send = async_send = mock.Mock()
get_transport.return_value = async_transport
client = Client(
servers=['http://example.com'],
public_key='public',
secret_key='secret',
project=1,
)
# test immediate raise of error
async_send.side_effect = Exception()
client.send_remote('http://example.com/api/store', 'foo')
self.assertEquals(client.state.status, client.state.ERROR)
# test recovery
client.send_remote('http://example.com/api/store', 'foo')
success_cb = async_send.call_args[0][2]
success_cb()
self.assertEquals(client.state.status, client.state.ONLINE)
# test delayed raise of error
client.send_remote('http://example.com/api/store', 'foo')
failure_cb = async_send.call_args[0][3]
failure_cb(Exception())
self.assertEquals(client.state.status, client.state.ERROR)
@mock.patch('raven.base.Client.send_remote')
@mock.patch('raven.base.time.time')
def test_send(self, time, send_remote):
time.return_value = 1328055286.51
client = Client(
servers=['http://example.com'],
public_key='public',
secret_key='secret',
project=1,
)
client.send(**{
'foo': 'bar',
})
send_remote.assert_called_once_with(
url='http://example.com',
data=six.b('eJyrVkrLz1eyUlBKSixSqgUAIJgEVA=='),
headers={
'User-Agent': 'raven-python/%s' % (raven.VERSION,),
'Content-Type': 'application/octet-stream',
'X-Sentry-Auth': (
'Sentry sentry_timestamp=1328055286.51, '
'sentry_client=raven-python/%s, sentry_version=6, '
'sentry_key=public, '
'sentry_secret=secret' % (raven.VERSION,))
},
)
@mock.patch('raven.base.Client.send_remote')
@mock.patch('raven.base.time.time')
def test_send_with_auth_header(self, time, send_remote):
time.return_value = 1328055286.51
client = Client(
servers=['http://example.com'],
public_key='public',
secret_key='secret',
project=1,
)
client.send(auth_header='foo', **{
'foo': 'bar',
})
send_remote.assert_called_once_with(
url='http://example.com',
data=six.b('eJyrVkrLz1eyUlBKSixSqgUAIJgEVA=='),
headers={
'User-Agent': 'raven-python/%s' % (raven.VERSION,),
'Content-Type': 'application/octet-stream',
'X-Sentry-Auth': 'foo'
},
)
@mock.patch('raven.transport.http.HTTPTransport.send')
@mock.patch('raven.base.ClientState.should_try')
def test_raise_exception_on_send_error(self, should_try, _send_remote):
should_try.return_value = True
client = Client(
servers=['sync+http://example.com'],
public_key='public',
secret_key='secret',
project=1,
)
# Test for the default behaviour in which a send error is handled by the client
_send_remote.side_effect = Exception()
client.capture('Message', data={}, date=None, time_spent=10,
extra={}, stack=None, tags=None, message='Test message')
assert client.state.status == client.state.ERROR
# Test for the case in which a send error is raised to the calling frame.
client = Client(
servers=['sync+http://example.com'],
public_key='public',
secret_key='secret',
project=1,
raise_send_errors=True,
)
with self.assertRaises(Exception):
client.capture('Message', data={}, date=None, time_spent=10,
extra={}, stack=None, tags=None, message='Test message')
def test_encode_decode(self):
data = {'foo': 'bar'}
encoded = self.client.encode(data)
self.assertTrue(type(encoded), str)
self.assertEquals(data, self.client.decode(encoded))
def test_dsn(self):
client = Client(dsn='http://public:[email protected]/1')
self.assertEquals(client.servers, ['http://example.com/api/1/store/'])
self.assertEquals(client.project, '1')
self.assertEquals(client.public_key, 'public')
self.assertEquals(client.secret_key, 'secret')
def test_dsn_as_first_arg(self):
client = Client('http://public:[email protected]/1')
self.assertEquals(client.servers, ['http://example.com/api/1/store/'])
self.assertEquals(client.project, '1')
self.assertEquals(client.public_key, 'public')
self.assertEquals(client.secret_key, 'secret')
def test_slug_in_dsn(self):
client = Client('http://public:[email protected]/slug-name')
self.assertEquals(client.servers, ['http://example.com/api/slug-name/store/'])
self.assertEquals(client.project, 'slug-name')
self.assertEquals(client.public_key, 'public')
self.assertEquals(client.secret_key, 'secret')
def test_get_public_dsn(self):
client = Client('threaded+http://public:[email protected]/1')
public_dsn = client.get_public_dsn()
self.assertEquals(public_dsn, '//[email protected]/1')
def test_get_public_dsn_override_scheme(self):
client = Client('threaded+http://public:[email protected]/1')
public_dsn = client.get_public_dsn('https')
self.assertEquals(public_dsn, 'https://[email protected]/1')
def test_explicit_message_on_message_event(self):
self.client.captureMessage(message='test', data={
'message': 'foo'
})
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
self.assertEquals(event['message'], 'foo')
def test_message_from_kwargs(self):
try:
raise ValueError('foo')
except ValueError:
self.client.captureException(message='test', data={})
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
self.assertEquals(event['message'], 'test')
def test_explicit_message_on_exception_event(self):
try:
raise ValueError('foo')
except ValueError:
self.client.captureException(data={'message': 'foobar'})
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
self.assertEquals(event['message'], 'foobar')
def test_exception_event(self):
try:
raise ValueError('foo')
except ValueError:
self.client.captureException()
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
self.assertEquals(event['message'], 'ValueError: foo')
self.assertTrue('exception' in event)
exc = event['exception']['values'][0]
self.assertEquals(exc['type'], 'ValueError')
self.assertEquals(exc['value'], 'foo')
self.assertEquals(exc['module'], ValueError.__module__) # this differs in some Python versions
assert 'stacktrace' not in event
stacktrace = exc['stacktrace']
self.assertEquals(len(stacktrace['frames']), 1)
frame = stacktrace['frames'][0]
self.assertEquals(frame['abs_path'], __file__.replace('.pyc', '.py'))
self.assertEquals(frame['filename'], 'tests/base/tests.py')
self.assertEquals(frame['module'], __name__)
self.assertEquals(frame['function'], 'test_exception_event')
self.assertTrue('timestamp' in event)
def test_decorator_preserves_function(self):
@self.client.capture_exceptions
def test1():
return 'foo'
self.assertEquals(test1(), 'foo')
class DecoratorTestException(Exception):
pass
def test_decorator_functionality(self):
@self.client.capture_exceptions
def test2():
raise self.DecoratorTestException()
try:
test2()
except self.DecoratorTestException:
pass
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
self.assertEquals(event['message'], 'DecoratorTestException')
exc = event['exception']['values'][0]
self.assertEquals(exc['type'], 'DecoratorTestException')
self.assertEquals(exc['module'], self.DecoratorTestException.__module__)
stacktrace = exc['stacktrace']
# this is a wrapped function so two frames are expected
self.assertEquals(len(stacktrace['frames']), 2)
frame = stacktrace['frames'][1]
self.assertEquals(frame['module'], __name__)
self.assertEquals(frame['function'], 'test2')
def test_decorator_filtering(self):
@self.client.capture_exceptions(self.DecoratorTestException)
def test3():
raise Exception()
try:
test3()
except Exception:
pass
self.assertEquals(len(self.client.events), 0)
def test_message_event(self):
self.client.captureMessage(message='test')
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
self.assertEquals(event['message'], 'test')
assert 'stacktrace' not in event
self.assertTrue('timestamp' in event)
def test_context(self):
self.client.context.merge({
'tags': {'foo': 'bar'},
})
try:
raise ValueError('foo')
except ValueError:
self.client.captureException()
else:
self.fail('Exception should have been raised')
assert len(self.client.events) == 1
event = self.client.events.pop(0)
assert event['tags'] == {'foo': 'bar'}
def test_stack_explicit_frames(self):
def bar():
return inspect.stack()
frames = bar()
self.client.captureMessage('test', stack=iter_stack_frames(frames))
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
self.assertEquals(event['message'], 'test')
assert 'stacktrace' in event
self.assertEquals(len(frames), len(event['stacktrace']['frames']))
for frame, frame_i in zip(frames, event['stacktrace']['frames']):
self.assertEquals(frame[0].f_code.co_filename, frame_i['abs_path'])
self.assertEquals(frame[0].f_code.co_name, frame_i['function'])
def test_stack_auto_frames(self):
self.client.captureMessage('test', stack=True)
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
self.assertEquals(event['message'], 'test')
self.assertTrue('stacktrace' in event)
self.assertTrue('timestamp' in event)
def test_site(self):
self.client.captureMessage(message='test', data={'site': 'test'})
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
assert 'site' in event['tags']
assert event['tags']['site'] == 'test'
def test_implicit_site(self):
self.client = TempStoreClient(site='foo')
self.client.captureMessage(message='test')
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
assert 'site' in event['tags']
assert event['tags']['site'] == 'foo'
def test_logger(self):
self.client.captureMessage(message='test', data={'logger': 'test'})
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
self.assertEquals(event['logger'], 'test')
self.assertTrue('timestamp' in event)
def test_tags(self):
self.client.captureMessage(message='test', tags={'logger': 'test'})
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
self.assertEquals(event['tags'], {'logger': 'test'})
def test_client_extra_context(self):
self.client.extra = {
'foo': 'bar',
'logger': 'baz',
}
self.client.captureMessage(message='test', extra={'logger': 'test'})
self.assertEquals(len(self.client.events), 1)
event = self.client.events.pop(0)
if six.PY3:
expected = {'logger': "'test'", 'foo': "'bar'"}
else:
expected = {'logger': "u'test'", 'foo': "u'bar'"}
self.assertEquals(event['extra'], expected)
# TODO: Python 3
@pytest.mark.skipif(str("six.PY3"))
class ClientUDPTest(TestCase):
def setUp(self):
self.server_socket = socket(AF_INET, SOCK_DGRAM)
self.server_socket.bind(('127.0.0.1', 0))
self.client = Client(servers=["udp://%s:%s" % self.server_socket.getsockname()], key='BassOmatic')
def test_delivery(self):
self.client.captureMessage('test')
data, address = self.server_socket.recvfrom(2 ** 16)
self.assertTrue("\n\n" in data)
header, payload = data.split("\n\n")
for substring in ("sentry_timestamp=", "sentry_client="):
self.assertTrue(substring in header)
def tearDown(self):
self.server_socket.close()
|
|
"""
Penalty matrix generators
"""
import warnings
import scipy as sp
import numpy as np
def derivative(n, coef, derivative=2, periodic=False):
"""
Builds a penalty matrix for P-Splines with continuous features.
Penalizes the squared differences between basis coefficients.
Parameters
----------
n : int
number of splines
coef : unused
for compatibility with constraints
derivative: int, default: 2
which derivative do we penalize.
derivative is 1, we penalize 1st order derivatives,
derivative is 2, we penalize 2nd order derivatives, etc
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n)
"""
if n == 1:
# no derivative for constant functions
return sp.sparse.csc_matrix(0.)
D = sparse_diff(sp.sparse.identity(n + 2*derivative*periodic).tocsc(), n=derivative).tolil()
if periodic:
# wrap penalty
cols = D[:, :derivative]
D[:, -2 * derivative:-derivative] += cols * (-1) ** derivative
# do symmetric operation on lower half of matrix
n_rows = int((n + 2 * derivative)/2)
D[-n_rows:] = D[:n_rows][::-1, ::-1]
# keep only the center of the augmented matrix
D = D[derivative:-derivative, derivative:-derivative]
return D.dot(D.T).tocsc()
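# Illustrative sketch, not part of the library: for n = 4 splines and the default
# second-order derivative, the penalty is built from second differences of the
# identity, giving roughly (up to dtype and print formatting):
#
#   derivative(4, None).todense()
#   # matrix([[ 1., -2.,  1.,  0.],
#   #         [-2.,  5., -4.,  1.],
#   #         [ 1., -4.,  5., -2.],
#   #         [ 0.,  1., -2.,  1.]])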
def periodic(n, coef, derivative=2, _penalty=derivative):
    # the default for _penalty is bound at definition time, so it refers to the
    # module-level derivative() function above, not to the 'derivative' argument
    return _penalty(n, coef, derivative=derivative, periodic=True)
def l2(n, coef):
"""
Builds a penalty matrix for P-Splines with categorical features.
Penalizes the squared value of each basis coefficient.
Parameters
----------
n : int
number of splines
coef : unused
for compatibility with constraints
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n)
"""
return sp.sparse.eye(n).tocsc()
def monotonicity_(n, coef, increasing=True):
"""
Builds a penalty matrix for P-Splines with continuous features.
Penalizes violation of monotonicity in the feature function.
Parameters
----------
n : int
number of splines
coef : array-like
coefficients of the feature function
increasing : bool, default: True
        whether to enforce monotonic increasing, or decreasing functions
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n)
"""
if n != len(coef.ravel()):
raise ValueError('dimension mismatch: expected n equals len(coef), '\
'but found n = {}, coef.shape = {}.'\
.format(n, coef.shape))
if n==1:
# no monotonic penalty for constant functions
return sp.sparse.csc_matrix(0.)
if increasing:
# only penalize the case where coef_i-1 > coef_i
mask = sp.sparse.diags((np.diff(coef.ravel()) < 0).astype(float))
else:
# only penalize the case where coef_i-1 < coef_i
mask = sp.sparse.diags((np.diff(coef.ravel()) > 0).astype(float))
derivative = 1
D = sparse_diff(sp.sparse.identity(n).tocsc(), n=derivative) * mask
return D.dot(D.T).tocsc()
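# Illustrative sketch, not part of the library: with coefficients [1, 3, 2, 4]
# only the decrease from 3 to 2 violates monotonic increase, so only that first
# difference is kept by the mask and contributes to the penalty.
#
#   import numpy as np
#   P = monotonicity_(4, np.array([1., 3., 2., 4.]), increasing=True)
#   # P is non-zero only in the 2x2 block for the coefficients at index 1 and 2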
def monotonic_inc(n, coef):
"""
Builds a penalty matrix for P-Splines with continuous features.
Penalizes violation of a monotonic increasing feature function.
Parameters
----------
n : int
number of splines
coef : array-like, coefficients of the feature function
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n)
"""
return monotonicity_(n, coef, increasing=True)
def monotonic_dec(n, coef):
"""
Builds a penalty matrix for P-Splines with continuous features.
Penalizes violation of a monotonic decreasing feature function.
Parameters
----------
n : int
number of splines
coef : array-like
coefficients of the feature function
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n)
"""
return monotonicity_(n, coef, increasing=False)
def convexity_(n, coef, convex=True):
"""
Builds a penalty matrix for P-Splines with continuous features.
Penalizes violation of convexity in the feature function.
Parameters
----------
n : int
number of splines
coef : array-like
coefficients of the feature function
convex : bool, default: True
whether to enforce convex, or concave functions
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n)
"""
if n != len(coef.ravel()):
raise ValueError('dimension mismatch: expected n equals len(coef), '\
'but found n = {}, coef.shape = {}.'\
.format(n, coef.shape))
if n==1:
# no convex penalty for constant functions
return sp.sparse.csc_matrix(0.)
if convex:
mask = sp.sparse.diags((np.diff(coef.ravel(), n=2) < 0).astype(float))
else:
mask = sp.sparse.diags((np.diff(coef.ravel(), n=2) > 0).astype(float))
derivative = 2
D = sparse_diff(sp.sparse.identity(n).tocsc(), n=derivative) * mask
return D.dot(D.T).tocsc()
def convex(n, coef):
"""
Builds a penalty matrix for P-Splines with continuous features.
Penalizes violation of a convex feature function.
Parameters
----------
n : int
number of splines
coef : array-like
coefficients of the feature function
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n)
"""
return convexity_(n, coef, convex=True)
def concave(n, coef):
"""
Builds a penalty matrix for P-Splines with continuous features.
Penalizes violation of a concave feature function.
Parameters
----------
n : int
number of splines
coef : array-like
coefficients of the feature function
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n)
"""
return convexity_(n, coef, convex=False)
# def circular(n, coef):
# """
# Builds a penalty matrix for P-Splines with continuous features.
# Penalizes violation of a circular feature function.
#
# Parameters
# ----------
# n : int
# number of splines
# coef : unused
# for compatibility with constraints
#
# Returns
# -------
# penalty matrix : sparse csc matrix of shape (n,n)
# """
# if n != len(coef.ravel()):
# raise ValueError('dimension mismatch: expected n equals len(coef), '\
# 'but found n = {}, coef.shape = {}.'\
# .format(n, coef.shape))
#
# if n==1:
# # no first circular penalty for constant functions
# return sp.sparse.csc_matrix(0.)
#
# row = np.zeros(n)
# row[0] = 1
# row[-1] = -1
# P = sp.sparse.vstack([row, sp.sparse.csc_matrix((n-2, n)), row[::-1]])
# return P.tocsc()
def none(n, coef):
"""
Build a matrix of zeros for features that should go unpenalized
Parameters
----------
n : int
number of splines
coef : unused
for compatibility with constraints
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n)
"""
return sp.sparse.csc_matrix(np.zeros((n, n)))
def wrap_penalty(p, fit_linear, linear_penalty=0.):
"""
tool to account for unity penalty on the linear term of any feature.
example:
p = wrap_penalty(derivative, fit_linear=True)(n, coef)
Parameters
----------
p : callable.
penalty-matrix-generating function.
fit_linear : boolean.
whether the current feature has a linear term or not.
linear_penalty : float, default: 0.
penalty on the linear term
Returns
-------
wrapped_p : callable
modified penalty-matrix-generating function
"""
def wrapped_p(n, *args):
if fit_linear:
if n == 1:
return sp.sparse.block_diag([linear_penalty], format='csc')
return sp.sparse.block_diag([linear_penalty,
p(n-1, *args)], format='csc')
else:
return p(n, *args)
return wrapped_p
def sparse_diff(array, n=1, axis=-1):
"""
A ported sparse version of np.diff.
Uses recursion to compute higher order differences
Parameters
----------
array : sparse array
n : int, default: 1
differencing order
axis : int, default: -1
axis along which differences are computed
Returns
-------
diff_array : sparse array
same shape as input array,
but 'axis' dimension is smaller by 'n'.
"""
if (n < 0) or (int(n) != n):
raise ValueError('Expected order is non-negative integer, '\
'but found: {}'.format(n))
if not sp.sparse.issparse(array):
warnings.warn('Array is not sparse. Consider using numpy.diff')
if n == 0:
return array
nd = array.ndim
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
A = sparse_diff(array, n-1, axis=axis)
return A[slice1] - A[slice2]
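# Illustrative sketch, not part of the library: sparse_diff agrees with np.diff
# along the chosen axis, it just stays sparse.
#
#   import numpy as np
#   X = sp.sparse.csc_matrix(np.arange(16.).reshape(4, 4))
#   np.allclose(sparse_diff(X, n=2).todense(),
#               np.diff(np.arange(16.).reshape(4, 4), n=2))   # True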
PENALTIES = {'auto': 'auto',
'derivative': derivative,
'l2': l2,
'none': none,
'periodic': periodic
}
CONSTRAINTS = {'convex': convex,
'concave': concave,
'monotonic_inc': monotonic_inc,
'monotonic_dec': monotonic_dec,
'none': none
}
|
|
from django.forms.util import ValidationError
from django.utils import simplejson as json
from django_facebook import settings as facebook_settings
from django_facebook.utils import mass_get_or_create, cleanup_oauth_url, \
get_profile_class
from open_facebook.exceptions import OpenFacebookException
from django_facebook.exceptions import FacebookException
try:
from dateutil.parser import parse as parse_date
except ImportError:
from django_facebook.utils import parse_like_datetime as parse_date
from django_facebook.utils import get_user_model
import datetime
import logging
from open_facebook import exceptions as open_facebook_exceptions
from open_facebook.utils import send_warning
from django_facebook import signals
logger = logging.getLogger(__name__)
def require_persistent_graph(request, *args, **kwargs):
'''
    Just like get_persistent_graph, but instead of returning None
    raise an OpenFacebookException if we can't access facebook
'''
kwargs['raise_'] = True
graph = get_persistent_graph(request, *args, **kwargs)
if not graph:
raise OpenFacebookException('please authenticate')
return graph
def get_persistent_graph(request, *args, **kwargs):
    '''
    Wraps itself around get_facebook_graph,
    but stores the graph in the session, allowing usage across multiple
    pageviews.
    Note that Facebook sessions expire at some point, so you can't store this
    for permanent usage,
    at least not without asking for the offline_access permission
    '''
if not request:
        raise ValidationError(
            'Request is required if you want to use persistent tokens')
graph = None
# some situations like an expired access token require us to refresh our graph
require_refresh = False
code = request.REQUEST.get('code')
if code:
require_refresh = True
local_graph = getattr(request, 'facebook', None)
if local_graph:
# gets the graph from the local memory if available
graph = local_graph
if not graph:
# search for the graph in the session
cached_graph = request.session.get('graph')
if cached_graph:
cached_graph._me = None
graph = cached_graph
if not graph or require_refresh:
# gets the new graph, note this might do token conversions (slow)
graph = get_facebook_graph(request, *args, **kwargs)
# if it's valid replace the old cache
if graph is not None and graph.access_token:
request.session['graph'] = graph
# add the current user id and cache the graph at the request level
_add_current_user_id(graph, request.user)
request.facebook = graph
return graph
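# Illustrative usage sketch inside a Django view (the view name and the 'me' call
# are examples, not part of this module):
#
#   def my_profile_view(request):
#       graph = require_persistent_graph(request)
#       me = graph.get('me')
#       ...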
def get_facebook_graph(request=None, access_token=None, redirect_uri=None, raise_=False):
'''
given a request from one of these
- js authentication flow (signed cookie)
- facebook app authentication flow (signed cookie)
- facebook oauth redirect (code param in url)
- mobile authentication flow (direct access_token)
- offline access token stored in user profile
returns a graph object
redirect path is the path from which you requested the token
for some reason facebook needs exactly this uri when converting the code
to a token
falls back to the current page without code in the request params
    specify redirect_uri if you are not posting and receiving the code
on the same page
'''
# this is not a production flow, but very handy for testing
if not access_token and request.REQUEST.get('access_token'):
access_token = request.REQUEST['access_token']
    # should dropping query params be handled inside the open facebook api?
    # maybe; this is a bit awkward...
from open_facebook import OpenFacebook, FacebookAuthorization
from django.core.cache import cache
parsed_data = None
expires = None
if hasattr(request, 'facebook') and request.facebook:
graph = request.facebook
_add_current_user_id(graph, request.user)
return graph
if not access_token:
# easy case, code is in the get
code = request.REQUEST.get('code')
if code:
logger.info('Got code from the request data')
if not code:
# signed request or cookie leading, base 64 decoding needed
signed_data = request.REQUEST.get('signed_request')
cookie_name = 'fbsr_%s' % facebook_settings.FACEBOOK_APP_ID
cookie_data = request.COOKIES.get(cookie_name)
if cookie_data:
signed_data = cookie_data
# the javascript api assumes a redirect uri of ''
redirect_uri = ''
if signed_data:
logger.info('Got signed data from facebook')
parsed_data = FacebookAuthorization.parse_signed_data(
signed_data)
if parsed_data:
logger.info('Got parsed data from facebook')
# parsed data can fail because of signing issues
if 'oauth_token' in parsed_data:
logger.info('Got access_token from parsed data')
# we already have an active access token in the data
access_token = parsed_data['oauth_token']
else:
logger.info('Got code from parsed data')
# no access token, need to use this code to get one
code = parsed_data.get('code', None)
if not access_token:
if code:
cache_key = 'convert_code_%s' % code
access_token = cache.get(cache_key)
if not access_token:
# exchange the code for an access token
# based on the php api
# https://github.com/facebook/php-sdk/blob/master/src/base_facebook.php
# create a default for the redirect_uri
# when using the javascript sdk the default
# should be '' an empty string
# for other pages it should be the url
if not redirect_uri:
redirect_uri = ''
# we need to drop signed_request, code and state
redirect_uri = cleanup_oauth_url(redirect_uri)
try:
logger.info(
'trying to convert the code with redirect uri: %s',
redirect_uri)
                    # This is really slow, that's why it's cached
token_response = FacebookAuthorization.convert_code(
code, redirect_uri=redirect_uri)
expires = token_response.get('expires')
access_token = token_response['access_token']
# would use cookies instead, but django's cookie setting
# is a bit of a mess
cache.set(cache_key, access_token, 60 * 60 * 2)
except open_facebook_exceptions.OAuthException, e:
                    # this sometimes fails, but it shouldn't raise because
# it happens when users remove your
# permissions and then try to reauthenticate
logger.warn('Error when trying to convert code %s',
unicode(e))
if raise_:
raise
else:
return None
elif request.user.is_authenticated():
# support for offline access tokens stored in the users profile
profile = request.user.get_profile()
access_token = getattr(profile, 'access_token', None)
if not access_token:
if raise_:
                message = "Couldn't find an access token in the request or the user's profile"
raise open_facebook_exceptions.OAuthException(message)
else:
return None
else:
if raise_:
            message = "Couldn't find an access token in the request or cookies"
raise open_facebook_exceptions.OAuthException(message)
else:
return None
graph = OpenFacebook(access_token, parsed_data, expires=expires)
# add user specific identifiers
if request:
_add_current_user_id(graph, request.user)
return graph
def _add_current_user_id(graph, user):
'''
    set the current user id, convenient if you want to make sure your
    fb session and user belong together
'''
if graph:
graph.current_user_id = None
if user.is_authenticated() and graph:
profile = user.get_profile()
facebook_id = getattr(profile, 'facebook_id', None)
if facebook_id:
graph.current_user_id = facebook_id
class FacebookUserConverter(object):
'''
This conversion class helps you to convert Facebook users to Django users
Helps with
- extracting and prepopulating full profile data
- invite flows
- importing and storing likes
'''
def __init__(self, open_facebook):
from open_facebook.api import OpenFacebook
self.open_facebook = open_facebook
assert isinstance(open_facebook, OpenFacebook)
self._profile = None
def is_authenticated(self):
return self.open_facebook.is_authenticated()
def facebook_registration_data(self, username=True):
'''
Gets all registration data
        and ensures it is correct input for a django registration
'''
facebook_profile_data = self.facebook_profile_data()
user_data = {}
try:
user_data = self._convert_facebook_data(
facebook_profile_data, username=username)
except OpenFacebookException, e:
self._report_broken_facebook_data(
user_data, facebook_profile_data, e)
raise
return user_data
def facebook_profile_data(self):
'''
Returns the facebook profile data, together with the image locations
'''
if self._profile is None:
profile = self.open_facebook.me()
profile['image'] = self.open_facebook.my_image_url('large')
profile['image_thumb'] = self.open_facebook.my_image_url()
self._profile = profile
return self._profile
@classmethod
def _convert_facebook_data(cls, facebook_profile_data, username=True):
'''
Takes facebook user data and converts it to a format for
usage with Django
'''
user_data = facebook_profile_data.copy()
profile = facebook_profile_data.copy()
website = profile.get('website')
if website:
user_data['website_url'] = cls._extract_url(website)
user_data['facebook_profile_url'] = profile.get('link')
user_data['facebook_name'] = profile.get('name')
if len(user_data.get('email', '')) > 75:
# no more fake email accounts for facebook
del user_data['email']
gender = profile.get('gender', None)
if gender == 'male':
user_data['gender'] = 'm'
elif gender == 'female':
user_data['gender'] = 'f'
user_data['username'] = cls._retrieve_facebook_username(user_data)
        user_data['password2'], user_data['password1'] = (
            cls._generate_fake_password(),) * 2  # same generated password for both fields
facebook_map = dict(birthday='date_of_birth',
about='about_me', id='facebook_id')
for k, v in facebook_map.items():
user_data[v] = user_data.get(k)
user_data['facebook_id'] = int(user_data['facebook_id'])
if not user_data['about_me'] and user_data.get('quotes'):
user_data['about_me'] = user_data.get('quotes')
user_data['date_of_birth'] = cls._parse_data_of_birth(
user_data['date_of_birth'])
if username:
user_data['username'] = cls._create_unique_username(
user_data['username'])
# make sure the first and last name are not too long
user_data['first_name'] = user_data['first_name'][:30]
user_data['last_name'] = user_data['last_name'][:30]
return user_data
@classmethod
def _extract_url(cls, text_url_field):
'''
>>> url_text = 'http://www.google.com blabla'
>>> FacebookAPI._extract_url(url_text)
u'http://www.google.com/'
>>> url_text = 'http://www.google.com/'
>>> FacebookAPI._extract_url(url_text)
u'http://www.google.com/'
>>> url_text = 'google.com/'
>>> FacebookAPI._extract_url(url_text)
u'http://google.com/'
>>> url_text = 'http://www.fahiolista.com/www.myspace.com/www.google.com'
>>> FacebookAPI._extract_url(url_text)
u'http://www.fahiolista.com/www.myspace.com/www.google.com'
>>> url_text = u"""http://fernandaferrervazquez.blogspot.com/\r\nhttp://twitter.com/fferrervazquez\r\nhttp://comunidad.redfashion.es/profile/fernandaferrervazquez\r\nhttp://www.facebook.com/group.php?gid3D40257259997&ref3Dts\r\nhttp://fernandaferrervazquez.spaces.live.com/blog/cns!EDCBAC31EE9D9A0C!326.trak\r\nhttp://www.linkedin.com/myprofile?trk3Dhb_pro\r\nhttp://www.youtube.com/account#profile\r\nhttp://www.flickr.com/\r\n Mi galer\xeda\r\nhttp://www.flickr.com/photos/wwwfernandaferrervazquez-showroomrecoletacom/ \r\n\r\nhttp://www.facebook.com/pages/Buenos-Aires-Argentina/Fernanda-F-Showroom-Recoleta/200218353804?ref3Dts\r\nhttp://fernandaferrervazquez.wordpress.com/wp-admin/"""
>>> FacebookAPI._extract_url(url_text)
u'http://fernandaferrervazquez.blogspot.com/a'
'''
import re
text_url_field = text_url_field.encode('utf8')
seperation = re.compile('[ ,;\n\r]+')
parts = seperation.split(text_url_field)
for part in parts:
from django.forms import URLField
url_check = URLField(verify_exists=False)
try:
clean_url = url_check.clean(part)
return clean_url
except ValidationError:
continue
@classmethod
def _generate_fake_password(cls):
'''
Returns a random fake password
'''
import string
from random import choice
size = 9
password = ''.join([choice(string.letters + string.digits)
for i in range(size)])
return password.lower()
@classmethod
def _parse_data_of_birth(cls, data_of_birth_string):
if data_of_birth_string:
format = '%m/%d/%Y'
try:
parsed_date = datetime.datetime.strptime(
data_of_birth_string, format)
return parsed_date
except ValueError:
# Facebook sometimes provides a partial date format
# ie 04/07 (ignore those)
if data_of_birth_string.count('/') != 1:
raise
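    # Illustrative examples for the parser above (not part of the original module):
    #
    #   FacebookUserConverter._parse_data_of_birth('04/07/1986')
    #   # -> datetime.datetime(1986, 4, 7, 0, 0)
    #   FacebookUserConverter._parse_data_of_birth('04/07')
    #   # -> None, facebook's partial month/day dates are ignored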
@classmethod
def _report_broken_facebook_data(cls, facebook_data,
original_facebook_data, e):
'''
Sends a nice error email with the
- facebook data
- exception
- stacktrace
'''
from pprint import pformat
data_dump = json.dumps(original_facebook_data)
data_dump_python = pformat(original_facebook_data)
message_format = 'The following facebook data failed with error %s' \
'\n\n json %s \n\n python %s \n'
data_tuple = (unicode(e), data_dump, data_dump_python)
message = message_format % data_tuple
extra_data = {
'data_dump': data_dump,
'data_dump_python': data_dump_python,
'facebook_data': facebook_data,
}
send_warning(message, **extra_data)
@classmethod
def _create_unique_username(cls, base_username):
'''
        Check the database and add numbers to the username to ensure it's unique
'''
usernames = list(get_user_model().objects.filter(
username__istartswith=base_username).values_list(
'username', flat=True))
usernames_lower = [str(u).lower() for u in usernames]
username = str(base_username)
i = 1
while base_username.lower() in usernames_lower:
base_username = username + str(i)
i += 1
return base_username
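    # Illustrative example (not part of the original module): if the database
    # already contains 'jane_doe' and 'jane_doe1', then
    #
    #   FacebookUserConverter._create_unique_username('jane_doe')
    #   # -> 'jane_doe2'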
@classmethod
def _retrieve_facebook_username(cls, facebook_data):
'''
Search for the username in 3 places
- public profile
- email
- name
'''
username = None
# start by checking the public profile link (your facebook username)
link = facebook_data.get('link')
if link:
username = link.split('/')[-1]
            username = cls._make_username(username)
            # profile links like .../profile.php?id=... slugify to 'profilephp...',
            # which is not a real username
            if 'profilephp' in username:
username = None
        # try the email address next
if not username and 'email' in facebook_data:
username = cls._make_username(facebook_data.get(
'email').split('@')[0])
# last try the name of the user
if not username or len(username) < 4:
username = cls._make_username(facebook_data.get('name'))
if not username:
raise FacebookException('couldnt figure out a username')
return username
@classmethod
def _make_username(cls, username):
'''
Slugify the username and replace - with _ to meet username requirements
'''
from django.template.defaultfilters import slugify
slugified_name = slugify(username).replace('-', '_')
# consider the username min and max constraints
slugified_name = slugified_name[:30]
if len(username) < 4:
slugified_name = None
return slugified_name
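    # Illustrative examples (not part of the original module):
    #
    #   FacebookUserConverter._make_username('Jane Doe')     # -> 'jane_doe'
    #   FacebookUserConverter._make_username('profile.php')  # -> 'profilephp'
    #   FacebookUserConverter._make_username('Jo')           # -> None (too short)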
def get_and_store_likes(self, user):
'''
Gets and stores your facebook likes to DB
Both the get and the store run in a async task when
FACEBOOK_CELERY_STORE = True
'''
if facebook_settings.FACEBOOK_CELERY_STORE:
from django_facebook.tasks import get_and_store_likes
get_and_store_likes.delay(user, self)
else:
self._get_and_store_likes(user)
def _get_and_store_likes(self, user):
likes = self.get_likes()
stored_likes = self._store_likes(user, likes)
return stored_likes
def get_likes(self, limit=5000):
'''
Parses the facebook response and returns the likes
'''
likes_response = self.open_facebook.get('me/likes', limit=limit)
likes = likes_response and likes_response.get('data')
logger.info('found %s likes', len(likes))
return likes
def store_likes(self, user, likes):
'''
Given a user and likes store these in the db
Note this can be a heavy operation, best to do it
in the background using celery
'''
if facebook_settings.FACEBOOK_CELERY_STORE:
from django_facebook.tasks import store_likes
store_likes.delay(user, likes)
else:
self._store_likes(user, likes)
@classmethod
def _store_likes(self, user, likes):
current_likes = inserted_likes = None
if likes:
from django_facebook.models import FacebookLike
base_queryset = FacebookLike.objects.filter(user_id=user.id)
global_defaults = dict(user_id=user.id)
id_field = 'facebook_id'
default_dict = {}
for like in likes:
name = like.get('name')
created_time_string = like.get('created_time')
created_time = None
if created_time_string:
created_time = parse_date(like['created_time'])
default_dict[like['id']] = dict(
created_time=created_time,
category=like.get('category'),
name=name
)
current_likes, inserted_likes = mass_get_or_create(
FacebookLike, base_queryset, id_field, default_dict,
global_defaults)
logger.debug('found %s likes and inserted %s new likes',
len(current_likes), len(inserted_likes))
        # fire an event, so you can do things like personalizing the users' account
# based on the likes
signals.facebook_post_store_likes.send(sender=get_profile_class(),
user=user, likes=likes, current_likes=current_likes,
inserted_likes=inserted_likes,
)
return likes
def get_and_store_friends(self, user):
'''
Gets and stores your facebook friends to DB
        Both the get and the store run in an async task when
FACEBOOK_CELERY_STORE = True
'''
if facebook_settings.FACEBOOK_CELERY_STORE:
from django_facebook.tasks import get_and_store_friends
get_and_store_friends.delay(user, self)
else:
self._get_and_store_friends(user)
def _get_and_store_friends(self, user):
'''
Getting the friends via fb and storing them
'''
friends = self.get_friends()
stored_friends = self._store_friends(user, friends)
return stored_friends
def get_friends(self, limit=5000):
'''
Connects to the facebook api and gets the users friends
'''
friends = getattr(self, '_friends', None)
if friends is None:
friends_response = self.open_facebook.fql(
"SELECT uid, name, sex FROM user WHERE uid IN (SELECT uid2 "
"FROM friend WHERE uid1 = me()) LIMIT %s" % limit)
# friends_response = self.open_facebook.get('me/friends',
# limit=limit)
# friends = friends_response and friends_response.get('data')
friends = []
for response_dict in friends_response:
response_dict['id'] = response_dict['uid']
friends.append(response_dict)
logger.info('found %s friends', len(friends))
return friends
def store_friends(self, user, friends):
'''
Stores the given friends locally for this user
Quite slow, better do this using celery on a secondary db
'''
if facebook_settings.FACEBOOK_CELERY_STORE:
from django_facebook.tasks import store_friends
store_friends.delay(user, friends)
else:
self._store_friends(user, friends)
@classmethod
def _store_friends(self, user, friends):
from django_facebook.models import FacebookUser
current_friends = inserted_friends = None
# store the users for later retrieval
if friends:
# see which ids this user already stored
base_queryset = FacebookUser.objects.filter(user_id=user.id)
            # if none of your friends have a gender, clean the old data
genders = FacebookUser.objects.filter(
user_id=user.id, gender__in=('M', 'F')).count()
if not genders:
FacebookUser.objects.filter(user_id=user.id).delete()
global_defaults = dict(user_id=user.id)
default_dict = {}
gender_map = dict(female='F', male='M')
for f in friends:
name = f.get('name')
gender = None
if f.get('sex'):
gender = gender_map[f.get('sex')]
default_dict[str(f['id'])] = dict(name=name, gender=gender)
id_field = 'facebook_id'
current_friends, inserted_friends = mass_get_or_create(
FacebookUser, base_queryset, id_field, default_dict,
global_defaults)
logger.debug('found %s friends and inserted %s new ones',
len(current_friends), len(inserted_friends))
        # fire an event, so you can do things like personalizing suggested users
# to follow
signals.facebook_post_store_friends.send(sender=get_profile_class(),
user=user, friends=friends, current_friends=current_friends,
inserted_friends=inserted_friends,
)
return friends
def registered_friends(self, user):
'''
Returns all profile models which are already registered on your site
and a list of friends which are not on your site
'''
from django_facebook.utils import get_profile_class
profile_class = get_profile_class()
friends = self.get_friends(limit=1000)
if friends:
friend_ids = [f['id'] for f in friends]
friend_objects = profile_class.objects.filter(
facebook_id__in=friend_ids).select_related('user')
registered_ids = [f.facebook_id for f in friend_objects]
new_friends = [f for f in friends if f['id'] not in registered_ids]
else:
new_friends = []
friend_objects = profile_class.objects.none()
return friend_objects, new_friends
|
|
# coding: utf-8
from __future__ import unicode_literals, division, print_function
import os
import tempfile
import shutil
from pymatgen.util.testing import PymatgenTest
from monty.functools import lazy_property
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.io.abinitio import *
from pymatgen.io.abinitio.flows import *
from pymatgen.io.abinitio.works import *
from pymatgen.io.abinitio.tasks import *
from pymatgen.io.abinitio.pseudos import Pseudo
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinitio")
def ref_file(filename):
return os.path.join(_test_dir, filename)
class FakeAbinitInput(object):
"""Emulate an Abinit input."""
@lazy_property
def pseudos(self):
return [Pseudo.as_pseudo(ref_file("14si.pspnc"))]
@lazy_property
def structure(self):
coords = []
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
return Structure(lattice, ["Si", "Si"], coords)
class FlowUnitTest(PymatgenTest):
"""Provides helper function for testing Abinit flows."""
MANAGER = """\
policy:
autoparal: 1
qadapters:
- &batch
priority: 1
queue:
qtype: slurm
qname: Oban
qparams:
mail_user: nobody@nowhere
limits:
timelimit: 0:20:00
min_cores: 4
max_cores: 12
#condition: {"$eq": {omp_threads: 2}}
hardware:
num_nodes: 10
sockets_per_node: 1
cores_per_socket: 2
mem_per_node: 4 Gb
job:
modules:
- intel/compilerpro/13.0.1.117
- fftw3/intel/3.3
shell_env:
PATH: /home/user/tmp_intel13/src/98_main/:/home/user//NAPS/intel13/bin:$PATH
LD_LIBRARY_PATH: /home/user/NAPS/intel13/lib:$LD_LIBRARY_PATH
mpi_runner: mpirun
# Connection to the MongoDb database (optional)
db_connector:
database: abinit
collection: test
#host: 0.0.0.0
#port: 8080
#user: gmatteo
#password: helloworld
batch_adapter: *batch
"""
def setUp(self):
"""Initialization phase."""
super(FlowUnitTest, self).setUp()
# Temporary directory for the flow.
self.workdir = tempfile.mkdtemp()
# Create the TaskManager.
self.manager = TaskManager.from_string(self.MANAGER)
# Fake input file
self.fake_input = FakeAbinitInput()
def tearDown(self):
"""Delete workdir"""
shutil.rmtree(self.workdir)
class FlowTest(FlowUnitTest):
def test_base(self):
"""Testing Flow..."""
aequal, atrue, afalse = self.assertEqual, self.assertTrue, self.assertFalse
flow = Flow(workdir=self.workdir, manager=self.manager)
# Build a work with a task
work = flow.register_task(self.fake_input)
assert work.is_work
task0_w0 = work[0]
atrue(task0_w0.is_task)
print(task0_w0.status.colored)
atrue(len(flow) == 1)
aequal(flow.num_tasks, 1)
atrue(flow.has_db)
#print(task0_w0.input_structure)
print(task0_w0.make_input)
# Task history
assert len(task0_w0.history) == 0
task0_w0.history.info("Hello %s", "world")
assert len(task0_w0.history) == 1
print(task0_w0.history)
record = task0_w0.history.pop()
print(record, repr(record))
assert record.get_message(asctime=False) == "Hello world"
assert len(task0_w0.history) == 0
assert flow.select_tasks(nids=task0_w0.node_id)[0] == task0_w0
assert flow.select_tasks(wslice=slice(0,1,1)) == [task0_w0]
# Build a workflow containing two tasks depending on task0_w0
work = Work()
atrue(work.is_work)
work.register(self.fake_input)
work.register(self.fake_input)
aequal(len(work), 2)
flow.register_work(work, deps={task0_w0: "WFK"})
atrue(flow.is_flow)
aequal(len(flow), 2)
# Add another work without dependencies.
task0_w2 = flow.register_task(self.fake_input)[0]
atrue(len(flow) == 3)
afalse(flow.is_work)
# Allocate internal tables
flow.allocate()
        # Check dependencies.
atrue(flow[1].depends_on(task0_w0))
atrue(flow[1][0].depends_on(task0_w0))
atrue(flow[1][0] in task0_w0.get_children())
atrue(task0_w0 in flow[1][0].get_parents())
afalse(flow[2][0].depends_on(task0_w0))
afalse(flow[2][0] in task0_w0.get_children())
afalse(task0_w0 in flow[2][0].get_parents())
aequal(flow[1].pos, 1)
aequal(flow[1][0].pos, (1, 0))
aequal(flow[2][0].pos, (2, 0))
afalse(flow.all_ok)
aequal(flow.num_tasks, 4)
aequal(flow.ncores_used, 0)
# API for iterations
aequal(len(list(flow.iflat_tasks(status="Initialized"))), sum(len(work) for work in flow))
aequal(list(flow.iflat_tasks(nids=task0_w0.node_id)), [task0_w0])
aequal([task0_w0], flow.tasks_from_nids(task0_w0.node_id))
aequal([(0, 0)], flow.wti_from_nids(task0_w0.node_id))
aequal([task0_w2], flow.tasks_from_nids([task0_w2.node_id]))
aequal([(2, 0)], flow.wti_from_nids([task0_w2.node_id]))
# Check for deadlocks
flow.check_dependencies()
# Save the flow in pickle format.
flow.build_and_pickle_dump()
# Find the pickle file in workdir and recreate the flow.
same_flow = Flow.pickle_load(self.workdir)
aequal(same_flow, flow)
# to/from string
same_flow = Flow.pickle_loads(flow.pickle_dumps())
aequal(same_flow, flow)
self.assertPMGSONable(flow)
flow.show_info()
flow.show_summary()
# Test show_status
flow.show_status()
flow.show_event_handlers()
def test_workdir(self):
"""Testing if one can use workdir=None in flow.__init__ and then flow.allocate(workdir)."""
flow = Flow(workdir=None, manager=self.manager)
flow.register_task(self.fake_input)
#flow.register_work(work)
work = Work()
work.register_scf_task(self.fake_input)
flow.register_work(work)
        # If flow.workdir is None, we should use flow.allocate(workdir)
with self.assertRaises(RuntimeError): flow.allocate()
tmpdir = tempfile.mkdtemp()
flow.allocate(workdir=tmpdir)
print(flow)
assert len(flow) == 2
flow.build()
for i, work in enumerate(flow):
assert work.workdir == os.path.join(tmpdir, "w%d" % i)
for t, task in enumerate(work):
assert task.workdir == os.path.join(work.workdir, "t%d" % t)
class TestFlowInSpectatorMode(FlowUnitTest):
def test_spectator(self):
flow = Flow(workdir=self.workdir, manager=self.manager)
work0 = Work()
work0.register_scf_task(self.fake_input)
work0.register_scf_task(self.fake_input)
work1 = Work()
work1.register_scf_task(self.fake_input)
flow.register_work(work0)
flow.register_work(work1)
flow.disconnect_signals()
flow.disconnect_signals()
flow.connect_signals()
flow.connect_signals()
for mode in [False, True]:
flow.set_spectator_mode(mode=mode)
assert flow.in_spectator_mode == mode
for node in flow.iflat_nodes():
assert node.in_spectator_mode == mode
assert len(list(flow.iflat_nodes())) == 1 + len(flow.works) + sum(len(work) for work in flow)
assert flow.node_from_nid(flow.node_id) == flow
flow.set_spectator_mode(mode=False)
flow.build_and_pickle_dump()
        # pickle_load always returns a flow in spectator mode.
flow = Flow.pickle_load(flow.workdir)
assert flow.in_spectator_mode
#with self.assertRaises(flow.SpectatorError): flow.pickle_dump()
#with self.assertRaises(flow.SpectatorError): flow.make_scheduler().start()
work = flow[0]
assert work.send_signal(work.S_OK) is None
#with self.assertRaises(work.SpectatorError): work.on_ok()
#with self.assertRaises(work.SpectatorError): work.on_all_ok()
task = work[0]
assert task.send_signal(task.S_OK) is None
#with self.assertRaises(task.SpectatorError): task._on_done()
#with self.assertRaises(task.SpectatorError): task.on_ok()
#with self.assertRaises(task.SpectatorError): task._on_ok()
class TestBatchLauncher(FlowUnitTest):
def test_batchlauncher(self):
"""Testing BatchLauncher methods."""
# Create the TaskManager.
manager = TaskManager.from_string(self.MANAGER)
print("batch_adapter", manager.batch_adapter)
assert manager.batch_adapter is not None
def build_flow_with_name(name):
"""Build a flow with workdir None and the given name."""
flow = Flow(workdir=None, manager=self.manager)
flow.set_name(name)
flow.register_task(self.fake_input)
work = Work()
work.register_scf_task(self.fake_input)
flow.register_work(work)
return flow
from pymatgen.io.abinitio.launcher import BatchLauncher
tmpdir = tempfile.mkdtemp()
batch = BatchLauncher(workdir=tmpdir, manager=manager)
print(batch)
flow0 = build_flow_with_name("flow0")
flow1 = build_flow_with_name("flow1")
flow2_same_name = build_flow_with_name("flow1")
batch.add_flow(flow0)
# Cannot add the same flow twice.
with self.assertRaises(batch.Error):
batch.add_flow(flow0)
batch.add_flow(flow1)
# Cannot add two flows with the same name.
with self.assertRaises(batch.Error):
batch.add_flow(flow2_same_name)
batch.submit(dry_run=True)
for i, flow in enumerate([flow0, flow1]):
assert flow.workdir == os.path.join(batch.workdir, "flow%d" % i)
batch.pickle_dump()
batch_from_pickle = BatchLauncher.pickle_load(batch.workdir)
assert all(f1 == f2 for f1, f2 in zip(batch.flows, batch_from_pickle.flows))
if __name__ == '__main__':
import unittest
unittest.main()
|
|
#!/usr/bin/env python3
import socket
from util import ip4_range
import unittest
from framework import VppTestCase, VppTestRunner
from template_bd import BridgeDomain
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.contrib.geneve import GENEVE
from scapy.utils import atol
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_ip import INVALID_INDEX
class TestGeneve(BridgeDomain, VppTestCase):
""" GENEVE Test Case """
def __init__(self, *args):
BridgeDomain.__init__(self)
VppTestCase.__init__(self, *args)
def encapsulate(self, pkt, vni):
"""
Encapsulate the original payload frame by adding a GENEVE header with
its UDP, IP and Ethernet fields
"""
return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
GENEVE(vni=vni) /
pkt)
def ip_range(self, start, end):
""" range of remote ip's """
return ip4_range(self.pg0.remote_ip4, start, end)
def encap_mcast(self, pkt, src_ip, src_mac, vni):
"""
Encapsulate the original payload frame by adding a GENEVE header with
its UDP, IP and Ethernet fields
"""
return (Ether(src=src_mac, dst=self.mcast_mac) /
IP(src=src_ip, dst=self.mcast_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
GENEVE(vni=vni) /
pkt)
def decapsulate(self, pkt):
"""
Decapsulate the original payload frame by removing GENEVE header
"""
# check if the I flag is set
# self.assertEqual(pkt[GENEVE].flags, int('0x8', 16))
return pkt[GENEVE].payload
# Method for checking GENEVE encapsulation.
#
def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
# TODO: add error messages
# Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
# by VPP using ARP.
self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
else:
self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
# Verify GENEVE tunnel source IP is VPP_IP and destination IP is MY_IP.
self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
else:
self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
# Verify UDP destination port is the GENEVE port 6081, source UDP port
# could be arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
# TODO: checksum check
# Verify VNI
self.assertEqual(pkt[GENEVE].vni, vni)
@classmethod
def create_geneve_flood_test_bd(cls, vni, n_ucast_tunnels):
# Create n_ucast_tunnels unicast geneve tunnels under the bridge domain
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
next_hop_address = cls.pg0.remote_ip4
for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
ip_range_end):
# add host route so dest_ip4 will not be resolved
rip = VppIpRoute(cls, dest_ip4, 32,
[VppRoutePath(next_hop_address,
INVALID_INDEX)],
register=False)
rip.add_vpp_config()
r = cls.vapi.geneve_add_del_tunnel(
local_address=cls.pg0.local_ip4, remote_address=dest_ip4,
vni=vni)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=vni)
@classmethod
def add_del_shared_mcast_dst_load(cls, is_add):
"""
add or del tunnels sharing the same mcast dst
to test geneve ref_count mechanism
"""
n_shared_dst_tunnels = 10
vni_start = 10000
vni_end = vni_start + n_shared_dst_tunnels
for vni in range(vni_start, vni_end):
r = cls.vapi.geneve_add_del_tunnel(
local_address=cls.pg0.local_ip4,
remote_address=cls.mcast_ip4, mcast_sw_if_index=1,
is_add=is_add, vni=vni)
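# sw_if_index == 0xffffffff (~0) is VPP's invalid-index sentinel,
# i.e. the tunnel was not created.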
if r.sw_if_index == 0xffffffff:
raise ValueError("bad sw_if_index: ~0")
@classmethod
def add_shared_mcast_dst_load(cls):
cls.add_del_shared_mcast_dst_load(is_add=1)
@classmethod
def del_shared_mcast_dst_load(cls):
cls.add_del_shared_mcast_dst_load(is_add=0)
@classmethod
def add_del_mcast_tunnels_load(cls, is_add):
"""
add or del tunnels to test geneve stability
"""
n_distinct_dst_tunnels = 10
ip_range_start = 10
ip_range_end = ip_range_start + n_distinct_dst_tunnels
for dest_ip4 in ip4_range(cls.mcast_ip4, ip_range_start,
ip_range_end):
vni = int(dest_ip4.split('.')[3])
cls.vapi.geneve_add_del_tunnel(local_address=cls.pg0.local_ip4,
remote_address=dest_ip4,
mcast_sw_if_index=1, is_add=is_add,
vni=vni)
@classmethod
def add_mcast_tunnels_load(cls):
cls.add_del_mcast_tunnels_load(is_add=1)
@classmethod
def del_mcast_tunnels_load(cls):
cls.add_del_mcast_tunnels_load(is_add=0)
# Class method to start the GENEVE test case.
# Overrides setUpClass method in VppTestCase class.
# A Python try..except statement is used to ensure that the tear down of
# the class is executed even if an exception is raised.
# @param cls The class pointer.
@classmethod
def setUpClass(cls):
super(TestGeneve, cls).setUpClass()
try:
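# 6081 is the IANA-assigned UDP destination port for GENEVE (RFC 8926).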
cls.dport = 6081
# Create 4 pg interfaces.
cls.create_pg_interfaces(range(4))
for pg in cls.pg_interfaces:
pg.admin_up()
# Configure IPv4 addresses on VPP pg0.
cls.pg0.config_ip4()
# Resolve MAC address for VPP's IP address on pg0.
cls.pg0.resolve_arp()
# Our Multicast address
cls.mcast_ip4 = '239.1.1.1'
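# IPv4 multicast MAC addresses map the low 23 bits of the group address
# into the 01:00:5e:00:00:00 prefix (RFC 1112), hence the 0x7f mask on
# the high byte below.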
iplong = atol(cls.mcast_ip4)
cls.mcast_mac = "01:00:5e:%02x:%02x:%02x" % (
(iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF)
# Create GENEVE VTEP on VPP pg0, and put geneve_tunnel0 and pg1
# into BD.
cls.single_tunnel_bd = 1
r = cls.vapi.geneve_add_del_tunnel(
local_address=cls.pg0.local_ip4,
remote_address=cls.pg0.remote_ip4, vni=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd)
# Setup vni 2 to test multicast flooding
cls.n_ucast_tunnels = 10
cls.mcast_flood_bd = 2
cls.create_geneve_flood_test_bd(cls.mcast_flood_bd,
cls.n_ucast_tunnels)
r = cls.vapi.geneve_add_del_tunnel(
local_address=cls.pg0.local_ip4,
remote_address=cls.mcast_ip4, mcast_sw_if_index=1,
vni=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg2.sw_if_index, bd_id=cls.mcast_flood_bd)
# Add and delete mcast tunnels to check stability
cls.add_shared_mcast_dst_load()
cls.add_mcast_tunnels_load()
cls.del_shared_mcast_dst_load()
cls.del_mcast_tunnels_load()
# Setup vni 3 to test unicast flooding
cls.ucast_flood_bd = 3
cls.create_geneve_flood_test_bd(cls.ucast_flood_bd,
cls.n_ucast_tunnels)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd)
except Exception:
super(TestGeneve, cls).tearDownClass()
raise
# Method to define VPP actions before tear down of the test case.
# Overrides tearDown method in VppTestCase class.
# @param self The object pointer.
def tearDown(self):
super(TestGeneve, self).tearDown()
def show_commands_at_teardown(self):
self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 2 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
self.logger.info(self.vapi.cli("show geneve tunnel"))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|